Diffstat (limited to 'drivers/net')
drivers/net/Kconfig | 20
drivers/net/Makefile | 1
drivers/net/bonding/bond_3ad.c | 106
drivers/net/bonding/bond_3ad.h | 283
drivers/net/bonding/bond_alb.c | 23
drivers/net/bonding/bond_alb.h | 181
drivers/net/bonding/bond_debugfs.c | 4
drivers/net/bonding/bond_main.c | 36
drivers/net/bonding/bond_netlink.c | 2
drivers/net/bonding/bond_options.c | 2
drivers/net/bonding/bond_options.h | 130
drivers/net/bonding/bond_procfs.c | 2
drivers/net/bonding/bond_sysfs.c | 2
drivers/net/bonding/bond_sysfs_slave.c | 2
drivers/net/bonding/bonding.h | 648
drivers/net/can/Makefile | 3
drivers/net/can/c_can/c_can.c | 13
drivers/net/can/c_can/c_can.h | 25
drivers/net/can/c_can/c_can_platform.c | 201
drivers/net/can/cc770/cc770.c | 2
drivers/net/can/dev.c | 78
drivers/net/can/flexcan.c | 101
drivers/net/can/mscan/mscan.c | 48
drivers/net/can/sja1000/sja1000.c | 51
drivers/net/can/slcan.c | 7
drivers/net/can/vcan.c | 5
drivers/net/dsa/Kconfig | 15
drivers/net/dsa/Makefile | 3
drivers/net/dsa/mv88e6060.c | 5
drivers/net/dsa/mv88e6123_61_65.c | 25
drivers/net/dsa/mv88e6131.c | 12
drivers/net/dsa/mv88e6171.c | 26
drivers/net/dsa/mv88e6352.c | 788
drivers/net/dsa/mv88e6xxx.c | 101
drivers/net/dsa/mv88e6xxx.h | 16
drivers/net/dummy.c | 20
drivers/net/ethernet/3com/typhoon.c | 4
drivers/net/ethernet/Kconfig | 1
drivers/net/ethernet/Makefile | 1
drivers/net/ethernet/amd/Kconfig | 2
drivers/net/ethernet/amd/xgbe/xgbe-common.h | 63
drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 201
drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 446
drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 533
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 89
drivers/net/ethernet/amd/xgbe/xgbe-main.c | 86
drivers/net/ethernet/amd/xgbe/xgbe.h | 112
drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 12
drivers/net/ethernet/broadcom/Kconfig | 1
drivers/net/ethernet/broadcom/b44.c | 4
drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2
drivers/net/ethernet/broadcom/bcmsysport.c | 36
drivers/net/ethernet/broadcom/bcmsysport.h | 3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 20
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 75
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 9
drivers/net/ethernet/broadcom/genet/bcmgenet.c | 169
drivers/net/ethernet/broadcom/genet/bcmgenet.h | 29
drivers/net/ethernet/broadcom/genet/bcmmii.c | 126
drivers/net/ethernet/broadcom/tg3.c | 37
drivers/net/ethernet/brocade/bna/bnad.c | 2
drivers/net/ethernet/cadence/Kconfig | 4
drivers/net/ethernet/chelsio/cxgb/sge.c | 13
drivers/net/ethernet/chelsio/cxgb4/Makefile | 1
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 36
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 14
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h | 10
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 158
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h | 52
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 586
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 22
drivers/net/ethernet/chelsio/cxgb4/l2t.c | 6
drivers/net/ethernet/chelsio/cxgb4/sge.c | 303
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 509
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 120
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 160
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 90
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 2269
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 7
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 138
drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 289
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 21
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 555
drivers/net/ethernet/cirrus/mac89x0.c | 8
drivers/net/ethernet/cisco/enic/enic.h | 2
drivers/net/ethernet/cisco/enic/enic_ethtool.c | 38
drivers/net/ethernet/cisco/enic/enic_main.c | 61
drivers/net/ethernet/cisco/enic/vnic_rss.h | 9
drivers/net/ethernet/cisco/enic/vnic_wq.h | 20
drivers/net/ethernet/dec/tulip/de4x5.c | 20
drivers/net/ethernet/dec/tulip/dmfe.c | 2
drivers/net/ethernet/dec/tulip/uli526x.c | 2
drivers/net/ethernet/emulex/benet/be_ethtool.c | 12
drivers/net/ethernet/emulex/benet/be_main.c | 22
drivers/net/ethernet/freescale/fec.h | 237
drivers/net/ethernet/freescale/fec_main.c | 124
drivers/net/ethernet/freescale/gianfar.c | 63
drivers/net/ethernet/freescale/gianfar.h | 38
drivers/net/ethernet/freescale/gianfar_ethtool.c | 7
drivers/net/ethernet/hp/hp100.c | 7
drivers/net/ethernet/ibm/emac/core.c | 24
drivers/net/ethernet/intel/e1000/e1000_main.c | 10
drivers/net/ethernet/intel/e1000e/netdev.c | 19
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 12
drivers/net/ethernet/intel/fm10k/fm10k_main.c | 17
drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 10
drivers/net/ethernet/intel/i40e/i40e.h | 6
drivers/net/ethernet/intel/i40e/i40e_adminq.c | 30
drivers/net/ethernet/intel/i40e/i40e_adminq.h | 15
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2184
drivers/net/ethernet/intel/i40e/i40e_common.c | 101
drivers/net/ethernet/intel/i40e/i40e_dcb.c | 252
drivers/net/ethernet/intel/i40e/i40e_dcb.h | 5
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 8
drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 129
drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 127
drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 2
drivers/net/ethernet/intel/i40e/i40e_main.c | 483
drivers/net/ethernet/intel/i40e/i40e_nvm.c | 198
drivers/net/ethernet/intel/i40e/i40e_prototype.h | 9
drivers/net/ethernet/intel/i40e/i40e_ptp.c | 44
drivers/net/ethernet/intel/i40e/i40e_txrx.c | 8
drivers/net/ethernet/intel/i40e/i40e_txrx.h | 5
drivers/net/ethernet/intel/i40e/i40e_type.h | 62
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 1
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 18
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1
drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 23
drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 15
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2136
drivers/net/ethernet/intel/i40evf/i40e_common.c | 1
drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 5
drivers/net/ethernet/intel/i40evf/i40e_type.h | 9
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 1
drivers/net/ethernet/intel/i40evf/i40evf.h | 3
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 47
drivers/net/ethernet/intel/i40evf/i40evf_main.c | 143
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 100
drivers/net/ethernet/intel/igb/igb_ethtool.c | 16
drivers/net/ethernet/intel/igb/igb_main.c | 33
drivers/net/ethernet/intel/ixgb/ixgb_main.c | 6
drivers/net/ethernet/intel/ixgbe/Makefile | 2
drivers/net/ethernet/intel/ixgbe/ixgbe.h | 45
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 183
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 8
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 8
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 1
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 46
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 538
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 4
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 275
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 10
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 134
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 329
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 85
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 39
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 1432
drivers/net/ethernet/intel/ixgbevf/defines.h | 2
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 43
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 757
drivers/net/ethernet/intel/ixgbevf/vf.c | 10
drivers/net/ethernet/intel/ixgbevf/vf.h | 2
drivers/net/ethernet/marvell/mvneta.c | 3
drivers/net/ethernet/marvell/pxa168_eth.c | 256
drivers/net/ethernet/marvell/sky2.c | 17
drivers/net/ethernet/mellanox/mlx4/cmd.c | 89
drivers/net/ethernet/mellanox/mlx4/en_clock.c | 46
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 557
drivers/net/ethernet/mellanox/mlx4/en_main.c | 12
drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 145
drivers/net/ethernet/mellanox/mlx4/en_port.c | 26
drivers/net/ethernet/mellanox/mlx4/en_port.h | 35
drivers/net/ethernet/mellanox/mlx4/en_rx.c | 299
drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 12
drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
drivers/net/ethernet/mellanox/mlx4/eq.c | 8
drivers/net/ethernet/mellanox/mlx4/fw.c | 348
drivers/net/ethernet/mellanox/mlx4/fw.h | 15
drivers/net/ethernet/mellanox/mlx4/main.c | 323
drivers/net/ethernet/mellanox/mlx4/mlx4.h | 21
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 27
drivers/net/ethernet/mellanox/mlx4/mr.c | 10
drivers/net/ethernet/mellanox/mlx4/port.c | 156
drivers/net/ethernet/mellanox/mlx4/profile.c | 19
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 17
drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
drivers/net/ethernet/mellanox/mlx5/core/eq.c | 8
drivers/net/ethernet/mellanox/mlx5/core/main.c | 15
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 4
drivers/net/ethernet/mellanox/mlx5/core/port.c | 4
drivers/net/ethernet/mellanox/mlx5/core/uar.c | 1
drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 15
drivers/net/ethernet/natsemi/macsonic.c | 4
drivers/net/ethernet/pasemi/pasemi_mac.c | 6
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 11
drivers/net/ethernet/realtek/8139cp.c | 2
drivers/net/ethernet/realtek/8139too.c | 22
drivers/net/ethernet/realtek/atp.h | 246
drivers/net/ethernet/realtek/r8169.c | 47
drivers/net/ethernet/renesas/sh_eth.c | 28
drivers/net/ethernet/rocker/Kconfig | 27
drivers/net/ethernet/rocker/Makefile | 5
drivers/net/ethernet/rocker/rocker.c | 4375
drivers/net/ethernet/rocker/rocker.h | 428
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 3
drivers/net/ethernet/sfc/ef10.c | 5
drivers/net/ethernet/sfc/efx.c | 24
drivers/net/ethernet/sfc/ethtool.c | 18
drivers/net/ethernet/sfc/falcon.c | 10
drivers/net/ethernet/sfc/farch.c | 27
drivers/net/ethernet/sfc/mcdi.c | 2
drivers/net/ethernet/sfc/net_driver.h | 19
drivers/net/ethernet/sfc/nic.h | 112
drivers/net/ethernet/sfc/siena.c | 8
drivers/net/ethernet/sfc/siena_sriov.c | 269
drivers/net/ethernet/smsc/smsc911x.c | 10
drivers/net/ethernet/stmicro/stmmac/Kconfig | 72
drivers/net/ethernet/stmicro/stmmac/Makefile | 17
drivers/net/ethernet/stmicro/stmmac/common.h | 13
drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 4
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 2
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 17
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 4
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 4
drivers/net/ethernet/stmicro/stmmac/stmmac.h | 77
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 211
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 135
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 68
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 28
drivers/net/ethernet/sun/cassini.c | 3
drivers/net/ethernet/sun/niu.c | 9
drivers/net/ethernet/sun/sunvnet.c | 747
drivers/net/ethernet/sun/sunvnet.h | 17
drivers/net/ethernet/ti/Kconfig | 8
drivers/net/ethernet/wiznet/w5100.c | 6
drivers/net/ethernet/wiznet/w5300.c | 6
drivers/net/ethernet/xilinx/ll_temac_main.c | 3
drivers/net/ethernet/xilinx/xilinx_emaclite.c | 3
drivers/net/fddi/defxx.c | 187
drivers/net/fddi/defxx.h | 4
drivers/net/hamradio/6pack.c | 3
drivers/net/hyperv/netvsc.c | 10
drivers/net/hyperv/netvsc_drv.c | 9
drivers/net/hyperv/rndis_filter.c | 15
drivers/net/ieee802154/Kconfig | 10
drivers/net/ieee802154/Makefile | 1
drivers/net/ieee802154/at86rf230.c | 445
drivers/net/ieee802154/cc2520.c | 73
drivers/net/ieee802154/fakehard.c | 430
drivers/net/ieee802154/fakelb.c | 91
drivers/net/ieee802154/mrf24j40.c | 104
drivers/net/ipvlan/Makefile | 7
drivers/net/ipvlan/ipvlan.h | 121
drivers/net/ipvlan/ipvlan_core.c | 607
drivers/net/ipvlan/ipvlan_main.c | 795
drivers/net/irda/act200l-sir.c | 11
drivers/net/irda/actisys-sir.c | 3
drivers/net/irda/ali-ircc.c | 263
drivers/net/irda/donauboe.c | 96
drivers/net/irda/girbil-sir.c | 14
drivers/net/irda/irda-usb.c | 142
drivers/net/irda/irtty-sir.c | 18
drivers/net/irda/litelink-sir.c | 8
drivers/net/irda/ma600-sir.c | 20
drivers/net/irda/mcp2120-sir.c | 14
drivers/net/irda/mcs7780.c | 53
drivers/net/irda/nsc-ircc.c | 229
drivers/net/irda/old_belkin-sir.c | 8
drivers/net/irda/sir_dev.c | 79
drivers/net/irda/sir_dongle.c | 4
drivers/net/irda/smsc-ircc2.c | 255
drivers/net/irda/tekram-sir.c | 15
drivers/net/irda/toim3232-sir.c | 12
drivers/net/irda/via-ircc.c | 175
drivers/net/irda/vlsi_ir.c | 145
drivers/net/irda/vlsi_ir.h | 3
drivers/net/irda/w83977af_ir.c | 77
drivers/net/macvlan.c | 19
drivers/net/macvtap.c | 140
drivers/net/phy/Kconfig | 2
drivers/net/phy/amd-xgbe-phy.c | 18
drivers/net/phy/amd.c | 17
drivers/net/phy/at803x.c | 14
drivers/net/phy/bcm63xx.c | 15
drivers/net/phy/bcm7xxx.c | 151
drivers/net/phy/bcm87xx.c | 14
drivers/net/phy/broadcom.c | 15
drivers/net/phy/cicada.c | 15
drivers/net/phy/davicom.c | 15
drivers/net/phy/et1011c.c | 17
drivers/net/phy/icplus.c | 15
drivers/net/phy/lxt.c | 15
drivers/net/phy/marvell.c | 67
drivers/net/phy/micrel.c | 344
drivers/net/phy/national.c | 17
drivers/net/phy/qsemi.c | 17
drivers/net/phy/realtek.c | 13
drivers/net/phy/smsc.c | 14
drivers/net/phy/spi_ks8995.c | 4
drivers/net/phy/ste10Xp.c | 15
drivers/net/phy/vitesse.c | 14
drivers/net/ppp/ppp_generic.c | 4
drivers/net/ppp/pppoe.c | 4
drivers/net/team/team.c | 3
drivers/net/tun.c | 134
drivers/net/usb/asix_devices.c | 3
drivers/net/usb/cdc-phonet.c | 6
drivers/net/usb/cdc_ether.c | 19
drivers/net/usb/cdc_mbim.c | 2
drivers/net/usb/hso.c | 3
drivers/net/usb/r8152.c | 229
drivers/net/usb/rtl8150.c | 3
drivers/net/usb/smsc95xx.c | 6
drivers/net/vmxnet3/vmxnet3_drv.c | 9
drivers/net/vmxnet3/vmxnet3_ethtool.c | 15
drivers/net/vxlan.c | 29
drivers/net/wireless/ath/ath.h | 13
drivers/net/wireless/ath/ath10k/ce.c | 92
drivers/net/wireless/ath/ath10k/ce.h | 21
drivers/net/wireless/ath/ath10k/core.c | 154
drivers/net/wireless/ath/ath10k/core.h | 100
drivers/net/wireless/ath/ath10k/debug.c | 1154
drivers/net/wireless/ath/ath10k/debug.h | 46
drivers/net/wireless/ath/ath10k/hif.h | 53
drivers/net/wireless/ath/ath10k/htc.c | 13
drivers/net/wireless/ath/ath10k/htt.h | 9
drivers/net/wireless/ath/ath10k/htt_rx.c | 1256
drivers/net/wireless/ath/ath10k/htt_tx.c | 11
drivers/net/wireless/ath/ath10k/hw.h | 32
drivers/net/wireless/ath/ath10k/mac.c | 746
drivers/net/wireless/ath/ath10k/mac.h | 6
drivers/net/wireless/ath/ath10k/pci.c | 579
drivers/net/wireless/ath/ath10k/spectral.c | 34
drivers/net/wireless/ath/ath10k/spectral.h | 8
drivers/net/wireless/ath/ath10k/trace.h | 207
drivers/net/wireless/ath/ath10k/txrx.c | 4
drivers/net/wireless/ath/ath10k/wmi.c | 1179
drivers/net/wireless/ath/ath10k/wmi.h | 401
drivers/net/wireless/ath/ath5k/mac80211-ops.c | 6
drivers/net/wireless/ath/ath5k/qcu.c | 8
drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
drivers/net/wireless/ath/ath6kl/common.h | 2
drivers/net/wireless/ath/ath6kl/debug.c | 28
drivers/net/wireless/ath/ath6kl/debug.h | 13
drivers/net/wireless/ath/ath6kl/usb.c | 9
drivers/net/wireless/ath/ath9k/Kconfig | 7
drivers/net/wireless/ath/ath9k/Makefile | 9
drivers/net/wireless/ath/ath9k/ar5008_phy.c | 7
drivers/net/wireless/ath/ath9k/ar9002_calib.c | 42
drivers/net/wireless/ath/ath9k/ar9002_mac.c | 8
drivers/net/wireless/ath/ath9k/ar9002_phy.c | 9
drivers/net/wireless/ath/ath9k/ar9003_calib.c | 11
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 56
drivers/net/wireless/ath/ath9k/ar9003_hw.c | 54
drivers/net/wireless/ath/ath9k/ar9003_mac.c | 8
drivers/net/wireless/ath/ath9k/ar9003_phy.c | 181
drivers/net/wireless/ath/ath9k/ar9003_rtt.h | 36
drivers/net/wireless/ath/ath9k/ar953x_initvals.h | 498
drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h | 8
drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | 144
drivers/net/wireless/ath/ath9k/ath9k.h | 27
drivers/net/wireless/ath/ath9k/beacon.c | 5
drivers/net/wireless/ath/ath9k/calib.c | 6
drivers/net/wireless/ath/ath9k/calib.h | 2
drivers/net/wireless/ath/ath9k/channel.c | 338
drivers/net/wireless/ath/ath9k/common-spectral.c (renamed from drivers/net/wireless/ath/ath9k/spectral.c) | 165
drivers/net/wireless/ath/ath9k/common-spectral.h (renamed from drivers/net/wireless/ath/ath9k/spectral.h) | 29
drivers/net/wireless/ath/ath9k/common.c | 2
drivers/net/wireless/ath/ath9k/common.h | 1
drivers/net/wireless/ath/ath9k/debug.c | 60
drivers/net/wireless/ath/ath9k/debug.h | 1
drivers/net/wireless/ath/ath9k/eeprom_def.c | 31
drivers/net/wireless/ath/ath9k/gpio.c | 9
drivers/net/wireless/ath/ath9k/htc.h | 5
drivers/net/wireless/ath/ath9k/htc_drv_debug.c | 6
drivers/net/wireless/ath/ath9k/htc_drv_init.c | 25
drivers/net/wireless/ath/ath9k/htc_drv_main.c | 13
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 16
drivers/net/wireless/ath/ath9k/hw-ops.h | 7
drivers/net/wireless/ath/ath9k/hw.c | 98
drivers/net/wireless/ath/ath9k/hw.h | 43
drivers/net/wireless/ath/ath9k/init.c | 73
drivers/net/wireless/ath/ath9k/link.c | 12
drivers/net/wireless/ath/ath9k/mac.c | 9
drivers/net/wireless/ath/ath9k/mac.h | 2
drivers/net/wireless/ath/ath9k/main.c | 361
drivers/net/wireless/ath/ath9k/pci.c | 10
drivers/net/wireless/ath/ath9k/recv.c | 2
drivers/net/wireless/ath/ath9k/reg.h | 38
drivers/net/wireless/ath/ath9k/tx99.c | 4
drivers/net/wireless/ath/ath9k/xmit.c | 37
drivers/net/wireless/ath/carl9170/phy.c | 4
drivers/net/wireless/ath/dfs_pattern_detector.c | 4
drivers/net/wireless/ath/wcn36xx/main.c | 7
drivers/net/wireless/ath/wil6210/cfg80211.c | 5
drivers/net/wireless/ath/wil6210/debug.c | 17
drivers/net/wireless/ath/wil6210/debugfs.c | 8
drivers/net/wireless/ath/wil6210/fw.c | 1
drivers/net/wireless/ath/wil6210/fw_inc.c | 4
drivers/net/wireless/ath/wil6210/interrupt.c | 30
drivers/net/wireless/ath/wil6210/main.c | 136
drivers/net/wireless/ath/wil6210/netdev.c | 2
drivers/net/wireless/ath/wil6210/txrx.c | 18
drivers/net/wireless/ath/wil6210/txrx.h | 4
drivers/net/wireless/ath/wil6210/wil6210.h | 26
drivers/net/wireless/ath/wil6210/wmi.c | 10
drivers/net/wireless/b43/main.c | 7
drivers/net/wireless/brcm80211/brcmfmac/Makefile | 10
drivers/net/wireless/brcm80211/brcmfmac/bcdc.c | 6
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 51
drivers/net/wireless/brcm80211/brcmfmac/btcoex.c | 6
drivers/net/wireless/brcm80211/brcmfmac/bus.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h) | 11
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c (renamed from drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c) | 338
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h) | 11
drivers/net/wireless/brcm80211/brcmfmac/chip.c | 2
drivers/net/wireless/brcm80211/brcmfmac/common.c | 168
drivers/net/wireless/brcm80211/brcmfmac/commonring.c | 2
drivers/net/wireless/brcm80211/brcmfmac/core.c (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c) | 47
drivers/net/wireless/brcm80211/brcmfmac/core.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd.h) | 10
drivers/net/wireless/brcm80211/brcmfmac/debug.c (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c) | 6
drivers/net/wireless/brcm80211/brcmfmac/debug.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h) | 6
drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c | 400
drivers/net/wireless/brcm80211/brcmfmac/feature.c | 34
drivers/net/wireless/brcm80211/brcmfmac/feature.h | 1
drivers/net/wireless/brcm80211/brcmfmac/firmware.c | 5
drivers/net/wireless/brcm80211/brcmfmac/flowring.c | 6
drivers/net/wireless/brcm80211/brcmfmac/fweh.c | 10
drivers/net/wireless/brcm80211/brcmfmac/fwil.c | 102
drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h | 95
drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 8
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 19
drivers/net/wireless/brcm80211/brcmfmac/of.c | 4
drivers/net/wireless/brcm80211/brcmfmac/p2p.c | 6
drivers/net/wireless/brcm80211/brcmfmac/pcie.c | 10
drivers/net/wireless/brcm80211/brcmfmac/proto.c | 6
drivers/net/wireless/brcm80211/brcmfmac/sdio.c (renamed from drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c) | 55
drivers/net/wireless/brcm80211/brcmfmac/sdio.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h) | 8
drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c | 15
drivers/net/wireless/brcm80211/brcmfmac/usb.c | 142
drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h | 75
drivers/net/wireless/brcm80211/brcmfmac/vendor.c | 21
drivers/net/wireless/brcm80211/brcmsmac/debug.c | 180
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 7
drivers/net/wireless/brcm80211/brcmsmac/main.c | 29
drivers/net/wireless/brcm80211/brcmutil/utils.c | 16
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h | 2
drivers/net/wireless/brcm80211/include/brcmu_utils.h | 2
drivers/net/wireless/cw1200/scan.c | 2
drivers/net/wireless/ipw2x00/ipw2200.c | 2
drivers/net/wireless/ipw2x00/libipw.h | 5
drivers/net/wireless/ipw2x00/libipw_module.c | 15
drivers/net/wireless/ipw2x00/libipw_rx.c | 21
drivers/net/wireless/iwlegacy/4965-mac.c | 2
drivers/net/wireless/iwlegacy/4965.h | 5
drivers/net/wireless/iwlwifi/Kconfig | 1
drivers/net/wireless/iwlwifi/dvm/commands.h | 31
drivers/net/wireless/iwlwifi/dvm/lib.c | 51
drivers/net/wireless/iwlwifi/dvm/mac80211.c | 1
drivers/net/wireless/iwlwifi/iwl-7000.c | 48
drivers/net/wireless/iwlwifi/iwl-8000.c | 28
drivers/net/wireless/iwlwifi/iwl-config.h | 23
drivers/net/wireless/iwlwifi/iwl-csr.h | 39
drivers/net/wireless/iwlwifi/iwl-debug.h | 3
drivers/net/wireless/iwlwifi/iwl-drv.c | 198
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c | 2
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h | 3
drivers/net/wireless/iwlwifi/iwl-fw-file.h | 308
drivers/net/wireless/iwlwifi/iwl-fw.h | 244
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 5
drivers/net/wireless/iwlwifi/iwl-op-mode.h | 3
drivers/net/wireless/iwlwifi/iwl-prph.h | 10
drivers/net/wireless/iwlwifi/iwl-trans.h | 25
drivers/net/wireless/iwlwifi/mvm/coex.c | 29
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 33
drivers/net/wireless/iwlwifi/mvm/constants.h | 7
drivers/net/wireless/iwlwifi/mvm/d3.c | 618
drivers/net/wireless/iwlwifi/mvm/debugfs.c | 200
drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h | 9
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 6
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 2
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 297
drivers/net/wireless/iwlwifi/mvm/fw-api.h | 179
drivers/net/wireless/iwlwifi/mvm/fw.c | 74
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 106
drivers/net/wireless/iwlwifi/mvm/mac80211.c | 531
drivers/net/wireless/iwlwifi/mvm/mvm.h | 154
drivers/net/wireless/iwlwifi/mvm/nvm.c | 31
drivers/net/wireless/iwlwifi/mvm/offloading.c | 2
drivers/net/wireless/iwlwifi/mvm/ops.c | 87
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c | 4
drivers/net/wireless/iwlwifi/mvm/power.c | 40
drivers/net/wireless/iwlwifi/mvm/rs.c | 123
drivers/net/wireless/iwlwifi/mvm/rs.h | 2
drivers/net/wireless/iwlwifi/mvm/rx.c | 150
drivers/net/wireless/iwlwifi/mvm/scan.c | 786
drivers/net/wireless/iwlwifi/mvm/sta.c | 303
drivers/net/wireless/iwlwifi/mvm/sta.h | 5
drivers/net/wireless/iwlwifi/mvm/tdls.c | 569
drivers/net/wireless/iwlwifi/mvm/time-event.c | 146
drivers/net/wireless/iwlwifi/mvm/time-event.h | 4
drivers/net/wireless/iwlwifi/mvm/tt.c | 75
drivers/net/wireless/iwlwifi/mvm/tx.c | 45
drivers/net/wireless/iwlwifi/mvm/utils.c | 37
drivers/net/wireless/iwlwifi/pcie/drv.c | 20
drivers/net/wireless/iwlwifi/pcie/trans.c | 404
drivers/net/wireless/iwlwifi/pcie/tx.c | 99
drivers/net/wireless/mac80211_hwsim.c | 660
drivers/net/wireless/mac80211_hwsim.h | 28
drivers/net/wireless/mwifiex/11n.c | 4
drivers/net/wireless/mwifiex/11n.h | 4
drivers/net/wireless/mwifiex/11n_rxreorder.c | 6
drivers/net/wireless/mwifiex/Kconfig | 2
drivers/net/wireless/mwifiex/cfg80211.c | 130
drivers/net/wireless/mwifiex/decl.h | 19
drivers/net/wireless/mwifiex/fw.h | 34
drivers/net/wireless/mwifiex/init.c | 22
drivers/net/wireless/mwifiex/join.c | 4
drivers/net/wireless/mwifiex/main.c | 123
drivers/net/wireless/mwifiex/main.h | 63
drivers/net/wireless/mwifiex/scan.c | 78
drivers/net/wireless/mwifiex/sdio.c | 2
drivers/net/wireless/mwifiex/sdio.h | 7
drivers/net/wireless/mwifiex/sta_cmdresp.c | 2
drivers/net/wireless/mwifiex/sta_event.c | 14
drivers/net/wireless/mwifiex/sta_ioctl.c | 4
drivers/net/wireless/mwifiex/sta_rx.c | 3
drivers/net/wireless/mwifiex/sta_tx.c | 6
drivers/net/wireless/mwifiex/tdls.c | 288
drivers/net/wireless/mwifiex/txrx.c | 35
drivers/net/wireless/mwifiex/uap_cmd.c | 2
drivers/net/wireless/mwifiex/uap_event.c | 5
drivers/net/wireless/mwifiex/uap_txrx.c | 11
drivers/net/wireless/mwifiex/usb.c | 51
drivers/net/wireless/mwifiex/usb.h | 3
drivers/net/wireless/mwifiex/util.c | 38
drivers/net/wireless/mwifiex/wmm.c | 42
drivers/net/wireless/mwl8k.c | 7
drivers/net/wireless/p54/net2280.h | 451
drivers/net/wireless/p54/p54usb.h | 13
drivers/net/wireless/rt2x00/rt2500usb.c | 18
drivers/net/wireless/rt2x00/rt2800lib.c | 45
drivers/net/wireless/rt2x00/rt2x00.h | 12
drivers/net/wireless/rt2x00/rt2x00mac.c | 7
drivers/net/wireless/rt2x00/rt2x00usb.c | 28
drivers/net/wireless/rt2x00/rt2x00usb.h | 41
drivers/net/wireless/rt2x00/rt73usb.c | 2
drivers/net/wireless/rtlwifi/core.c | 21
drivers/net/wireless/rtlwifi/rtl8192ee/Makefile | 5
drivers/net/wireless/rtlwifi/rtl8723ae/Makefile | 3
drivers/net/wireless/rtlwifi/rtl8723be/Makefile | 3
drivers/net/wireless/rtlwifi/rtl8821ae/Makefile | 3
drivers/net/wireless/rtlwifi/rtl8821ae/phy.c | 12
drivers/net/wireless/ti/wl1251/main.c | 2
drivers/net/wireless/ti/wlcore/cmd.c | 15
drivers/net/wireless/ti/wlcore/cmd.h | 2
drivers/net/wireless/ti/wlcore/event.c | 5
drivers/net/wireless/ti/wlcore/main.c | 23
drivers/net/xen-netback/interface.c | 2
drivers/net/xen-netback/netback.c | 28
drivers/net/xen-netfront.c | 253
567 files changed, 39279 insertions, 17627 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f9009be3f307..d6607ee9c855 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -145,6 +145,26 @@ config MACVTAP
To compile this driver as a module, choose M here: the module
will be called macvtap.
+
+config IPVLAN
+ tristate "IP-VLAN support"
+ depends on INET
+ depends on IPV6
+ ---help---
+ This allows one to create virtual devices off of a main interface
+ and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
+ on packets. All interfaces (including the main interface) share L2
+ making it transparent to the connected L2 switch.
+
+ Ipvlan devices can be added using the "ip" command from the
+ iproute2 package starting with the iproute2-X.Y.ZZ release:
+
+ "ip link add link <main-dev> [ NAME ] type ipvlan"
+
+ To compile this driver as a module, choose M here: the module
+ will be called ipvlan.
+
+
config VXLAN
tristate "Virtual eXtensible Local Area Network (VXLAN)"
depends on INET
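
[Editor's note, not part of the patch: a usage sketch of the command the help
text above quotes. The device and address names here are hypothetical, and an
iproute2 release that already understands "type ipvlan" is assumed:

    ip link add link eth0 ipvl0 type ipvlan
    ip addr add 192.168.1.10/24 dev ipvl0
    ip link set ipvl0 up

After this, ipvl0 shares eth0's L2 and traffic is demultiplexed by L3 address,
as the help text describes.]
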
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 61aefdd1e173..e25fdd7d905e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@
# Networking Core Drivers
#
obj-$(CONFIG_BONDING) += bonding/
+obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 2110215f3528..8baa87df1738 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -29,8 +29,8 @@
#include <linux/if_bonding.h>
#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
-#include "bonding.h"
-#include "bond_3ad.h"
+#include <net/bonding.h>
+#include <net/bond_3ad.h>
/* General definitions */
#define AD_SHORT_TIMEOUT 1
@@ -79,15 +79,21 @@
* --------------------------------------------------------------
* 16 6 1 0
*/
-#define AD_DUPLEX_KEY_BITS 0x1
-#define AD_SPEED_KEY_BITS 0x3E
-#define AD_USER_KEY_BITS 0xFFC0
-
-#define AD_LINK_SPEED_BITMASK_1MBPS 0x1
-#define AD_LINK_SPEED_BITMASK_10MBPS 0x2
-#define AD_LINK_SPEED_BITMASK_100MBPS 0x4
-#define AD_LINK_SPEED_BITMASK_1000MBPS 0x8
-#define AD_LINK_SPEED_BITMASK_10000MBPS 0x10
+#define AD_DUPLEX_KEY_MASKS 0x1
+#define AD_SPEED_KEY_MASKS 0x3E
+#define AD_USER_KEY_MASKS 0xFFC0
+
+enum ad_link_speed_type {
+ AD_LINK_SPEED_1MBPS = 1,
+ AD_LINK_SPEED_10MBPS,
+ AD_LINK_SPEED_100MBPS,
+ AD_LINK_SPEED_1000MBPS,
+ AD_LINK_SPEED_2500MBPS,
+ AD_LINK_SPEED_10000MBPS,
+ AD_LINK_SPEED_20000MBPS,
+ AD_LINK_SPEED_40000MBPS,
+ AD_LINK_SPEED_56000MBPS
+};
/* compare MAC addresses */
#define MAC_ADDRESS_EQUAL(A, B) \
@@ -240,12 +246,16 @@ static inline int __check_agg_selection_timer(struct port *port)
* __get_link_speed - get a port's speed
* @port: the port we're looking at
*
- * Return @port's speed in 802.3ad bitmask format. i.e. one of:
+ * Return @port's speed in 802.3ad enum format. i.e. one of:
* 0,
- * %AD_LINK_SPEED_BITMASK_10MBPS,
- * %AD_LINK_SPEED_BITMASK_100MBPS,
- * %AD_LINK_SPEED_BITMASK_1000MBPS,
- * %AD_LINK_SPEED_BITMASK_10000MBPS
+ * %AD_LINK_SPEED_10MBPS,
+ * %AD_LINK_SPEED_100MBPS,
+ * %AD_LINK_SPEED_1000MBPS,
+ * %AD_LINK_SPEED_2500MBPS,
+ * %AD_LINK_SPEED_10000MBPS
+ * %AD_LINK_SPEED_20000MBPS
+ * %AD_LINK_SPEED_40000MBPS
+ * %AD_LINK_SPEED_56000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -262,19 +272,35 @@ static u16 __get_link_speed(struct port *port)
else {
switch (slave->speed) {
case SPEED_10:
- speed = AD_LINK_SPEED_BITMASK_10MBPS;
+ speed = AD_LINK_SPEED_10MBPS;
break;
case SPEED_100:
- speed = AD_LINK_SPEED_BITMASK_100MBPS;
+ speed = AD_LINK_SPEED_100MBPS;
break;
case SPEED_1000:
- speed = AD_LINK_SPEED_BITMASK_1000MBPS;
+ speed = AD_LINK_SPEED_1000MBPS;
+ break;
+
+ case SPEED_2500:
+ speed = AD_LINK_SPEED_2500MBPS;
break;
case SPEED_10000:
- speed = AD_LINK_SPEED_BITMASK_10000MBPS;
+ speed = AD_LINK_SPEED_10000MBPS;
+ break;
+
+ case SPEED_20000:
+ speed = AD_LINK_SPEED_20000MBPS;
+ break;
+
+ case SPEED_40000:
+ speed = AD_LINK_SPEED_40000MBPS;
+ break;
+
+ case SPEED_56000:
+ speed = AD_LINK_SPEED_56000MBPS;
break;
default:
@@ -625,21 +651,33 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
if (aggregator->num_of_ports) {
switch (__get_link_speed(aggregator->lag_ports)) {
- case AD_LINK_SPEED_BITMASK_1MBPS:
+ case AD_LINK_SPEED_1MBPS:
bandwidth = aggregator->num_of_ports;
break;
- case AD_LINK_SPEED_BITMASK_10MBPS:
+ case AD_LINK_SPEED_10MBPS:
bandwidth = aggregator->num_of_ports * 10;
break;
- case AD_LINK_SPEED_BITMASK_100MBPS:
+ case AD_LINK_SPEED_100MBPS:
bandwidth = aggregator->num_of_ports * 100;
break;
- case AD_LINK_SPEED_BITMASK_1000MBPS:
+ case AD_LINK_SPEED_1000MBPS:
bandwidth = aggregator->num_of_ports * 1000;
break;
- case AD_LINK_SPEED_BITMASK_10000MBPS:
+ case AD_LINK_SPEED_2500MBPS:
+ bandwidth = aggregator->num_of_ports * 2500;
+ break;
+ case AD_LINK_SPEED_10000MBPS:
bandwidth = aggregator->num_of_ports * 10000;
break;
+ case AD_LINK_SPEED_20000MBPS:
+ bandwidth = aggregator->num_of_ports * 20000;
+ break;
+ case AD_LINK_SPEED_40000MBPS:
+ bandwidth = aggregator->num_of_ports * 40000;
+ break;
+ case AD_LINK_SPEED_56000MBPS:
+ bandwidth = aggregator->num_of_ports * 56000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
@@ -1011,7 +1049,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_rx_state);
switch (port->sm_rx_state) {
case AD_RX_INITIALIZE:
- if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
+ if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS))
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
else
port->sm_vars |= AD_PORT_LACP_ENABLED;
@@ -1318,7 +1356,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
/* update the new aggregator's parameters
* if port was responsed from the end-user
*/
- if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)
+ if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
/* if port is full duplex */
port->aggregator->is_individual = false;
else
@@ -1846,7 +1884,7 @@ void bond_3ad_bind_slave(struct slave *slave)
/* if the port is not full duplex, then the port should be not
* lacp Enabled
*/
- if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
+ if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS))
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
/* actor system is the bond's system */
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
@@ -2214,7 +2252,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
spin_lock_bh(&slave->bond->mode_lock);
- port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
netdev_dbg(slave->bond->dev, "Port %d changed speed\n", port->actor_port_number);
@@ -2247,7 +2285,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
spin_lock_bh(&slave->bond->mode_lock);
- port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number);
@@ -2289,18 +2327,18 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
*/
if (link == BOND_LINK_UP) {
port->is_enabled = true;
- port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
- port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_SPEED_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
} else {
/* link has failed */
port->is_enabled = false;
- port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
+ port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
port->actor_oper_port_key = (port->actor_admin_port_key &=
- ~AD_SPEED_KEY_BITS);
+ ~AD_SPEED_KEY_MASKS);
}
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
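
[Editor's note, not part of the patch: how the renamed masks and the "<< 1"
shift in the hunks above fit together. The comment truncated by the first hunk
describes the port key as user key (bits 6..15), speed (bits 1..5) and duplex
(bit 0); the new enum values 1..9 still fit in the five speed bits. The helper
below is illustrative only, it does not exist in the patch:

    static u16 ad_compose_port_key(u16 user_key, enum ad_link_speed_type speed,
    			       bool full_duplex)
    {
    	u16 key = user_key & AD_USER_KEY_MASKS;		/* bits 6..15 */

    	key |= (speed << 1) & AD_SPEED_KEY_MASKS;	/* bits 1..5 */
    	if (full_duplex)
    		key |= AD_DUPLEX_KEY_MASKS;		/* bit 0 */
    	return key;
    }

This mirrors the in-tree pattern of OR-ing __get_duplex(port) and
__get_link_speed(port) << 1 into actor_admin_port_key.]
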
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
deleted file mode 100644
index c5f14ac63f3e..000000000000
--- a/drivers/net/bonding/bond_3ad.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#ifndef __BOND_3AD_H__
-#define __BOND_3AD_H__
-
-#include <asm/byteorder.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_ether.h>
-
-/* General definitions */
-#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
-#define AD_TIMER_INTERVAL 100 /*msec*/
-
-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
-
-#define AD_LACP_SLOW 0
-#define AD_LACP_FAST 1
-
-typedef struct mac_addr {
- u8 mac_addr_value[ETH_ALEN];
-} __packed mac_addr_t;
-
-enum {
- BOND_AD_STABLE = 0,
- BOND_AD_BANDWIDTH = 1,
- BOND_AD_COUNT = 2,
-};
-
-/* rx machine states(43.4.11 in the 802.3ad standard) */
-typedef enum {
- AD_RX_DUMMY,
- AD_RX_INITIALIZE, /* rx Machine */
- AD_RX_PORT_DISABLED, /* rx Machine */
- AD_RX_LACP_DISABLED, /* rx Machine */
- AD_RX_EXPIRED, /* rx Machine */
- AD_RX_DEFAULTED, /* rx Machine */
- AD_RX_CURRENT /* rx Machine */
-} rx_states_t;
-
-/* periodic machine states(43.4.12 in the 802.3ad standard) */
-typedef enum {
- AD_PERIODIC_DUMMY,
- AD_NO_PERIODIC, /* periodic machine */
- AD_FAST_PERIODIC, /* periodic machine */
- AD_SLOW_PERIODIC, /* periodic machine */
- AD_PERIODIC_TX /* periodic machine */
-} periodic_states_t;
-
-/* mux machine states(43.4.13 in the 802.3ad standard) */
-typedef enum {
- AD_MUX_DUMMY,
- AD_MUX_DETACHED, /* mux machine */
- AD_MUX_WAITING, /* mux machine */
- AD_MUX_ATTACHED, /* mux machine */
- AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
-} mux_states_t;
-
-/* tx machine states(43.4.15 in the 802.3ad standard) */
-typedef enum {
- AD_TX_DUMMY,
- AD_TRANSMIT /* tx Machine */
-} tx_states_t;
-
-/* rx indication types */
-typedef enum {
- AD_TYPE_LACPDU = 1, /* type lacpdu */
- AD_TYPE_MARKER /* type marker */
-} pdu_type_t;
-
-/* rx marker indication types */
-typedef enum {
- AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker imformation subtype */
- AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */
-} bond_marker_subtype_t;
-
-/* timers types(43.4.9 in the 802.3ad standard) */
-typedef enum {
- AD_CURRENT_WHILE_TIMER,
- AD_ACTOR_CHURN_TIMER,
- AD_PERIODIC_TIMER,
- AD_PARTNER_CHURN_TIMER,
- AD_WAIT_WHILE_TIMER
-} ad_timers_t;
-
-#pragma pack(1)
-
-/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */
-typedef struct lacpdu {
- u8 subtype; /* = LACP(= 0x01) */
- u8 version_number;
- u8 tlv_type_actor_info; /* = actor information(type/length/value) */
- u8 actor_information_length; /* = 20 */
- __be16 actor_system_priority;
- struct mac_addr actor_system;
- __be16 actor_key;
- __be16 actor_port_priority;
- __be16 actor_port;
- u8 actor_state;
- u8 reserved_3_1[3]; /* = 0 */
- u8 tlv_type_partner_info; /* = partner information */
- u8 partner_information_length; /* = 20 */
- __be16 partner_system_priority;
- struct mac_addr partner_system;
- __be16 partner_key;
- __be16 partner_port_priority;
- __be16 partner_port;
- u8 partner_state;
- u8 reserved_3_2[3]; /* = 0 */
- u8 tlv_type_collector_info; /* = collector information */
- u8 collector_information_length;/* = 16 */
- __be16 collector_max_delay;
- u8 reserved_12[12];
- u8 tlv_type_terminator; /* = terminator */
- u8 terminator_length; /* = 0 */
- u8 reserved_50[50]; /* = 0 */
-} __packed lacpdu_t;
-
-typedef struct lacpdu_header {
- struct ethhdr hdr;
- struct lacpdu lacpdu;
-} __packed lacpdu_header_t;
-
-/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */
-typedef struct bond_marker {
- u8 subtype; /* = 0x02 (marker PDU) */
- u8 version_number; /* = 0x01 */
- u8 tlv_type; /* = 0x01 (marker information) */
- /* = 0x02 (marker response information) */
- u8 marker_length; /* = 0x16 */
- u16 requester_port; /* The number assigned to the port by the requester */
- struct mac_addr requester_system; /* The requester's system id */
- u32 requester_transaction_id; /* The transaction id allocated by the requester, */
- u16 pad; /* = 0 */
- u8 tlv_type_terminator; /* = 0x00 */
- u8 terminator_length; /* = 0x00 */
- u8 reserved_90[90]; /* = 0 */
-} __packed bond_marker_t;
-
-typedef struct bond_marker_header {
- struct ethhdr hdr;
- struct bond_marker marker;
-} __packed bond_marker_header_t;
-
-#pragma pack()
-
-struct slave;
-struct bonding;
-struct ad_info;
-struct port;
-
-#ifdef __ia64__
-#pragma pack(8)
-#endif
-
-/* aggregator structure(43.4.5 in the 802.3ad standard) */
-typedef struct aggregator {
- struct mac_addr aggregator_mac_address;
- u16 aggregator_identifier;
- bool is_individual;
- u16 actor_admin_aggregator_key;
- u16 actor_oper_aggregator_key;
- struct mac_addr partner_system;
- u16 partner_system_priority;
- u16 partner_oper_aggregator_key;
- u16 receive_state; /* BOOLEAN */
- u16 transmit_state; /* BOOLEAN */
- struct port *lag_ports;
- /* ****** PRIVATE PARAMETERS ****** */
- struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */
- u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */
- u16 num_of_ports;
-} aggregator_t;
-
-struct port_params {
- struct mac_addr system;
- u16 system_priority;
- u16 key;
- u16 port_number;
- u16 port_priority;
- u16 port_state;
-};
-
-/* port structure(43.4.6 in the 802.3ad standard) */
-typedef struct port {
- u16 actor_port_number;
- u16 actor_port_priority;
- struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */
- u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */
- u16 actor_port_aggregator_identifier;
- bool ntt;
- u16 actor_admin_port_key;
- u16 actor_oper_port_key;
- u8 actor_admin_port_state;
- u8 actor_oper_port_state;
-
- struct port_params partner_admin;
- struct port_params partner_oper;
-
- bool is_enabled;
-
- /* ****** PRIVATE PARAMETERS ****** */
- u16 sm_vars; /* all state machines variables for this port */
- rx_states_t sm_rx_state; /* state machine rx state */
- u16 sm_rx_timer_counter; /* state machine rx timer counter */
- periodic_states_t sm_periodic_state; /* state machine periodic state */
- u16 sm_periodic_timer_counter; /* state machine periodic timer counter */
- mux_states_t sm_mux_state; /* state machine mux state */
- u16 sm_mux_timer_counter; /* state machine mux timer counter */
- tx_states_t sm_tx_state; /* state machine tx state */
- u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
- struct slave *slave; /* pointer to the bond slave that this port belongs to */
- struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
- struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
- u32 transaction_id; /* continuous number for identification of Marker PDU's; */
- struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */
-} port_t;
-
-/* system structure */
-struct ad_system {
- u16 sys_priority;
- struct mac_addr sys_mac_addr;
-};
-
-#ifdef __ia64__
-#pragma pack()
-#endif
-
-/* ========== AD Exported structures to the main bonding code ========== */
-#define BOND_AD_INFO(bond) ((bond)->ad_info)
-#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
-
-struct ad_bond_info {
- struct ad_system system; /* 802.3ad system structure */
- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
- u16 aggregator_identifier;
-};
-
-struct ad_slave_info {
- struct aggregator aggregator; /* 802.3ad aggregator structure */
- struct port port; /* 802.3ad port structure */
- u16 id;
-};
-
-/* ========== AD Exported functions to the main bonding code ========== */
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-void bond_3ad_bind_slave(struct slave *slave);
-void bond_3ad_unbind_slave(struct slave *slave);
-void bond_3ad_state_machine_handler(struct work_struct *);
-void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
-void bond_3ad_adapter_speed_changed(struct slave *slave);
-void bond_3ad_adapter_duplex_changed(struct slave *slave);
-void bond_3ad_handle_link_change(struct slave *slave, char link);
-int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
-int __bond_3ad_get_active_agg_info(struct bonding *bond,
- struct ad_info *ad_info);
-int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
- struct slave *slave);
-int bond_3ad_set_carrier(struct bonding *bond);
-void bond_3ad_update_lacp_rate(struct bonding *bond);
-#endif /* __BOND_3AD_H__ */
-
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index d2eadab787c5..bb9e9fc45e1b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -37,8 +37,8 @@
#include <net/arp.h>
#include <net/ipv6.h>
#include <asm/byteorder.h>
-#include "bonding.h"
-#include "bond_alb.h"
+#include <net/bonding.h>
+#include <net/bond_alb.h>
@@ -475,12 +475,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb->dev = client_info->slave->dev;
if (client_info->vlan_id) {
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
- if (!skb) {
- netdev_err(client_info->slave->bond->dev,
- "failed to insert VLAN tag\n");
- continue;
- }
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ client_info->vlan_id);
}
arp_xmit(skb);
@@ -951,13 +947,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
- if (vid) {
- skb = vlan_put_tag(skb, vlan_proto, vid);
- if (!skb) {
- netdev_err(slave->bond->dev, "failed to insert VLAN tag\n");
- return;
- }
- }
+ if (vid)
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vid);
dev_queue_xmit(skb);
}
@@ -1326,7 +1317,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
}
/* no suitable interface, frame not sent */
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond->dev, skb);
out:
return NETDEV_TX_OK;
}
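
[Editor's note, not part of the patch: my reading of why the error handling
disappears in both VLAN hunks above. vlan_put_tag() pushed the tag into the
packet payload, which could require reallocation and so could fail and free
the skb; __vlan_hwaccel_put_tag() only records the tag in the skb metadata
(vlan_proto/vlan_tci), cannot fail, and leaves insertion into the frame to the
transmit path. A minimal sketch of the new pattern; tag_outgoing_skb() is a
made-up wrapper:

    #include <linux/if_vlan.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void tag_outgoing_skb(struct sk_buff *skb, __be16 proto, u16 vid)
    {
    	if (vid)
    		__vlan_hwaccel_put_tag(skb, proto, vid);  /* cannot fail */
    	dev_queue_xmit(skb);	/* tag materialized later on the tx path */
    }
]
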
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
deleted file mode 100644
index 1ad473b4ade5..000000000000
--- a/drivers/net/bonding/bond_alb.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#ifndef __BOND_ALB_H__
-#define __BOND_ALB_H__
-
-#include <linux/if_ether.h>
-
-struct bonding;
-struct slave;
-
-#define BOND_ALB_INFO(bond) ((bond)->alb_info)
-#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info)
-
-#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
-#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
- * Used for division - never set
- * to zero !!!
- */
-#define BOND_ALB_DEFAULT_LP_INTERVAL 1
-#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of
- * learning packets to the switch
- */
-
-#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
- * ALB_TIMER_TICKS_PER_SEC)
-
-#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
- * ALB_TIMER_TICKS_PER_SEC)
-
-#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
- * Note that this value MUST NOT be smaller
- * because the key hash table is BYTE wide !
- */
-
-
-#define TLB_NULL_INDEX 0xffffffff
-
-/* rlb defs */
-#define RLB_HASH_TABLE_SIZE 256
-#define RLB_NULL_INDEX 0xffffffff
-#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */
-#define RLB_ARP_BURST_SIZE 2
-#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
- * rebalance interval (5 min).
- */
-/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
- * promiscuous after failover
- */
-#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC)
-
-
-struct tlb_client_info {
- struct slave *tx_slave; /* A pointer to slave used for transmiting
- * packets to a Client that the Hash function
- * gave this entry index.
- */
- u32 tx_bytes; /* Each Client accumulates the BytesTx that
- * were transmitted to it, and after each
- * CallBack the LoadHistory is divided
- * by the balance interval
- */
- u32 load_history; /* This field contains the amount of Bytes
- * that were transmitted to this client by
- * the server on the previous balance
- * interval in Bps.
- */
- u32 next; /* The next Hash table entry index, assigned
- * to use the same adapter for transmit.
- */
- u32 prev; /* The previous Hash table entry index,
- * assigned to use the same
- */
-};
-
-/* -------------------------------------------------------------------------
- * struct rlb_client_info contains all info related to a specific rx client
- * connection. This is the Clients Hash Table entry struct.
- * Note that this is not a proper hash table; if a new client's IP address
- * hash collides with an existing client entry, the old entry is replaced.
- *
- * There is a linked list (linked by the used_next and used_prev members)
- * linking all the used entries of the hash table. This allows updating
- * all the clients without walking over all the unused elements of the table.
- *
- * There are also linked lists of entries with identical hash(ip_src). These
- * allow cleaning up the table from ip_src<->mac_src associations that have
- * become outdated and would cause sending out invalid ARP updates to the
- * network. These are linked by the (src_next and src_prev members).
- * -------------------------------------------------------------------------
- */
-struct rlb_client_info {
- __be32 ip_src; /* the server IP address */
- __be32 ip_dst; /* the client IP address */
- u8 mac_src[ETH_ALEN]; /* the server MAC address */
- u8 mac_dst[ETH_ALEN]; /* the client MAC address */
-
- /* list of used hash table entries, starting at rx_hashtbl_used_head */
- u32 used_next;
- u32 used_prev;
-
- /* ip_src based hashing */
- u32 src_next; /* next entry with same hash(ip_src) */
- u32 src_prev; /* prev entry with same hash(ip_src) */
- u32 src_first; /* first entry with hash(ip_src) == this entry's index */
-
- u8 assigned; /* checking whether this entry is assigned */
- u8 ntt; /* flag - need to transmit client info */
- struct slave *slave; /* the slave assigned to this client */
- unsigned short vlan_id; /* VLAN tag associated with IP address */
-};
-
-struct tlb_slave_info {
- u32 head; /* Index to the head of the bi-directional clients
- * hash table entries list. The entries in the list
- * are the entries that were assigned to use this
- * slave for transmit.
- */
- u32 load; /* Each slave sums the loadHistory of all clients
- * assigned to it
- */
-};
-
-struct alb_bond_info {
- struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
- u32 unbalanced_load;
- int tx_rebalance_counter;
- int lp_counter;
- /* -------- rlb parameters -------- */
- int rlb_enabled;
- struct rlb_client_info *rx_hashtbl; /* Receive hash table */
- u32 rx_hashtbl_used_head;
- u8 rx_ntt; /* flag - need to transmit
- * to all rx clients
- */
- struct slave *rx_slave;/* last slave to xmit from */
- u8 primary_is_promisc; /* boolean */
- u32 rlb_promisc_timeout_counter;/* counts primary
- * promiscuity time
- */
- u32 rlb_update_delay_counter;
- u32 rlb_update_retry_counter;/* counter of retries
- * of client update
- */
- u8 rlb_rebalance; /* flag - indicates that the
- * rx traffic should be
- * rebalanced
- */
-};
-
-int bond_alb_initialize(struct bonding *bond, int rlb_enabled);
-void bond_alb_deinitialize(struct bonding *bond);
-int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
-void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
-void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
-void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
-int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
-int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
-void bond_alb_monitor(struct work_struct *);
-int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
-void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
-#endif /* __BOND_ALB_H__ */
-
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 8f99082f90eb..e52e25a977fa 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -3,8 +3,8 @@
#include <linux/device.h>
#include <linux/netdevice.h>
-#include "bonding.h"
-#include "bond_alb.h"
+#include <net/bonding.h>
+#include <net/bond_alb.h>
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a5115fb7cf33..184c434ae305 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -77,9 +77,9 @@
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_keys.h>
-#include "bonding.h"
-#include "bond_3ad.h"
-#include "bond_alb.h"
+#include <net/bonding.h>
+#include <net/bond_3ad.h>
+#include <net/bond_alb.h>
/*---------------------------- Module parameters ----------------------------*/
@@ -1526,6 +1526,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
#endif
+ if (!(bond_dev->features & NETIF_F_LRO))
+ dev_disable_lro(slave_dev);
+
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
@@ -2143,8 +2146,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
ntohs(outer_tag->vlan_proto), tags->vlan_id);
- skb = __vlan_put_tag(skb, tags->vlan_proto,
- tags->vlan_id);
+ skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
+ tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
return;
@@ -2156,12 +2159,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
if (outer_tag->vlan_id) {
netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
- skb = vlan_put_tag(skb, outer_tag->vlan_proto,
- outer_tag->vlan_id);
- if (!skb) {
- net_err_ratelimited("failed to insert outer VLAN tag\n");
- return;
- }
+ __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
+ outer_tag->vlan_id);
}
xmit:
@@ -3523,7 +3522,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
}
}
/* no slave that can tx has been found */
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond->dev, skb);
}
/**
@@ -3585,7 +3584,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
slave_id = bond_rr_gen_slave_id(bond);
bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
} else {
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond_dev, skb);
}
}
@@ -3604,7 +3603,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
if (slave)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond_dev, skb);
return NETDEV_TX_OK;
}
@@ -3748,8 +3747,7 @@ int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
bond_dev_queue_xmit(bond, skb, slave->dev);
} else {
- dev_kfree_skb_any(skb);
- atomic_long_inc(&dev->tx_dropped);
+ bond_tx_drop(dev, skb);
}
return NETDEV_TX_OK;
@@ -3779,7 +3777,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
- dev_kfree_skb_any(skb);
+ bond_tx_drop(bond_dev, skb);
return NETDEV_TX_OK;
}
@@ -3859,7 +3857,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
/* Should never happen, mode already checked */
netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
WARN_ON_ONCE(1);
- dev_kfree_skb_any(skb);
+ bond_tx_drop(dev, skb);
return NETDEV_TX_OK;
}
}
@@ -3879,7 +3877,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
- dev_kfree_skb_any(skb);
+ bond_tx_drop(dev, skb);
rcu_read_unlock();
return ret;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 45f09a66e6c9..7b1124366011 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -17,7 +17,7 @@
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
-#include "bonding.h"
+#include <net/bonding.h>
static size_t bond_get_slave_size(const struct net_device *bond_dev,
const struct net_device *slave_dev)
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index b62697f4a3de..1a61cc9b3402 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -16,7 +16,7 @@
#include <linux/rcupdate.h>
#include <linux/ctype.h>
#include <linux/inet.h>
-#include "bonding.h"
+#include <net/bonding.h>
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval);
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
deleted file mode 100644
index 17ded5b29176..000000000000
--- a/drivers/net/bonding/bond_options.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * drivers/net/bond/bond_options.h - bonding options
- * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _BOND_OPTIONS_H
-#define _BOND_OPTIONS_H
-
-#define BOND_OPT_MAX_NAMELEN 32
-#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
-#define BOND_MODE_ALL_EX(x) (~(x))
-
-/* Option flags:
- * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting
- * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting
- * BOND_OPTFLAG_RAWVAL - the option parses the value itself
- */
-enum {
- BOND_OPTFLAG_NOSLAVES = BIT(0),
- BOND_OPTFLAG_IFDOWN = BIT(1),
- BOND_OPTFLAG_RAWVAL = BIT(2)
-};
-
-/* Value type flags:
- * BOND_VALFLAG_DEFAULT - mark the value as default
- * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max
- */
-enum {
- BOND_VALFLAG_DEFAULT = BIT(0),
- BOND_VALFLAG_MIN = BIT(1),
- BOND_VALFLAG_MAX = BIT(2)
-};
-
-/* Option IDs, their bit positions correspond to their IDs */
-enum {
- BOND_OPT_MODE,
- BOND_OPT_PACKETS_PER_SLAVE,
- BOND_OPT_XMIT_HASH,
- BOND_OPT_ARP_VALIDATE,
- BOND_OPT_ARP_ALL_TARGETS,
- BOND_OPT_FAIL_OVER_MAC,
- BOND_OPT_ARP_INTERVAL,
- BOND_OPT_ARP_TARGETS,
- BOND_OPT_DOWNDELAY,
- BOND_OPT_UPDELAY,
- BOND_OPT_LACP_RATE,
- BOND_OPT_MINLINKS,
- BOND_OPT_AD_SELECT,
- BOND_OPT_NUM_PEER_NOTIF,
- BOND_OPT_MIIMON,
- BOND_OPT_PRIMARY,
- BOND_OPT_PRIMARY_RESELECT,
- BOND_OPT_USE_CARRIER,
- BOND_OPT_ACTIVE_SLAVE,
- BOND_OPT_QUEUE_ID,
- BOND_OPT_ALL_SLAVES_ACTIVE,
- BOND_OPT_RESEND_IGMP,
- BOND_OPT_LP_INTERVAL,
- BOND_OPT_SLAVES,
- BOND_OPT_TLB_DYNAMIC_LB,
- BOND_OPT_LAST
-};
-
-/* This structure is used for storing option values and for passing option
- * values when changing an option. The logic when used as an arg is as follows:
- * - if string != NULL -> parse it, if the opt is RAW type then return it, else
- * return the parse result
- * - if string == NULL -> parse value
- */
-struct bond_opt_value {
- char *string;
- u64 value;
- u32 flags;
-};
-
-struct bonding;
-
-struct bond_option {
- int id;
- const char *name;
- const char *desc;
- u32 flags;
-
- /* unsuppmodes is used to denote modes in which the option isn't
- * supported.
- */
- unsigned long unsuppmodes;
- /* supported values which this option can have, can be a subset of
- * BOND_OPTVAL_RANGE's value range
- */
- const struct bond_opt_value *values;
-
- int (*set)(struct bonding *bond, const struct bond_opt_value *val);
-};
-
-int __bond_opt_set(struct bonding *bond, unsigned int option,
- struct bond_opt_value *val);
-int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
-
-const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
- struct bond_opt_value *val);
-const struct bond_option *bond_opt_get(unsigned int option);
-const struct bond_option *bond_opt_get_by_name(const char *name);
-const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
-
-/* This helper is used to initialize a bond_opt_value structure for parameter
- * passing. There should be either a valid string or value, but not both.
- * When value is ULLONG_MAX then string will be used.
- */
-static inline void __bond_opt_init(struct bond_opt_value *optval,
- char *string, u64 value)
-{
- memset(optval, 0, sizeof(*optval));
- optval->value = ULLONG_MAX;
- if (value == ULLONG_MAX)
- optval->string = string;
- else
- optval->value = value;
-}
-#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
-#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
-
-void bond_option_arp_ip_targets_clear(struct bonding *bond);
-
-#endif /* _BOND_OPTIONS_H */
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index a3948f8d1e53..976f5ad2a0f2 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -2,7 +2,7 @@
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#include "bonding.h"
+#include <net/bonding.h>
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8ffbafd500fd..7e9e151d4d61 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -40,7 +40,7 @@
#include <net/netns/generic.h>
#include <linux/nsproxy.h>
-#include "bonding.h"
+#include <net/bonding.h>
#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index b01b0ce4d1be..23618a831612 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include "bonding.h"
+#include <net/bonding.h>
struct slave_attribute {
struct attribute attr;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
deleted file mode 100644
index 10920f0686e2..000000000000
--- a/drivers/net/bonding/bonding.h
+++ /dev/null
@@ -1,648 +0,0 @@
-/*
- * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
- *
- * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
- * NCM: Network and Communications Management, Inc.
- *
- * BUT, I'm the one who modified it for ethernet, so:
- * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
- *
- * This software may be used and distributed according to the terms
- * of the GNU Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _LINUX_BONDING_H
-#define _LINUX_BONDING_H
-
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <linux/if_bonding.h>
-#include <linux/cpumask.h>
-#include <linux/in6.h>
-#include <linux/netpoll.h>
-#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/reciprocal_div.h>
-#include <linux/if_link.h>
-
-#include "bond_3ad.h"
-#include "bond_alb.h"
-#include "bond_options.h"
-
-#define DRV_VERSION "3.7.1"
-#define DRV_RELDATE "April 27, 2011"
-#define DRV_NAME "bonding"
-#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
-
-#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
-
-#define BOND_MAX_ARP_TARGETS 16
-
-#define BOND_DEFAULT_MIIMON 100
-
-/*
- * Less bad way to call ioctl from within the kernel; this needs to be
- * done some other way to get the call out of interrupt context.
- * Needs "ioctl" variable to be supplied by calling context.
- */
-#define IOCTL(dev, arg, cmd) ({ \
- int res = 0; \
- mm_segment_t fs = get_fs(); \
- set_fs(get_ds()); \
- res = ioctl(dev, arg, cmd); \
- set_fs(fs); \
- res; })
-
-#define BOND_MODE(bond) ((bond)->params.mode)
-
-/* slave list primitives */
-#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
-
-#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
-
-/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
-#define bond_first_slave(bond) \
- (bond_has_slaves(bond) ? \
- netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
- NULL)
-#define bond_last_slave(bond) \
- (bond_has_slaves(bond) ? \
- netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
- NULL)
-
-/* Caller must have rcu_read_lock */
-#define bond_first_slave_rcu(bond) \
- netdev_lower_get_first_private_rcu(bond->dev)
-
-#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
-#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
-
-/**
- * bond_for_each_slave - iterate over all slaves
- * @bond: the bond holding this list
- * @pos: current slave
- * @iter: list_head * iterator
- *
- * Caller must hold RTNL
- */
-#define bond_for_each_slave(bond, pos, iter) \
- netdev_for_each_lower_private((bond)->dev, pos, iter)
-
-/* Caller must have rcu_read_lock */
-#define bond_for_each_slave_rcu(bond, pos, iter) \
- netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-extern atomic_t netpoll_block_tx;
-
-static inline void block_netpoll_tx(void)
-{
- atomic_inc(&netpoll_block_tx);
-}
-
-static inline void unblock_netpoll_tx(void)
-{
- atomic_dec(&netpoll_block_tx);
-}
-
-static inline int is_netpoll_tx_blocked(struct net_device *dev)
-{
- if (unlikely(netpoll_tx_running(dev)))
- return atomic_read(&netpoll_block_tx);
- return 0;
-}
-#else
-#define block_netpoll_tx()
-#define unblock_netpoll_tx()
-#define is_netpoll_tx_blocked(dev) (0)
-#endif
-
-struct bond_params {
- int mode;
- int xmit_policy;
- int miimon;
- u8 num_peer_notif;
- int arp_interval;
- int arp_validate;
- int arp_all_targets;
- int use_carrier;
- int fail_over_mac;
- int updelay;
- int downdelay;
- int lacp_fast;
- unsigned int min_links;
- int ad_select;
- char primary[IFNAMSIZ];
- int primary_reselect;
- __be32 arp_targets[BOND_MAX_ARP_TARGETS];
- int tx_queues;
- int all_slaves_active;
- int resend_igmp;
- int lp_interval;
- int packets_per_slave;
- int tlb_dynamic_lb;
- struct reciprocal_value reciprocal_packets_per_slave;
-};
-
-struct bond_parm_tbl {
- char *modename;
- int mode;
-};
-
-struct slave {
- struct net_device *dev; /* first - useful for panic debug */
- struct bonding *bond; /* our master */
- int delay;
- /* all three in jiffies */
- unsigned long last_link_up;
- unsigned long last_rx;
- unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
- s8 link; /* one of BOND_LINK_XXXX */
- s8 new_link;
- u8 backup:1, /* indicates backup slave. Value corresponds with
- BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
- inactive:1, /* indicates inactive slave */
- should_notify:1; /* indicates whether the state changed */
- u8 duplex;
- u32 original_mtu;
- u32 link_failure_count;
- u32 speed;
- u16 queue_id;
- u8 perm_hwaddr[ETH_ALEN];
- struct ad_slave_info *ad_info;
- struct tlb_slave_info tlb_info;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- struct netpoll *np;
-#endif
- struct kobject kobj;
- struct rtnl_link_stats64 slave_stats;
-};
-
-struct bond_up_slave {
- unsigned int count;
- struct rcu_head rcu;
- struct slave *arr[0];
-};
-
-/*
- * Link pseudo-state only used internally by monitors
- */
-#define BOND_LINK_NOCHANGE -1
-
-/*
- * Here are the locking policies for the two bonding locks:
- * Get rcu_read_lock when reading or RTNL when writing slave list.
- */
-struct bonding {
- struct net_device *dev; /* first - useful for panic debug */
- struct slave __rcu *curr_active_slave;
- struct slave __rcu *current_arp_slave;
- struct slave __rcu *primary_slave;
- struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
- bool force_primary;
- s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
- int (*recv_probe)(const struct sk_buff *, struct bonding *,
- struct slave *);
- /* mode_lock is used for mode-specific locking needs, currently used by:
- * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and
- * bond_3ad_state_machine_handler() concurrently and also
- * the access to the state machine shared variables.
- * TLB mode (5) - to sync the use and modifications of its hash table
- * ALB mode (6) - to sync the use and modifications of its hash table
- */
- spinlock_t mode_lock;
- u8 send_peer_notif;
- u8 igmp_retrans;
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *proc_entry;
- char proc_file_name[IFNAMSIZ];
-#endif /* CONFIG_PROC_FS */
- struct list_head bond_list;
- u32 rr_tx_counter;
- struct ad_bond_info ad_info;
- struct alb_bond_info alb_info;
- struct bond_params params;
- struct workqueue_struct *wq;
- struct delayed_work mii_work;
- struct delayed_work arp_work;
- struct delayed_work alb_work;
- struct delayed_work ad_work;
- struct delayed_work mcast_work;
- struct delayed_work slave_arr_work;
-#ifdef CONFIG_DEBUG_FS
- /* debugging support via debugfs */
- struct dentry *debug_dir;
-#endif /* CONFIG_DEBUG_FS */
- struct rtnl_link_stats64 bond_stats;
-};
-
-#define bond_slave_get_rcu(dev) \
- ((struct slave *) rcu_dereference(dev->rx_handler_data))
-
-#define bond_slave_get_rtnl(dev) \
- ((struct slave *) rtnl_dereference(dev->rx_handler_data))
-
-struct bond_vlan_tag {
- __be16 vlan_proto;
- unsigned short vlan_id;
-};
-
-/**
- * Returns NULL if the net_device does not belong to any of the bond's slaves
- *
- * Caller must hold bond lock for read
- */
-static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
- struct net_device *slave_dev)
-{
- return netdev_lower_dev_get_private(bond->dev, slave_dev);
-}
-
-static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
-{
- return slave->bond;
-}
-
-static inline bool bond_should_override_tx_queue(struct bonding *bond)
-{
- return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
- BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
-}
-
-static inline bool bond_is_lb(const struct bonding *bond)
-{
- return BOND_MODE(bond) == BOND_MODE_TLB ||
- BOND_MODE(bond) == BOND_MODE_ALB;
-}
-
-static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
-{
- return (BOND_MODE(bond) == BOND_MODE_TLB) &&
- (bond->params.tlb_dynamic_lb == 0);
-}
-
-static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond)
-{
- return (BOND_MODE(bond) == BOND_MODE_8023AD ||
- BOND_MODE(bond) == BOND_MODE_XOR ||
- bond_is_nondyn_tlb(bond));
-}
-
-static inline bool bond_mode_uses_arp(int mode)
-{
- return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
- mode != BOND_MODE_ALB;
-}
-
-static inline bool bond_mode_uses_primary(int mode)
-{
- return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
- mode == BOND_MODE_ALB;
-}
-
-static inline bool bond_uses_primary(struct bonding *bond)
-{
- return bond_mode_uses_primary(BOND_MODE(bond));
-}
-
-static inline bool bond_slave_is_up(struct slave *slave)
-{
- return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
-}
-
-static inline void bond_set_active_slave(struct slave *slave)
-{
- if (slave->backup) {
- slave->backup = 0;
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
- }
-}
-
-static inline void bond_set_backup_slave(struct slave *slave)
-{
- if (!slave->backup) {
- slave->backup = 1;
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
- }
-}
-
-static inline void bond_set_slave_state(struct slave *slave,
- int slave_state, bool notify)
-{
- if (slave->backup == slave_state)
- return;
-
- slave->backup = slave_state;
- if (notify) {
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
- slave->should_notify = 0;
- } else {
- if (slave->should_notify)
- slave->should_notify = 0;
- else
- slave->should_notify = 1;
- }
-}
-
-static inline void bond_slave_state_change(struct bonding *bond)
-{
- struct list_head *iter;
- struct slave *tmp;
-
- bond_for_each_slave(bond, tmp, iter) {
- if (tmp->link == BOND_LINK_UP)
- bond_set_active_slave(tmp);
- else if (tmp->link == BOND_LINK_DOWN)
- bond_set_backup_slave(tmp);
- }
-}
-
-static inline void bond_slave_state_notify(struct bonding *bond)
-{
- struct list_head *iter;
- struct slave *tmp;
-
- bond_for_each_slave(bond, tmp, iter) {
- if (tmp->should_notify) {
- rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC);
- tmp->should_notify = 0;
- }
- }
-}
-
-static inline int bond_slave_state(struct slave *slave)
-{
- return slave->backup;
-}
-
-static inline bool bond_is_active_slave(struct slave *slave)
-{
- return !bond_slave_state(slave);
-}
-
-static inline bool bond_slave_can_tx(struct slave *slave)
-{
- return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
- bond_is_active_slave(slave);
-}
-
-#define BOND_PRI_RESELECT_ALWAYS 0
-#define BOND_PRI_RESELECT_BETTER 1
-#define BOND_PRI_RESELECT_FAILURE 2
-
-#define BOND_FOM_NONE 0
-#define BOND_FOM_ACTIVE 1
-#define BOND_FOM_FOLLOW 2
-
-#define BOND_ARP_TARGETS_ANY 0
-#define BOND_ARP_TARGETS_ALL 1
-
-#define BOND_ARP_VALIDATE_NONE 0
-#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE)
-#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
-#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
- BOND_ARP_VALIDATE_BACKUP)
-#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1)
-#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \
- BOND_ARP_FILTER)
-#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \
- BOND_ARP_FILTER)
-
-#define BOND_SLAVE_NOTIFY_NOW true
-#define BOND_SLAVE_NOTIFY_LATER false
-
-static inline int slave_do_arp_validate(struct bonding *bond,
- struct slave *slave)
-{
- return bond->params.arp_validate & (1 << bond_slave_state(slave));
-}
-
-static inline int slave_do_arp_validate_only(struct bonding *bond)
-{
- return bond->params.arp_validate & BOND_ARP_FILTER;
-}
-
-static inline int bond_is_ip_target_ok(__be32 addr)
-{
- return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
-}
-
-/* Get the oldest arp which we've received on this slave for bond's
- * arp_targets.
- */
-static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
- struct slave *slave)
-{
- int i = 1;
- unsigned long ret = slave->target_last_arp_rx[0];
-
- for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++)
- if (time_before(slave->target_last_arp_rx[i], ret))
- ret = slave->target_last_arp_rx[i];
-
- return ret;
-}
-
-static inline unsigned long slave_last_rx(struct bonding *bond,
- struct slave *slave)
-{
- if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
- return slave_oldest_target_arp_rx(bond, slave);
-
- return slave->last_rx;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static inline void bond_netpoll_send_skb(const struct slave *slave,
- struct sk_buff *skb)
-{
- struct netpoll *np = slave->np;
-
- if (np)
- netpoll_send_skb(np, skb);
-}
-#else
-static inline void bond_netpoll_send_skb(const struct slave *slave,
- struct sk_buff *skb)
-{
-}
-#endif
-
-static inline void bond_set_slave_inactive_flags(struct slave *slave,
- bool notify)
-{
- if (!bond_is_lb(slave->bond))
- bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
- if (!slave->bond->params.all_slaves_active)
- slave->inactive = 1;
-}
-
-static inline void bond_set_slave_active_flags(struct slave *slave,
- bool notify)
-{
- bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
- slave->inactive = 0;
-}
-
-static inline bool bond_is_slave_inactive(struct slave *slave)
-{
- return slave->inactive;
-}
-
-static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
-{
- struct in_device *in_dev;
- __be32 addr = 0;
-
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(dev);
-
- if (in_dev)
- addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local,
- RT_SCOPE_HOST);
- rcu_read_unlock();
- return addr;
-}
-
-struct bond_net {
- struct net *net; /* Associated network namespace */
- struct list_head dev_list;
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *proc_dir;
-#endif
- struct class_attribute class_attr_bonding_masters;
-};
-
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
-int bond_create(struct net *net, const char *name);
-int bond_create_sysfs(struct bond_net *net);
-void bond_destroy_sysfs(struct bond_net *net);
-void bond_prepare_sysfs_group(struct bonding *bond);
-int bond_sysfs_slave_add(struct slave *slave);
-void bond_sysfs_slave_del(struct slave *slave);
-int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
-int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
-void bond_select_active_slave(struct bonding *bond);
-void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
-void bond_create_debugfs(void);
-void bond_destroy_debugfs(void);
-void bond_debug_register(struct bonding *bond);
-void bond_debug_unregister(struct bonding *bond);
-void bond_debug_reregister(struct bonding *bond);
-const char *bond_mode_name(int mode);
-void bond_setup(struct net_device *bond_dev);
-unsigned int bond_get_num_tx_queues(void);
-int bond_netlink_init(void);
-void bond_netlink_fini(void);
-struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
-const char *bond_slave_link_status(s8 link);
-struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
- struct net_device *end_dev,
- int level);
-int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
-void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
-
-#ifdef CONFIG_PROC_FS
-void bond_create_proc_entry(struct bonding *bond);
-void bond_remove_proc_entry(struct bonding *bond);
-void bond_create_proc_dir(struct bond_net *bn);
-void bond_destroy_proc_dir(struct bond_net *bn);
-#else
-static inline void bond_create_proc_entry(struct bonding *bond)
-{
-}
-
-static inline void bond_remove_proc_entry(struct bonding *bond)
-{
-}
-
-static inline void bond_create_proc_dir(struct bond_net *bn)
-{
-}
-
-static inline void bond_destroy_proc_dir(struct bond_net *bn)
-{
-}
-#endif
-
-static inline struct slave *bond_slave_has_mac(struct bonding *bond,
- const u8 *mac)
-{
- struct list_head *iter;
- struct slave *tmp;
-
- bond_for_each_slave(bond, tmp, iter)
- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
- return tmp;
-
- return NULL;
-}
-
-/* Caller must hold rcu_read_lock() for read */
-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
- const u8 *mac)
-{
- struct list_head *iter;
- struct slave *tmp;
-
- bond_for_each_slave_rcu(bond, tmp, iter)
- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
- return tmp;
-
- return NULL;
-}
-
-/* Caller must hold rcu_read_lock() for read */
-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
-{
- struct list_head *iter;
- struct slave *tmp;
- struct netdev_hw_addr *ha;
-
- bond_for_each_slave_rcu(bond, tmp, iter)
- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
- return true;
-
- if (netdev_uc_empty(bond->dev))
- return false;
-
- netdev_for_each_uc_addr(ha, bond->dev)
- if (ether_addr_equal_64bits(mac, ha->addr))
- return true;
-
- return false;
-}
-
-/* Check if the ip is present in the arp ip list, or find the first free slot
- * if ip == 0. Returns -1 if not found, otherwise the index.
- */
-static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
-{
- int i;
-
- for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
- if (targets[i] == ip)
- return i;
- else if (targets[i] == 0)
- break;
-
- return -1;
-}
-
-/* exported from bond_main.c */
-extern int bond_net_id;
-extern const struct bond_parm_tbl bond_lacp_tbl[];
-extern const struct bond_parm_tbl xmit_hashtype_tbl[];
-extern const struct bond_parm_tbl arp_validate_tbl[];
-extern const struct bond_parm_tbl arp_all_targets_tbl[];
-extern const struct bond_parm_tbl fail_over_mac_tbl[];
-extern const struct bond_parm_tbl pri_reselect_tbl[];
-extern struct bond_parm_tbl ad_select_tbl[];
-
-/* exported from bond_netlink.c */
-extern struct rtnl_link_ops bond_link_ops;
-
-#endif /* _LINUX_BONDING_H */
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index fc9304143f44..c533c62b0f5e 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -29,4 +29,5 @@ obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_RCAR) += rcar_can.o
obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
-subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
+subdir-ccflags-y += -D__CHECK_ENDIAN__
+subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 8e78bb48f5a4..f94a9fa60488 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -35,6 +35,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -603,6 +604,8 @@ static int c_can_start(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* activate pins */
+ pinctrl_pm_select_default_state(dev->dev.parent);
return 0;
}
@@ -611,6 +614,9 @@ static void c_can_stop(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
c_can_irq_control(priv, false);
+
+ /* deactivate pins */
+ pinctrl_pm_select_sleep_state(dev->dev.parent);
priv->can.state = CAN_STATE_STOPPED;
}
@@ -1244,6 +1250,13 @@ int register_c_can_dev(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
int err;
+ /* Deactivate pins to prevent DRA7 DCAN IP from being
+ * stuck in transition when module is disabled.
+ * Pins are activated in c_can_start() and deactivated
+ * in c_can_stop()
+ */
+ pinctrl_pm_select_sleep_state(dev->dev.parent);
+
c_can_pm_runtime_enable(priv);
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 99ad1aa576b0..8acdc7fa4792 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -169,6 +169,28 @@ enum c_can_dev_id {
BOSCH_D_CAN,
};
+struct raminit_bits {
+ u8 start;
+ u8 done;
+};
+
+struct c_can_driver_data {
+ enum c_can_dev_id id;
+
+ /* RAMINIT register description. Optional. */
+ const struct raminit_bits *raminit_bits; /* Array of START/DONE bit positions */
+ u8 raminit_num; /* Number of CAN instances on the SoC */
+ bool raminit_pulse; /* If set, sets and clears START bit (pulse) */
+};
+
+/* Out of band RAMINIT register access via syscon regmap */
+struct c_can_raminit {
+ struct regmap *syscon; /* for raminit ctrl. reg. access */
+ unsigned int reg; /* register index within syscon */
+ struct raminit_bits bits;
+ bool needs_pulse;
+};
+
/* c_can private data structure */
struct c_can_priv {
struct can_priv can; /* must be the first member */
@@ -186,8 +208,7 @@ struct c_can_priv {
const u16 *regs;
void *priv; /* for board-specific data */
enum c_can_dev_id type;
- u32 __iomem *raminit_ctrlreg;
- int instance;
+ struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit) (const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
u32 rxmasked;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index fb279d6ae484..a4535d2142a7 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -32,14 +32,13 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/can/dev.h>
#include "c_can.h"
-#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
-#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
-#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
#define DCAN_RAM_INIT_BIT (1 << 3)
static DEFINE_SPINLOCK(raminit_lock);
/*
@@ -72,39 +71,63 @@ static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
-static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
- u32 val)
+static void c_can_hw_raminit_wait_syscon(const struct c_can_priv *priv,
+ u32 mask, u32 val)
{
+ const struct c_can_raminit *raminit = &priv->raminit_sys;
+ int timeout = 0;
+ u32 ctrl = 0;
+
/* We look only at the bits of our instance. */
val &= mask;
- while ((readl(priv->raminit_ctrlreg) & mask) != val)
+ do {
udelay(1);
+ timeout++;
+
+ regmap_read(raminit->syscon, raminit->reg, &ctrl);
+ if (timeout == 1000) {
+ dev_err(&priv->dev->dev, "%s: timeout\n", __func__);
+ break;
+ }
+ } while ((ctrl & mask) != val);
}
-static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
+static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable)
{
- u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
- u32 ctrl;
+ const struct c_can_raminit *raminit = &priv->raminit_sys;
+ u32 ctrl = 0;
+ u32 mask;
spin_lock(&raminit_lock);
- ctrl = readl(priv->raminit_ctrlreg);
+ mask = 1 << raminit->bits.start | 1 << raminit->bits.done;
+ regmap_read(raminit->syscon, raminit->reg, &ctrl);
+
/* We clear the done and start bits first. The start bit is
* looking at the 0 -> 1 transition, but is not self clearing;
* and we clear the init done bit as well.
+ * NOTE: DONE must be written with 1 to clear it.
*/
- ctrl &= ~CAN_RAMINIT_START_MASK(priv->instance);
- ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
- writel(ctrl, priv->raminit_ctrlreg);
- ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait_ti(priv, mask, ctrl);
+ ctrl &= ~(1 << raminit->bits.start);
+ ctrl |= 1 << raminit->bits.done;
+ regmap_write(raminit->syscon, raminit->reg, ctrl);
+
+ ctrl &= ~(1 << raminit->bits.done);
+ c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
if (enable) {
/* Set start bit and wait for the done bit. */
- ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
- writel(ctrl, priv->raminit_ctrlreg);
- ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait_ti(priv, mask, ctrl);
+ ctrl |= 1 << raminit->bits.start;
+ regmap_write(raminit->syscon, raminit->reg, ctrl);
+
+ /* clear START bit if start pulse is needed */
+ if (raminit->needs_pulse) {
+ ctrl &= ~(1 << raminit->bits.start);
+ regmap_write(raminit->syscon, raminit->reg, ctrl);
+ }
+
+ ctrl |= 1 << raminit->bits.done;
+ c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
}
spin_unlock(&raminit_lock);
}
@@ -159,26 +182,60 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
}
}
+static const struct c_can_driver_data c_can_drvdata = {
+ .id = BOSCH_C_CAN,
+};
+
+static const struct c_can_driver_data d_can_drvdata = {
+ .id = BOSCH_D_CAN,
+};
+
+static const struct raminit_bits dra7_raminit_bits[] = {
+ [0] = { .start = 3, .done = 1, },
+ [1] = { .start = 5, .done = 2, },
+};
+
+static const struct c_can_driver_data dra7_dcan_drvdata = {
+ .id = BOSCH_D_CAN,
+ .raminit_num = ARRAY_SIZE(dra7_raminit_bits),
+ .raminit_bits = dra7_raminit_bits,
+ .raminit_pulse = true,
+};
+
+static const struct raminit_bits am3352_raminit_bits[] = {
+ [0] = { .start = 0, .done = 8, },
+ [1] = { .start = 1, .done = 9, },
+};
+
+static const struct c_can_driver_data am3352_dcan_drvdata = {
+ .id = BOSCH_D_CAN,
+ .raminit_num = ARRAY_SIZE(am3352_raminit_bits),
+ .raminit_bits = am3352_raminit_bits,
+};
+
static struct platform_device_id c_can_id_table[] = {
- [BOSCH_C_CAN_PLATFORM] = {
+ {
.name = KBUILD_MODNAME,
- .driver_data = BOSCH_C_CAN,
+ .driver_data = (kernel_ulong_t)&c_can_drvdata,
},
- [BOSCH_C_CAN] = {
+ {
.name = "c_can",
- .driver_data = BOSCH_C_CAN,
+ .driver_data = (kernel_ulong_t)&c_can_drvdata,
},
- [BOSCH_D_CAN] = {
+ {
.name = "d_can",
- .driver_data = BOSCH_D_CAN,
- }, {
- }
+ .driver_data = (kernel_ulong_t)&d_can_drvdata,
+ },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, c_can_id_table);
static const struct of_device_id c_can_of_table[] = {
- { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
- { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
+ { .compatible = "bosch,c_can", .data = &c_can_drvdata },
+ { .compatible = "bosch,d_can", .data = &d_can_drvdata },
+ { .compatible = "ti,dra7-d_can", .data = &dra7_dcan_drvdata },
+ { .compatible = "ti,am3352-d_can", .data = &am3352_dcan_drvdata },
+ { .compatible = "ti,am4372-d_can", .data = &am3352_dcan_drvdata },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c_can_of_table);
@@ -190,21 +247,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
struct net_device *dev;
struct c_can_priv *priv;
const struct of_device_id *match;
- const struct platform_device_id *id;
- struct resource *mem, *res;
+ struct resource *mem;
int irq;
struct clk *clk;
-
- if (pdev->dev.of_node) {
- match = of_match_device(c_can_of_table, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Failed to find matching dt id\n");
- ret = -EINVAL;
- goto exit;
- }
- id = match->data;
+ const struct c_can_driver_data *drvdata;
+ struct device_node *np = pdev->dev.of_node;
+
+ match = of_match_device(c_can_of_table, &pdev->dev);
+ if (match) {
+ drvdata = match->data;
+ } else if (pdev->id_entry->driver_data) {
+ drvdata = (struct c_can_driver_data *)
+ platform_get_device_id(pdev)->driver_data;
} else {
- id = platform_get_device_id(pdev);
+ return -ENODEV;
}
/* get the appropriate clk */
@@ -236,7 +292,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
}
priv = netdev_priv(dev);
- switch (id->driver_data) {
+ switch (drvdata->id) {
case BOSCH_C_CAN:
priv->regs = reg_map_c_can;
switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
@@ -263,27 +319,50 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->read_reg32 = d_can_plat_read_reg32;
priv->write_reg32 = d_can_plat_write_reg32;
- if (pdev->dev.of_node)
- priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
- else
- priv->instance = pdev->id;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- /* Not all D_CAN modules have a separate register for the D_CAN
- * RAM initialization. Use default RAM init bit in D_CAN module
- * if not specified in DT.
+ /* Check if we need custom RAMINIT via syscon. Mostly for TI
+ * platforms. Only supported with DT boot.
*/
- if (!res) {
+ if (np && of_property_read_bool(np, "syscon-raminit")) {
+ u32 id;
+ struct c_can_raminit *raminit = &priv->raminit_sys;
+
+ ret = -EINVAL;
+ raminit->syscon = syscon_regmap_lookup_by_phandle(np,
+ "syscon-raminit");
+ if (IS_ERR(raminit->syscon)) {
+ /* can fail with -EPROBE_DEFER */
+ ret = PTR_ERR(raminit->syscon);
+ free_c_can_dev(dev);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "syscon-raminit", 1,
+ &raminit->reg)) {
+ dev_err(&pdev->dev,
+ "couldn't get the RAMINIT reg. offset!\n");
+ goto exit_free_device;
+ }
+
+ if (of_property_read_u32_index(np, "syscon-raminit", 2,
+ &id)) {
+ dev_err(&pdev->dev,
+ "couldn't get the CAN instance ID\n");
+ goto exit_free_device;
+ }
+
+ if (id >= drvdata->raminit_num) {
+ dev_err(&pdev->dev,
+ "Invalid CAN instance ID\n");
+ goto exit_free_device;
+ }
+
+ raminit->bits = drvdata->raminit_bits[id];
+ raminit->needs_pulse = drvdata->raminit_pulse;
+
+ priv->raminit = c_can_hw_raminit_syscon;
+ } else {
priv->raminit = c_can_hw_raminit;
- break;
}
-
- priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!priv->raminit_ctrlreg || priv->instance < 0)
- dev_info(&pdev->dev, "control memory is not used for raminit\n");
- else
- priv->raminit = c_can_hw_raminit_ti;
break;
default:
ret = -EINVAL;
@@ -295,7 +374,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->device = &pdev->dev;
priv->can.clock.freq = clk_get_rate(clk);
priv->priv = clk;
- priv->type = id->driver_data;
+ priv->type = drvdata->id;
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d8379278d648..c486fe510f37 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -60,7 +60,7 @@ MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver");
*
* The message objects 1..14 can be used for TX and RX while the message
* object 15 is optimized for RX. It has a shadow register for reliable
- * data receiption under heavy bus load. Therefore it makes sense to use
+ * data reception under heavy bus load. Therefore it makes sense to use
* this message object for the needed use case. The frame type (EFF/SFF)
* for the message object 15 can be defined via kernel module parameter
* "msgobj15_eff". If not equal 0, it will receive 29-bit EFF frames,
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 2cfe5012e4e5..3ec8f6f25e5f 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -273,6 +273,84 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
return err;
}
+static void can_update_state_error_stats(struct net_device *dev,
+ enum can_state new_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (new_state <= priv->state)
+ return;
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ priv->can_stats.error_warning++;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can_stats.error_passive++;
+ break;
+ case CAN_STATE_BUS_OFF:
+ default:
+ break;
+ }
+}
+}
+
+static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_TX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_TX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return CAN_ERR_CRTL_ACTIVE;
+ case CAN_STATE_ERROR_WARNING:
+ return CAN_ERR_CRTL_RX_WARNING;
+ case CAN_STATE_ERROR_PASSIVE:
+ return CAN_ERR_CRTL_RX_PASSIVE;
+ default:
+ return 0;
+ }
+}
+
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ enum can_state new_state = max(tx_state, rx_state);
+
+ if (unlikely(new_state == priv->state)) {
+ netdev_warn(dev, "%s: oops, state did not change\n", __func__);
+ return;
+ }
+
+ netdev_dbg(dev, "New error state: %d\n", new_state);
+
+ can_update_state_error_stats(dev, new_state);
+ priv->state = new_state;
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_BUSOFF;
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= tx_state >= rx_state ?
+ can_tx_state_to_frame(dev, tx_state) : 0;
+ cf->data[1] |= tx_state <= rx_state ?
+ can_rx_state_to_frame(dev, rx_state) : 0;
+}
+EXPORT_SYMBOL_GPL(can_change_state);
+
/*
* Local echo of CAN messages
*
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 60f86bd0434a..dde05486bc99 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,98 +577,30 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
return 1;
}
-static void do_state(struct net_device *dev,
- struct can_frame *cf, enum can_state new_state)
-{
- struct flexcan_priv *priv = netdev_priv(dev);
- struct can_berr_counter bec;
-
- __flexcan_get_berr_counter(dev, &bec);
-
- switch (priv->can.state) {
- case CAN_STATE_ERROR_ACTIVE:
- /*
- * from: ERROR_ACTIVE
- * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
- * => : there was a warning int
- */
- if (new_state >= CAN_STATE_ERROR_WARNING &&
- new_state <= CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "Error Warning IRQ\n");
- priv->can.can_stats.error_warning++;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (bec.txerr > bec.rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- }
- case CAN_STATE_ERROR_WARNING: /* fallthrough */
- /*
- * from: ERROR_ACTIVE, ERROR_WARNING
- * to : ERROR_PASSIVE, BUS_OFF
- * => : error passive int
- */
- if (new_state >= CAN_STATE_ERROR_PASSIVE &&
- new_state <= CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "Error Passive IRQ\n");
- priv->can.can_stats.error_passive++;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (bec.txerr > bec.rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- break;
- case CAN_STATE_BUS_OFF:
- netdev_err(dev, "BUG! "
- "hardware recovered automatically from BUS_OFF\n");
- break;
- default:
- break;
- }
-
- /* process state changes depending on the new state */
- switch (new_state) {
- case CAN_STATE_ERROR_WARNING:
- netdev_dbg(dev, "Error Warning\n");
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (bec.txerr > bec.rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- break;
- case CAN_STATE_ERROR_ACTIVE:
- netdev_dbg(dev, "Error Active\n");
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_ACTIVE;
- break;
- case CAN_STATE_BUS_OFF:
- cf->can_id |= CAN_ERR_BUSOFF;
- can_bus_off(dev);
- break;
- default:
- break;
- }
-}
-
static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
struct can_frame *cf;
- enum can_state new_state;
+ enum can_state new_state = 0, rx_state = 0, tx_state = 0;
int flt;
+ struct can_berr_counter bec;
flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
- if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN |
- FLEXCAN_ESR_RX_WRN))))
- new_state = CAN_STATE_ERROR_ACTIVE;
- else
- new_state = CAN_STATE_ERROR_WARNING;
- } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE))
+ tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ?
+ CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+ rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
+ CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+ new_state = max(tx_state, rx_state);
+ } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+ __flexcan_get_berr_counter(dev, &bec);
new_state = CAN_STATE_ERROR_PASSIVE;
- else
+ rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
+ tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
+ } else {
new_state = CAN_STATE_BUS_OFF;
+ }
/* state hasn't changed */
if (likely(new_state == priv->can.state))
@@ -678,8 +610,11 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
if (unlikely(!skb))
return 0;
- do_state(dev, cf, new_state);
- priv->can.state = new_state;
+ can_change_state(dev, cf, tx_state, rx_state);
+
+ if (unlikely(new_state == CAN_STATE_BUS_OFF))
+ can_bus_off(dev);
+
netif_receive_skb(skb);
dev->stats.rx_packets++;
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index e0c9be5e2ab7..e36b7400d5cc 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -289,18 +289,15 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-/* This function returns the old state to see where we came from */
-static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
+static enum can_state get_new_state(struct net_device *dev, u8 canrflg)
{
struct mscan_priv *priv = netdev_priv(dev);
- enum can_state state, old_state = priv->can.state;
- if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
- state = state_map[max(MSCAN_STATE_RX(canrflg),
- MSCAN_STATE_TX(canrflg))];
- priv->can.state = state;
- }
- return old_state;
+ if (unlikely(canrflg & MSCAN_CSCIF))
+ return state_map[max(MSCAN_STATE_RX(canrflg),
+ MSCAN_STATE_TX(canrflg))];
+
+ return priv->can.state;
}
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
@@ -349,7 +346,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
- enum can_state old_state;
+ enum can_state new_state;
netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
frame->can_id = CAN_ERR_FLAG;
@@ -363,27 +360,13 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
frame->data[1] = 0;
}
- old_state = check_set_state(dev, canrflg);
- /* State changed */
- if (old_state != priv->can.state) {
- switch (priv->can.state) {
- case CAN_STATE_ERROR_WARNING:
- frame->can_id |= CAN_ERR_CRTL;
- priv->can.can_stats.error_warning++;
- if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
- (canrflg & MSCAN_RSTAT_MSK))
- frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
- (canrflg & MSCAN_TSTAT_MSK))
- frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- break;
- case CAN_STATE_ERROR_PASSIVE:
- frame->can_id |= CAN_ERR_CRTL;
- priv->can.can_stats.error_passive++;
- frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- break;
- case CAN_STATE_BUS_OFF:
- frame->can_id |= CAN_ERR_BUSOFF;
+ new_state = get_new_state(dev, canrflg);
+ if (new_state != priv->can.state) {
+ can_change_state(dev, frame,
+ state_map[MSCAN_STATE_TX(canrflg)],
+ state_map[MSCAN_STATE_RX(canrflg)]);
+
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
/*
* The MSCAN on the MPC5200 does recover from bus-off
* automatically. To avoid that we stop the chip doing
@@ -396,9 +379,6 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
MSCAN_SLPRQ | MSCAN_INITRQ);
}
can_bus_off(dev);
- break;
- default:
- break;
}
}
priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index b27ac6074afb..32bd7f451aa4 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -392,12 +392,20 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
struct can_frame *cf;
struct sk_buff *skb;
enum can_state state = priv->can.state;
+ enum can_state rx_state, tx_state;
+ unsigned int rxerr, txerr;
uint8_t ecc, alc;
skb = alloc_can_err_skb(dev, &cf);
if (skb == NULL)
return -ENOMEM;
+ txerr = priv->read_reg(priv, SJA1000_TXERR);
+ rxerr = priv->read_reg(priv, SJA1000_RXERR);
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -412,13 +420,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
/* error warning interrupt */
netdev_dbg(dev, "error warning interrupt\n");
- if (status & SR_BS) {
+ if (status & SR_BS)
state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- can_bus_off(dev);
- } else if (status & SR_ES) {
+ else if (status & SR_ES)
state = CAN_STATE_ERROR_WARNING;
- } else
+ else
state = CAN_STATE_ERROR_ACTIVE;
}
if (isrc & IRQ_BEI) {
@@ -452,10 +458,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
if (isrc & IRQ_EPI) {
/* error passive interrupt */
netdev_dbg(dev, "error passive interrupt\n");
- if (status & SR_ES)
- state = CAN_STATE_ERROR_PASSIVE;
+
+ if (state == CAN_STATE_ERROR_PASSIVE)
+ state = CAN_STATE_ERROR_WARNING;
else
- state = CAN_STATE_ERROR_ACTIVE;
+ state = CAN_STATE_ERROR_PASSIVE;
}
if (isrc & IRQ_ALI) {
/* arbitration lost interrupt */
@@ -467,27 +474,15 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[0] = alc & 0x1f;
}
- if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
- state == CAN_STATE_ERROR_PASSIVE)) {
- uint8_t rxerr = priv->read_reg(priv, SJA1000_RXERR);
- uint8_t txerr = priv->read_reg(priv, SJA1000_TXERR);
- cf->can_id |= CAN_ERR_CRTL;
- if (state == CAN_STATE_ERROR_WARNING) {
- priv->can.can_stats.error_warning++;
- cf->data[1] = (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- } else {
- priv->can.can_stats.error_passive++;
- cf->data[1] = (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
+ if (state != priv->can.state) {
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
- priv->can.state = state;
+ can_change_state(dev, cf, tx_state, rx_state);
+
+ if (state == CAN_STATE_BUS_OFF)
+ can_bus_off(dev);
+ }
netif_rx(skb);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index acb5b92ace92..c837eb91d43e 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -56,9 +56,6 @@
#include <linux/can.h>
#include <linux/can/skb.h>
-static __initconst const char banner[] =
- KERN_INFO "slcan: serial line CAN interface driver\n";
-
MODULE_ALIAS_LDISC(N_SLCAN);
MODULE_DESCRIPTION("serial line CAN interface");
MODULE_LICENSE("GPL");
@@ -702,8 +699,8 @@ static int __init slcan_init(void)
if (maxdev < 4)
maxdev = 4; /* Sanity */
- printk(banner);
- printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
+ pr_info("slcan: serial line CAN interface driver\n");
+ pr_info("slcan: %d dynamic interface channels.\n", maxdev);
slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
if (!slcan_devs)
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 4e94057ef5cf..674f367087c5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -50,9 +50,6 @@
#include <linux/slab.h>
#include <net/rtnetlink.h>
-static __initconst const char banner[] =
- KERN_INFO "vcan: Virtual CAN interface driver\n";
-
MODULE_DESCRIPTION("virtual CAN interface");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
@@ -173,7 +170,7 @@ static struct rtnl_link_ops vcan_link_ops __read_mostly = {
static __init int vcan_init_module(void)
{
- printk(banner);
+ pr_info("vcan: Virtual CAN interface driver\n");
if (echo)
printk(KERN_INFO "vcan: enabled echo on driver level.\n");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 9234d808cbb3..7cf8f4ac281f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,13 +37,22 @@ config NET_DSA_MV88E6123_61_65
ethernet switch chips.
config NET_DSA_MV88E6171
- tristate "Marvell 88E6171 ethernet switch chip support"
+ tristate "Marvell 88E6171/6172 ethernet switch chip support"
select NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6171 ethernet switch
- chip.
+ This enables support for the Marvell 88E6171/6172 ethernet switch
+ chips.
+
+config NET_DSA_MV88E6352
+ tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
+ select NET_DSA
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_TAG_EDSA
+ ---help---
+ This enables support for the Marvell 88E6176 and 88E6352 ethernet
+ switch chips.
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 23a90de9830e..e2d51c4b9382 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -7,6 +7,9 @@ endif
ifdef CONFIG_NET_DSA_MV88E6131
mv88e6xxx_drv-y += mv88e6131.o
endif
+ifdef CONFIG_NET_DSA_MV88E6352
+mv88e6xxx_drv-y += mv88e6352.o
+endif
ifdef CONFIG_NET_DSA_MV88E6171
mv88e6xxx_drv-y += mv88e6171.o
endif
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 05b0ca3bf71d..c29aebe1e62b 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -69,8 +69,11 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
if (ret >= 0) {
- ret &= 0xfff0;
if (ret == 0x0600)
+ return "Marvell 88E6060 (A0)";
+ if (ret == 0x0601 || ret == 0x0602)
+ return "Marvell 88E6060 (B0)";
+ if ((ret & 0xfff0) == 0x0600)
return "Marvell 88E6060";
}
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index a332c53ff955..e9c736e1cef3 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -299,6 +299,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
mutex_init(&ps->smi_mutex);
mutex_init(&ps->stats_mutex);
+ mutex_init(&ps->phy_mutex);
ret = mv88e6123_61_65_switch_reset(ds);
if (ret < 0)
@@ -329,16 +330,28 @@ static int mv88e6123_61_65_port_to_phy_addr(int port)
static int
mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = mv88e6123_61_65_port_to_phy_addr(port);
- return mv88e6xxx_phy_read(ds, addr, regnum);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = mv88e6xxx_phy_read(ds, addr, regnum);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
}
static int
mv88e6123_61_65_phy_write(struct dsa_switch *ds,
int port, int regnum, u16 val)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = mv88e6123_61_65_port_to_phy_addr(port);
- return mv88e6xxx_phy_write(ds, addr, regnum, val);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
}
static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
@@ -372,6 +385,9 @@ static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
{ "hist_256_511bytes", 4, 0x0b, },
{ "hist_512_1023bytes", 4, 0x0c, },
{ "hist_1024_max_bytes", 4, 0x0d, },
+ { "sw_in_discards", 4, 0x110, },
+ { "sw_in_filtered", 2, 0x112, },
+ { "sw_out_filtered", 2, 0x113, },
};
static void
@@ -406,6 +422,11 @@ struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
.get_strings = mv88e6123_61_65_get_strings,
.get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats,
.get_sset_count = mv88e6123_61_65_get_sset_count,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6xxx_get_temp,
+#endif
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
};
MODULE_ALIAS("platform:mv88e6123");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 244c735014fa..1230f52aa70e 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -21,6 +21,7 @@
#define ID_6085 0x04a0
#define ID_6095 0x0950
#define ID_6131 0x1060
+#define ID_6131_B2 0x1066
static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
{
@@ -32,12 +33,15 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
if (ret >= 0) {
- ret &= 0xfff0;
- if (ret == ID_6085)
+ int ret_masked = ret & 0xfff0;
+
+ if (ret_masked == ID_6085)
return "Marvell 88E6085";
- if (ret == ID_6095)
+ if (ret_masked == ID_6095)
return "Marvell 88E6095/88E6095F";
- if (ret == ID_6131)
+ if (ret == ID_6131_B2)
+ return "Marvell 88E6131 (B2)";
+ if (ret_masked == ID_6131)
return "Marvell 88E6131";
}
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 78d8e876f3aa..aa33d16f2e22 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -1,4 +1,4 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
+/* net/dsa/mv88e6171.c - Marvell 88e6171/88e6172 switch chip support
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
*
@@ -29,6 +29,8 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
if (ret >= 0) {
if ((ret & 0xfff0) == 0x1710)
return "Marvell 88E6171";
+ if ((ret & 0xfff0) == 0x1720)
+ return "Marvell 88E6172";
}
return NULL;
@@ -314,6 +316,8 @@ static int mv88e6171_setup(struct dsa_switch *ds)
return ret;
}
+ mutex_init(&ps->phy_mutex);
+
return 0;
}
@@ -327,18 +331,28 @@ static int mv88e6171_port_to_phy_addr(int port)
static int
mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = mv88e6171_port_to_phy_addr(port);
+ int ret;
- return mv88e6xxx_phy_read(ds, addr, regnum);
+ mutex_lock(&ps->phy_mutex);
+ ret = mv88e6xxx_phy_read(ds, addr, regnum);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
}
static int
mv88e6171_phy_write(struct dsa_switch *ds,
int port, int regnum, u16 val)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = mv88e6171_port_to_phy_addr(port);
+ int ret;
- return mv88e6xxx_phy_write(ds, addr, regnum, val);
+ mutex_lock(&ps->phy_mutex);
+ ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
}
static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = {
@@ -406,6 +420,12 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.get_strings = mv88e6171_get_strings,
.get_ethtool_stats = mv88e6171_get_ethtool_stats,
.get_sset_count = mv88e6171_get_sset_count,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6xxx_get_temp,
+#endif
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
};
MODULE_ALIAS("platform:mv88e6171");
+MODULE_ALIAS("platform:mv88e6172");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
new file mode 100644
index 000000000000..258d9ef5ef25
--- /dev/null
+++ b/drivers/net/dsa/mv88e6352.c
@@ -0,0 +1,788 @@
+/*
+ * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support
+ *
+ * Copyright (c) 2014 Guenter Roeck
+ *
+ * Derived from mv88e6123_61_65.c
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
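+/* Wait for the bits in @mask to clear in the given GLOBAL2 register,
+ * polling for up to 100ms (HZ / 10).
+ */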
+static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask)
+{
+ unsigned long timeout = jiffies + HZ / 10;
+
+ while (time_before(jiffies, timeout)) {
+ int ret;
+
+ ret = REG_READ(REG_GLOBAL2, reg);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & mask))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+ return -ETIMEDOUT;
+}
+
+static inline int mv88e6352_phy_wait(struct dsa_switch *ds)
+{
+ return mv88e6352_wait(ds, 0x18, 0x8000);
+}
+
+static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds)
+{
+ return mv88e6352_wait(ds, 0x14, 0x0800);
+}
+
+static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds)
+{
+ return mv88e6352_wait(ds, 0x14, 0x8000);
+}
+
+static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum)
+{
+ int ret;
+
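+ /* SMI PHY command register (GLOBAL2 0x18): 0x9800 sets the busy
+ * flag (bit 15), clause 22 mode (bit 12) and the read opcode
+ * (bits 11:10); the PHY address and register number occupy bits
+ * 9:5 and 4:0. The result lands in the data register (0x19).
+ */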
+ REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum);
+
+ ret = mv88e6352_phy_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ return REG_READ(REG_GLOBAL2, 0x19);
+}
+
+static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum,
+ u16 val)
+{
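+ /* Load the SMI data register, then issue a clause 22 write
+ * command (0x9400).
+ */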
+ REG_WRITE(REG_GLOBAL2, 0x19, val);
+ REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum);
+
+ return mv88e6352_phy_wait(ds);
+}
+
+static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
+{
+ struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
+ int ret;
+
+ if (bus == NULL)
+ return NULL;
+
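+ /* Port 0 register 0x03 holds the switch ID: product number in
+ * bits 15:4, revision in bits 3:0.
+ */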
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ if ((ret & 0xfff0) == 0x1760)
+ return "Marvell 88E6176";
+ if (ret == 0x3521)
+ return "Marvell 88E6352 (A0)";
+ if (ret == 0x3522)
+ return "Marvell 88E6352 (A1)";
+ if ((ret & 0xfff0) == 0x3520)
+ return "Marvell 88E6352";
+ }
+
+ return NULL;
+}
+
+static int mv88e6352_switch_reset(struct dsa_switch *ds)
+{
+ unsigned long timeout;
+ int ret;
+ int i;
+
+ /* Set all ports to the disabled state. */
+ for (i = 0; i < 7; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
+
+ /* Reset the switch. Keep PPU active (bit 14, undocumented).
+ * The PPU needs to be active to support indirect phy register
+ * accesses through global registers 0x18 and 0x19.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
+
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0x8800) == 0x8800)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6352_setup_global(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ /* Discard packets with excessive collisions,
+ * mask all interrupt sources, enable PPU (bit 14, undocumented).
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
+
+ /* Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+ /* Configure the priority mapping registers. */
+ ret = mv88e6xxx_config_prio(ds);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the upstream port, and configure the upstream
+ * port as the port to which ingress and egress monitor frames
+ * are to be sent.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+
+ /* Disable remote management for now, and set the switch's
+ * DSA device number.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
+
+ /* Send all frames with destination addresses matching
+ * 01:80:c2:00:00:2x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
+
+ /* Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+ /* Disable the loopback filter, disable flow control
+ * messages, disable flood broadcast override, disable
+ * removing of provider tags, disable ATU age violation
+ * interrupts, disable tag flow control, force flow
+ * control priority to the highest, and send all special
+ * multicast frames to the CPU at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+ /* Program the DSA routing table. */
+ for (i = 0; i < 32; i++) {
+ int nexthop = 0x1f;
+
+ if (i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+ }
+
+ /* Clear all trunk masks. */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f);
+
+ /* Clear all trunk mappings. */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+ /* Disable ingress rate limiting by resetting all ingress
+ * rate limit registers to their initial state.
+ */
+ for (i = 0; i < 7; i++)
+ REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
+
+ /* Initialise cross-chip port VLAN table to reset defaults. */
+ REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
+
+ /* Clear the priority override table. */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
+
+ /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+
+ return 0;
+}
+
+static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
+{
+ int addr = REG_PORT(p);
+ u16 val;
+
+ /* MAC Forcing register: don't force link, speed, duplex
+ * or flow control state to any particular values on physical
+ * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+ * full duplex.
+ */
+ if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
+ REG_WRITE(addr, 0x01, 0x003e);
+ else
+ REG_WRITE(addr, 0x01, 0x0003);
+
+ /* Do not limit the period of time that this port can be
+ * paused for by the remote end or the period of time that
+ * this port can pause the remote end.
+ */
+ REG_WRITE(addr, 0x02, 0x0000);
+
+ /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ val = 0x0433;
+ if (dsa_is_cpu_port(ds, p)) {
+ if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+ val |= 0x3300;
+ else
+ val |= 0x0100;
+ }
+ if (ds->dsa_port_mask & (1 << p))
+ val |= 0x0100;
+ if (p == dsa_upstream_port(ds))
+ val |= 0x000c;
+ REG_WRITE(addr, 0x04, val);
+
+ /* Port Control 1: disable trunking. Also, if this is the
+ * CPU port, enable learn messages to be sent to this port.
+ */
+ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+ /* Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ val = (p & 0xf) << 12;
+ if (dsa_is_cpu_port(ds, p))
+ val |= ds->phys_port_mask;
+ else
+ val |= 1 << dsa_upstream_port(ds);
+ REG_WRITE(addr, 0x06, val);
+
+ /* Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ REG_WRITE(addr, 0x07, 0x0000);
+
+ /* Port Control 2: don't force a good FCS, set the maximum
+ * frame size to 10240 bytes, don't let the switch add or
+ * strip 802.1q tags, don't discard tagged or untagged frames
+ * on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
+ */
+ REG_WRITE(addr, 0x08, 0x2080);
+
+ /* Egress rate control: disable egress rate control. */
+ REG_WRITE(addr, 0x09, 0x0001);
+
+ /* Egress rate control 2: disable egress rate control. */
+ REG_WRITE(addr, 0x0a, 0x0000);
+
+ /* Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ /* Port ATU control: disable limiting the number of address
+ * database entries that this port is allowed to use.
+ */
+ REG_WRITE(addr, 0x0c, 0x0000);
+
+ /* Priority Override: disable DA, SA and VTU priority override. */
+ REG_WRITE(addr, 0x0d, 0x0000);
+
+ /* Port Ethertype: use the Ethertype DSA Ethertype value. */
+ REG_WRITE(addr, 0x0f, ETH_P_EDSA);
+
+ /* Tag Remap: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x18, 0x3210);
+
+ /* Tag Remap 2: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x19, 0x7654);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e6352_phy_page_read(struct dsa_switch *ds,
+ int port, int page, int reg)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = __mv88e6352_phy_write(ds, port, 0x16, page);
+ if (ret < 0)
+ goto error;
+ ret = __mv88e6352_phy_read(ds, port, reg);
+error:
+ __mv88e6352_phy_write(ds, port, 0x16, 0x0);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+static int mv88e6352_phy_page_write(struct dsa_switch *ds,
+ int port, int page, int reg, int val)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = __mv88e6352_phy_write(ds, port, 0x16, page);
+ if (ret < 0)
+ goto error;
+
+ ret = __mv88e6352_phy_write(ds, port, reg, val);
+error:
+ __mv88e6352_phy_write(ds, port, 0x16, 0x0);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+
+static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
+{
+ int ret;
+
+ *temp = 0;
+
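+ /* The sensor readout is in PHY 0, page 6, register 27: the low
+ * byte is the temperature in degrees C, offset by 25.
+ */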
+ ret = mv88e6352_phy_page_read(ds, 0, 6, 27);
+ if (ret < 0)
+ return ret;
+
+ *temp = (ret & 0xff) - 25;
+
+ return 0;
+}
+
+static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+ int ret;
+
+ *temp = 0;
+
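+ /* The limit is in bits 12:8 of page 6 register 26, in 5 degree
+ * steps starting at -25 degrees C.
+ */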
+ ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+ return 0;
+}
+
+static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+ int ret;
+
+ ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ if (ret < 0)
+ return ret;
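+ /* Convert to 5 degree steps with the -25 degree offset, clamped
+ * to the 5-bit register field.
+ */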
+ temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+ return mv88e6352_phy_page_write(ds, 0, 6, 26,
+ (ret & 0xe0ff) | (temp << 8));
+}
+
+static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+ int ret;
+
+ *alarm = false;
+
+ ret = mv88e6352_phy_page_read(ds, 0, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *alarm = !!(ret & 0x40);
+
+ return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
+static int mv88e6352_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+ int i;
+
+ mutex_init(&ps->smi_mutex);
+ mutex_init(&ps->stats_mutex);
+ mutex_init(&ps->phy_mutex);
+ mutex_init(&ps->eeprom_mutex);
+
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
+ ret = mv88e6352_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise vtu and atu */
+
+ ret = mv88e6352_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 7; i++) {
+ ret = mv88e6352_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
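+/* Ports 0-4 have internal PHYs, addressable at the port number. */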
+static int mv88e6352_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 4)
+ return port;
+ return -EINVAL;
+}
+
+static int
+mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int addr = mv88e6352_port_to_phy_addr(port);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = __mv88e6352_phy_read(ds, addr, regnum);
+ mutex_unlock(&ps->phy_mutex);
+
+ return ret;
+}
+
+static int
+mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int addr = mv88e6352_port_to_phy_addr(port);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ mutex_lock(&ps->phy_mutex);
+ ret = __mv88e6352_phy_write(ds, addr, regnum, val);
+ mutex_unlock(&ps->phy_mutex);
+
+ return ret;
+}
+
+static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+ { "sw_in_discards", 4, 0x110, },
+ { "sw_in_filtered", 2, 0x112, },
+ { "sw_out_filtered", 2, 0x113, },
+};
+
+static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->eeprom_mutex);
+
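+ /* EEPROM command register (GLOBAL2 0x14): 0xc000 sets the busy
+ * flag (bit 15) and the read opcode (bits 14:12); the word is
+ * returned in the data register (GLOBAL2 0x15).
+ */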
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
+ 0xc000 | (addr & 0xff));
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6352_eeprom_busy_wait(ds);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
+error:
+ mutex_unlock(&ps->eeprom_mutex);
+ return ret;
+}
+
+static int mv88e6352_get_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int offset;
+ int len;
+ int ret;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ eeprom->magic = 0xc3ec4951;
+
+ ret = mv88e6352_eeprom_load_wait(ds);
+ if (ret < 0)
+ return ret;
+
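+ /* The EEPROM is addressed in 16-bit words, so copy any odd
+ * leading and trailing bytes separately.
+ */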
+ if (offset & 1) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = (word >> 8) & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = word & 0xff;
+ *data++ = (word >> 8) & 0xff;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = word & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
+{
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & 0x0400))
+ return -EROFS;
+
+ return 0;
+}
+
+static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
+ u16 data)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->eeprom_mutex);
+
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
+ if (ret < 0)
+ goto error;
+
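+ /* 0xb000 sets the busy flag (bit 15) and the write opcode
+ * (bits 14:12).
+ */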
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
+ 0xb000 | (addr & 0xff));
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6352_eeprom_busy_wait(ds);
+error:
+ mutex_unlock(&ps->eeprom_mutex);
+ return ret;
+}
+
+static int mv88e6352_set_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int offset;
+ int ret;
+ int len;
+
+ if (eeprom->magic != 0xc3ec4951)
+ return -EINVAL;
+
+ ret = mv88e6352_eeprom_is_readonly(ds);
+ if (ret)
+ return ret;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ ret = mv88e6352_eeprom_load_wait(ds);
+ if (ret < 0)
+ return ret;
+
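+ /* Read-modify-write the words containing odd leading and
+ * trailing bytes.
+ */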
+ if (offset & 1) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ word = (*data++ << 8) | (word & 0xff);
+
+ ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ int word;
+
+ word = *data++;
+ word |= *data++ << 8;
+
+ ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ int word;
+
+ word = mv88e6352_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ word = (word & 0xff00) | *data++;
+
+ ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+static void
+mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats),
+ mv88e6352_hw_stats, port, data);
+}
+
+static void
+mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats),
+ mv88e6352_hw_stats, port, data);
+}
+
+static int mv88e6352_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mv88e6352_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6352_switch_driver = {
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .priv_size = sizeof(struct mv88e6xxx_priv_state),
+ .probe = mv88e6352_probe,
+ .setup = mv88e6352_setup,
+ .set_addr = mv88e6xxx_set_addr_indirect,
+ .phy_read = mv88e6352_phy_read,
+ .phy_write = mv88e6352_phy_write,
+ .poll_link = mv88e6xxx_poll_link,
+ .get_strings = mv88e6352_get_strings,
+ .get_ethtool_stats = mv88e6352_get_ethtool_stats,
+ .get_sset_count = mv88e6352_get_sset_count,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6352_get_temp,
+ .get_temp_limit = mv88e6352_get_temp_limit,
+ .set_temp_limit = mv88e6352_set_temp_limit,
+ .get_temp_alarm = mv88e6352_get_temp_alarm,
+#endif
+ .get_eeprom = mv88e6352_get_eeprom,
+ .set_eeprom = mv88e6352_set_eeprom,
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
+};
+
+MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index a6c90cf5634d..cd6807c6b4ed 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -485,20 +485,108 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
for (i = 0; i < nr_stats; i++) {
struct mv88e6xxx_hw_stat *s = stats + i;
u32 low;
- u32 high;
-
+ u32 high = 0;
+
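+ /* Stats with an offset of 0x100 or more are read from the
+ * per-port registers (at offset - 0x100) instead of the
+ * stats unit.
+ */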
+ if (s->reg >= 0x100) {
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100);
+ if (ret < 0)
+ goto error;
+ low = ret;
+ if (s->sizeof_stat == 4) {
+ ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100 + 1);
+ if (ret < 0)
+ goto error;
+ high = ret;
+ }
+ data[i] = (((u64)high) << 16) | low;
+ continue;
+ }
mv88e6xxx_stats_read(ds, s->reg, &low);
if (s->sizeof_stat == 8)
mv88e6xxx_stats_read(ds, s->reg + 1, &high);
- else
- high = 0;
data[i] = (((u64)high) << 32) | low;
}
-
+error:
mutex_unlock(&ps->stats_mutex);
}
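+/* Each port exposes a bank of 32 16-bit registers. */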
+int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+{
+ return 32 * sizeof(u16);
+}
+
+void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *_p)
+{
+ u16 *p = _p;
+ int i;
+
+ regs->version = 0;
+
+ memset(p, 0xff, 32 * sizeof(u16));
+
+ for (i = 0; i < 32; i++) {
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
+ if (ret >= 0)
+ p[i] = ret;
+ }
+}
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+ int val;
+
+ *temp = 0;
+
+ mutex_lock(&ps->phy_mutex);
+
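+ /* Select PHY page 6, which holds the temperature sensor
+ * registers; page 0 is restored on exit.
+ */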
+ ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+ if (ret < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+ val = mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ if (val < 0) {
+ ret = val;
+ goto error;
+ }
+
+ /* Disable temperature sensor */
+ ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
+ if (ret < 0)
+ goto error;
+
+ *temp = ((val & 0x1f) - 5) * 5;
+
+error:
+ mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+ mutex_unlock(&ps->phy_mutex);
+ return ret;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
@@ -507,6 +595,9 @@ static int __init mv88e6xxx_init(void)
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
register_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
+ register_switch_driver(&mv88e6352_switch_driver);
+#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
register_switch_driver(&mv88e6171_switch_driver);
#endif
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 5e5145ad9525..03e397efde36 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -37,6 +37,17 @@ struct mv88e6xxx_priv_state {
*/
struct mutex stats_mutex;
+ /* This mutex serializes phy access for chips with
+ * indirect phy addressing. It is unused for chips
+ * with direct phy access.
+ */
+ struct mutex phy_mutex;
+
+ /* This mutex serializes eeprom access for chips with
+ * eeprom support.
+ */
+ struct mutex eeprom_mutex;
+
int id; /* switch product id */
};
@@ -67,9 +78,14 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds,
void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int nr_stats, struct mv88e6xxx_hw_stat *stats,
int port, uint64_t *data);
+int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
+void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *_p);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
extern struct dsa_switch_driver mv88e6131_switch_driver;
extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+extern struct dsa_switch_driver mv88e6352_switch_driver;
extern struct dsa_switch_driver mv88e6171_switch_driver;
#define REG_READ(addr, reg) \
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index ff435fbd1ad0..49adbf1b7574 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -38,6 +38,9 @@
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
+#define DRV_NAME "dummy"
+#define DRV_VERSION "1.0"
+
static int numdummies = 1;
/* fake multicast ability */
@@ -120,12 +123,24 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_change_carrier = dummy_change_carrier,
};
+static void dummy_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static const struct ethtool_ops dummy_ethtool_ops = {
+ .get_drvinfo = dummy_get_drvinfo,
+};
+
static void dummy_setup(struct net_device *dev)
{
ether_setup(dev);
/* Initialize the device structure. */
dev->netdev_ops = &dummy_netdev_ops;
+ dev->ethtool_ops = &dummy_ethtool_ops;
dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
@@ -150,7 +165,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
}
static struct rtnl_link_ops dummy_link_ops __read_mostly = {
- .kind = "dummy",
+ .kind = DRV_NAME,
.setup = dummy_setup,
.validate = dummy_validate,
};
@@ -209,4 +224,5 @@ static void __exit dummy_cleanup_module(void)
module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_RTNL_LINK("dummy");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 48775b88bac7..dede43f4ce09 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1285,7 +1285,7 @@ typhoon_request_firmware(struct typhoon *tp)
return err;
}
- image_data = (u8 *) typhoon_fw->data;
+ image_data = typhoon_fw->data;
remaining = typhoon_fw->size;
if (remaining < sizeof(struct typhoon_file_header))
goto invalid_fw;
@@ -1343,7 +1343,7 @@ typhoon_download_firmware(struct typhoon *tp)
int i;
int err;
- image_data = (u8 *) typhoon_fw->data;
+ image_data = typhoon_fw->data;
fHdr = (struct typhoon_file_header *) image_data;
/* Cannot just map the firmware image using pci_map_single() as
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 1ed1fbba5d58..df76050d0a9d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -155,6 +155,7 @@ source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
+source "drivers/net/ethernet/rocker/Kconfig"
config S6GMAC
tristate "S6105 GMAC ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 6e0b629e9859..bf56f8b36e90 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
+obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 8319c99331b0..7a5e4aa5415e 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on OF_NET
+ depends on OF_NET && HAS_IOMEM
select PHYLIB
select AMD_XGBE_PHY
select BITREVERSE
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index caade30820d5..75b08c63d39f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -125,9 +125,6 @@
#define DMA_AXIAWCR 0x3018
#define DMA_DSR0 0x3020
#define DMA_DSR1 0x3024
-#define DMA_DSR2 0x3028
-#define DMA_DSR3 0x302c
-#define DMA_DSR4 0x3030
/* DMA register entry bit positions and sizes */
#define DMA_AXIARCR_DRC_INDEX 0
@@ -158,10 +155,6 @@
#define DMA_AXIAWCR_TDC_WIDTH 4
#define DMA_AXIAWCR_TDD_INDEX 28
#define DMA_AXIAWCR_TDD_WIDTH 2
-#define DMA_DSR0_RPS_INDEX 8
-#define DMA_DSR0_RPS_WIDTH 4
-#define DMA_DSR0_TPS_INDEX 12
-#define DMA_DSR0_TPS_WIDTH 4
#define DMA_ISR_MACIS_INDEX 17
#define DMA_ISR_MACIS_WIDTH 1
#define DMA_ISR_MTLIS_INDEX 16
@@ -175,6 +168,20 @@
#define DMA_SBMR_UNDEF_INDEX 0
#define DMA_SBMR_UNDEF_WIDTH 1
+/* DMA register values */
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
/* DMA channel register offsets
* Multiple channels can be active. The first channel has registers
* that begin at 0x3100. Each subsequent channel has registers that
@@ -207,6 +214,8 @@
/* DMA channel register entry bit positions and sizes */
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
#define DMA_CH_IER_AIE_INDEX 15
#define DMA_CH_IER_AIE_WIDTH 1
#define DMA_CH_IER_FBEE_INDEX 12
@@ -306,6 +315,9 @@
#define MAC_MACA0LR 0x0304
#define MAC_MACA1HR 0x0308
#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
#define MAC_TSCR 0x0d00
#define MAC_SSIR 0x0d04
#define MAC_STSR 0x0d08
@@ -429,6 +441,8 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
#define MAC_RCR_IPC_WIDTH 1
#define MAC_RCR_JE_INDEX 8
@@ -445,6 +459,24 @@
#define MAC_RFCR_UP_WIDTH 1
#define MAC_RQC0R_RXQ0EN_INDEX 0
#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
#define MAC_SSIR_SNSINC_INDEX 8
#define MAC_SSIR_SNSINC_WIDTH 8
#define MAC_SSIR_SSINC_INDEX 16
@@ -844,9 +876,13 @@
#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
#define RX_NORMAL_DESC3_CDA_INDEX 27
#define RX_NORMAL_DESC3_CDA_WIDTH 1
#define RX_NORMAL_DESC3_CTXT_INDEX 30
@@ -855,14 +891,27 @@
#define RX_NORMAL_DESC3_ES_WIDTH 1
#define RX_NORMAL_DESC3_ETLT_INDEX 16
#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
#define RX_NORMAL_DESC3_INTE_INDEX 30
#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
#define RX_NORMAL_DESC3_LD_INDEX 28
#define RX_NORMAL_DESC3_LD_WIDTH 1
#define RX_NORMAL_DESC3_OWN_INDEX 31
#define RX_NORMAL_DESC3_OWN_WIDTH 1
#define RX_NORMAL_DESC3_PL_INDEX 0
#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
#define RX_CONTEXT_DESC3_TSA_INDEX 4
#define RX_CONTEXT_DESC3_TSA_WIDTH 1
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index b15551bad7fa..a50891f52197 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -117,7 +117,7 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring)
@@ -131,13 +131,35 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
if (ring->rdata) {
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
}
kfree(ring->rdata);
ring->rdata = NULL;
}
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
if (ring->rdesc) {
dma_free_coherent(pdata->dev,
(sizeof(struct xgbe_ring_desc) *
@@ -233,6 +255,96 @@ err_ring:
return ret;
}
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ int ret;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(pdata->dev, pages_dma);
+ if (ret) {
+ put_page(pages);
+ return ret;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+ struct xgbe_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring,
+ struct xgbe_ring_data *rdata)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
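+ /* Keep the receive buffer allocation below the costly order
+ * so it is more likely to succeed under GFP_ATOMIC.
+ */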
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+ order);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -266,7 +378,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
ring->cur = 0;
ring->dirty = 0;
- ring->tx.queue_stopped = 0;
+ memset(&ring->tx, 0, sizeof(ring->tx));
hw_if->tx_desc_init(channel);
}
@@ -281,8 +393,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
struct xgbe_ring *ring;
struct xgbe_ring_desc *rdesc;
struct xgbe_ring_data *rdata;
- dma_addr_t rdesc_dma, skb_dma;
- struct sk_buff *skb = NULL;
+ dma_addr_t rdesc_dma;
unsigned int i, j;
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -302,22 +413,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
rdata->rdesc = rdesc;
rdata->rdesc_dma = rdesc_dma;
- /* Allocate skb & assign to each rdesc */
- skb = dev_alloc_skb(pdata->rx_buf_size);
- if (skb == NULL)
- break;
- skb_dma = dma_map_single(pdata->dev, skb->data,
- pdata->rx_buf_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev,
- "failed to do the dma map\n");
- dev_kfree_skb_any(skb);
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
- }
- rdata->skb = skb;
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = pdata->rx_buf_size;
rdesc++;
rdesc_dma += sizeof(struct xgbe_ring_desc);
@@ -325,8 +422,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
ring->cur = 0;
ring->dirty = 0;
- ring->rx.realloc_index = 0;
- ring->rx.realloc_threshold = 0;
+ memset(&ring->rx, 0, sizeof(ring->rx));
hw_if->rx_desc_init(channel);
}
@@ -334,8 +430,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
-static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
- struct xgbe_ring_data *rdata)
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
{
if (rdata->skb_dma) {
if (rdata->mapped_as_page) {
@@ -354,8 +450,29 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
rdata->skb = NULL;
}
- rdata->tso_header = 0;
- rdata->len = 0;
+ if (rdata->rx.hdr.pa.pages)
+ put_page(rdata->rx.hdr.pa.pages);
+
+ if (rdata->rx.hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
+ rdata->rx.hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.hdr.pa_unmap.pages);
+ }
+
+ if (rdata->rx.buf.pa.pages)
+ put_page(rdata->rx.buf.pa.pages);
+
+ if (rdata->rx.buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
+ rdata->rx.buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.buf.pa_unmap.pages);
+ }
+
+ memset(&rdata->tx, 0, sizeof(rdata->tx));
+ memset(&rdata->rx, 0, sizeof(rdata->rx));
+
rdata->mapped_as_page = 0;
if (rdata->state_saved) {
@@ -413,7 +530,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = packet->header_len;
- rdata->tso_header = 1;
offset = packet->header_len;
@@ -497,7 +613,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
err_out:
while (start_index < cur_index) {
rdata = XGBE_GET_DESC_DATA(ring, start_index++);
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
}
DBGPR("<--xgbe_map_tx_skb: count=0\n");
@@ -505,40 +621,25 @@ err_out:
return 0;
}
-static void xgbe_realloc_skb(struct xgbe_channel *channel)
+static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- struct sk_buff *skb = NULL;
- dma_addr_t skb_dma;
int i;
- DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+ DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
ring->rx.realloc_index);
for (i = 0; i < ring->dirty; i++) {
rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
/* Reset rdata values */
- xgbe_unmap_skb(pdata, rdata);
+ xgbe_unmap_rdata(pdata, rdata);
- /* Allocate skb & assign to each rdesc */
- skb = dev_alloc_skb(pdata->rx_buf_size);
- if (skb == NULL)
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
- skb_dma = dma_map_single(pdata->dev, skb->data,
- pdata->rx_buf_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev,
- "failed to do the dma map\n");
- dev_kfree_skb_any(skb);
- break;
- }
- rdata->skb = skb;
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = pdata->rx_buf_size;
hw_if->rx_desc_reset(rdata);
@@ -546,7 +647,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
}
ring->dirty = 0;
- DBGPR("<--xgbe_realloc_skb\n");
+ DBGPR("<--xgbe_realloc_rx_buffer\n");
}
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
@@ -556,8 +657,8 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
desc_if->free_ring_resources = xgbe_free_ring_resources;
desc_if->map_tx_skb = xgbe_map_tx_skb;
- desc_if->realloc_skb = xgbe_realloc_skb;
- desc_if->unmap_skb = xgbe_unmap_skb;
+ desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+ desc_if->unmap_rdata = xgbe_unmap_rdata;
desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 9da3a03e8c07..53f5f66ec2ee 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
}
}
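+/* Split header (SPH) mode: receive packet headers and payload are
+ * DMAed into separate buffers.
+ */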
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+
+ mutex_lock(&pdata->rss_mutex);
+
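+ /* OB is the operation busy flag; a previous RSS register write
+ * still in flight means we must not start another.
+ */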
+ if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xgbe_write_rss_reg(pdata,
+ XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+ return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+ return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xgbe_enable_rss(pdata);
+ else
+ ret = xgbe_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
@@ -465,17 +620,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
if (channel->tx_ring) {
/* Enable the following Tx interrupts
- * TIE - Transmit Interrupt Enable (unless polling)
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
*/
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
}
if (channel->rx_ring) {
/* Enable following Rx interrupts
* RBUE - Receive Buffer Unavailable Enable
- * RIE - Receive Interrupt Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
*/
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
}
XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
@@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc1 = 0;
rdesc->desc2 = 0;
rdesc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
int i;
int start_index = ring->cur;
@@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- rdesc = rdata->rdesc;
- /* Initialize Tx descriptor
- * Set buffer 1 (lo) address to zero
- * Set buffer 1 (hi) address to zero
- * Reset all other control bits (IC, TTSE, B2L & B1L)
- * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
- * etc)
- */
- rdesc->desc0 = 0;
- rdesc->desc1 = 0;
- rdesc->desc2 = 0;
- rdesc->desc3 = 0;
+ /* Initialize Tx descriptor */
+ xgbe_tx_desc_reset(rdata);
}
- /* Make sure everything is written to the descriptor(s) before
- * telling the device about them
- */
- wmb();
-
/* Update the total number of Tx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
@@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
struct xgbe_ring_desc *rdesc = rdata->rdesc;
/* Reset the Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control bits
- * OWN and INTE
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
- rdesc->desc2 = 0;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
- rdesc->desc3 = 0;
- if (rdata->interrupt)
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ rdata->interrupt ? 1 : 0);
/* Since the Rx DMA engine is likely running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
@@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
unsigned int start_index = ring->cur;
unsigned int rx_coalesce, rx_frames;
unsigned int i;
@@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- rdesc = rdata->rdesc;
- /* Initialize Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control
- * bits OWN and INTE appropriateley
- */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
- rdesc->desc2 = 0;
- rdesc->desc3 = 0;
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
- rdata->interrupt = 1;
- if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
- /* Clear interrupt on completion bit */
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
- 0);
+ /* Set interrupt on completion bit as appropriate */
+ if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
rdata->interrupt = 0;
- }
- }
+ else
+ rdata->interrupt = 1;
- /* Make sure everything is written to the descriptors before
- * telling the device about them
- */
- wmb();
+ /* Initialize Rx descriptor */
+ xgbe_rx_desc_reset(rdata);
+ }
/* Update the total number of Rx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
@@ -1198,7 +1325,30 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
xgbe_config_flow_control(pdata);
}
-static void xgbe_pre_xmit(struct xgbe_channel *channel)
+static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring_data *rdata;
+
+ /* Issue a poll command to Tx DMA by writing address
+ * of next immediate free descriptor
+ */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx coalescing timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ hrtimer_start(&channel->tx_timer,
+ ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ }
+
+ ring->tx.xmit_more = 0;
+}
+
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->tx_ring;
@@ -1207,11 +1357,11 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
struct xgbe_packet_data *packet = &ring->packet_data;
unsigned int csum, tso, vlan;
unsigned int tso_context, vlan_context;
- unsigned int tx_coalesce, tx_frames;
+ unsigned int tx_set_ic;
int start_index = ring->cur;
int i;
- DBGPR("-->xgbe_pre_xmit\n");
+ DBGPR("-->xgbe_dev_xmit\n");
csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
CSUM_ENABLE);
@@ -1230,10 +1380,26 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
else
vlan_context = 0;
- tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
- tx_frames = pdata->tx_frames;
- if (tx_coalesce && !channel->tx_timer_active)
- ring->coalesce_count = 0;
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ ring->coalesce_count += packet->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (packet->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) <
+ packet->tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
@@ -1300,13 +1466,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
- /* Set IC bit based on Tx coalescing settings */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
- if (tx_coalesce && (!tx_frames ||
- (++ring->coalesce_count % tx_frames)))
- /* Clear IC bit */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
-
/* Mark it as First Descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
@@ -1351,13 +1510,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
rdata->skb_dma_len);
- /* Set IC bit based on Tx coalescing settings */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
- if (tx_coalesce && (!tx_frames ||
- (++ring->coalesce_count % tx_frames)))
- /* Clear IC bit */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
-
/* Set OWN bit */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
@@ -1373,6 +1525,14 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
/* Set LAST bit for the last descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ rdata->tx.packets = packet->tx_packets;
+ rdata->tx.bytes = packet->tx_bytes;
+
/* In case the Tx DMA engine is running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
* for the first descriptor
@@ -1391,26 +1551,19 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
/* Make sure ownership is written to the descriptor */
wmb();
- /* Issue a poll command to Tx DMA by writing address
- * of next immediate free descriptor */
ring->cur++;
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
- XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
-
- /* Start the Tx coalescing timer */
- if (tx_coalesce && !channel->tx_timer_active) {
- channel->tx_timer_active = 1;
- hrtimer_start(&channel->tx_timer,
- ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
- HRTIMER_MODE_REL);
- }
+ if (!packet->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xgbe_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
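This is the standard xmit_more batching pattern: the Tx tail-pointer write (the doorbell) is skipped while the stack promises more packets, then issued once for the whole burst. A minimal sketch, with ring_doorbell() as a hypothetical stand-in for the real xgbe_tx_start_xmit(), which writes DMA_CH_TDTR_LO:

    if (!skb->xmit_more || netif_xmit_stopped(txq))
            ring_doorbell(ring);        /* flush everything queued so far */
    else
            ring->tx.xmit_more = 1;     /* doorbell deferred to a later skb */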
DBGPR(" %s: descriptors %u to %u written\n",
channel->name, start_index & (ring->rdesc_count - 1),
(ring->cur - 1) & (ring->rdesc_count - 1));
- DBGPR("<--xgbe_pre_xmit\n");
+ DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
@@ -1420,7 +1573,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
struct net_device *netdev = channel->pdata->netdev;
- unsigned int err, etlt;
+ unsigned int err, etlt, l34t;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1431,6 +1584,9 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
return 1;
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ rmb();
+
#ifdef XGMAC_ENABLE_RX_DESC_DUMP
xgbe_dump_rx_desc(ring, rdesc, ring->cur);
#endif
@@ -1454,8 +1610,33 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT, 1);
+ /* Get the header length */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+ RX_NORMAL_DESC2, HL);
+
+ /* Get the RSS hash */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RSS_HASH, 1);
+
+ packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ packet->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ packet->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
/* Get the packet length */
- rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
/* Not all the data has been transferred for this packet */
@@ -1478,7 +1659,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
DBGPR(" err=%u, etlt=%#x\n", err, etlt);
- if (!err || (err && !etlt)) {
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
if ((etlt == 0x09) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
@@ -2298,6 +2480,47 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
+static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* Calculate the status register to read and the position within */
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = XGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
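A worked example of the register/bit-position arithmetic above; the constants come from xgbe-common.h, and the concrete queue split is an assumption for illustration:

    /* queue 0:  tx_dsr = DMA_DSR0
     *           tx_pos = 0 * DMA_DSR_Q_WIDTH + DMA_DSR0_TPS_START
     * queue 5:  tx_qidx = 5 - DMA_DSRX_FIRST_QUEUE = 2 (if FIRST_QUEUE is 3)
     *           tx_dsr = DMA_DSR1 + (2 / DMA_DSRX_QPR) * DMA_DSRX_INC = DMA_DSR1
     *           tx_pos = (2 % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH + DMA_DSRX_TPS_START
     */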
+
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
@@ -2326,6 +2549,15 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2417,6 +2649,15 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2485,6 +2726,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -2561,7 +2804,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->powerup_rx = xgbe_powerup_rx;
hw_if->powerdown_rx = xgbe_powerdown_rx;
- hw_if->pre_xmit = xgbe_pre_xmit;
+ hw_if->dev_xmit = xgbe_dev_xmit;
hw_if->dev_read = xgbe_dev_read;
hw_if->enable_int = xgbe_enable_int;
hw_if->disable_int = xgbe_disable_int;
@@ -2575,6 +2818,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_desc_reset = xgbe_rx_desc_reset;
hw_if->is_last_desc = xgbe_is_last_desc;
hw_if->is_context_desc = xgbe_is_context_desc;
+ hw_if->tx_start_xmit = xgbe_tx_start_xmit;
/* For FLOW ctrl */
hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
@@ -2620,5 +2864,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+ /* For Receive Side Scaling */
+ hw_if->enable_rss = xgbe_enable_rss;
+ hw_if->disable_rss = xgbe_disable_rss;
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index d0e35302410f..7bb5f07dbeef 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -114,6 +114,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
@@ -126,14 +127,126 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_poll(struct napi_struct *, int);
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel_mem, *channel;
+ struct xgbe_ring *tx_ring, *rx_ring;
+ unsigned int count, i;
+ int ret = -ENOMEM;
+
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+ if (!channel_mem)
+ goto err_channel;
+
+ tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!tx_ring)
+ goto err_tx_ring;
+
+ rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!rx_ring)
+ goto err_rx_ring;
+
+ for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (pdata->per_channel_irq) {
+ /* Get the DMA interrupt (offset 1) */
+ ret = platform_get_irq(pdata->pdev, i + 1);
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "platform_get_irq %u failed\n",
+ i + 1);
+ goto err_irq;
+ }
+
+ channel->dma_irq = ret;
+ }
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->tx_ring = tx_ring++;
+ }
+
+ if (i < pdata->rx_ring_count) {
+ spin_lock_init(&rx_ring->lock);
+ channel->rx_ring = rx_ring++;
+ }
+
+ DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->queue_index, channel->dma_regs,
+ channel->dma_irq, channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel = channel_mem;
+ pdata->channel_count = count;
+
+ return 0;
+
+err_irq:
+ kfree(rx_ring);
+
+err_rx_ring:
+ kfree(tx_ring);
+
+err_tx_ring:
+ kfree(channel_mem);
+
+err_channel:
+ return ret;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->channel)
+ return;
+
+ kfree(pdata->channel->rx_ring);
+ kfree(pdata->channel->tx_ring);
+ kfree(pdata->channel);
+
+ pdata->channel = NULL;
+ pdata->channel_count = 0;
+}
+
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
return (ring->rdesc_count - (ring->cur - ring->dirty));
}
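cur and dirty are free-running unsigned indices, so this subtraction stays correct even after the counters wrap; a self-contained sketch:

    /* Free-running u32 indices survive wraparound.  Example:
     * rdesc_count = 512, dirty = 0xfffffffe, cur = 0x00000003
     * -> cur - dirty = 5 in flight, 507 descriptors available.
     */
    static unsigned int ring_avail(unsigned int rdesc_count,
                                   unsigned int cur, unsigned int dirty)
    {
            return rdesc_count - (cur - dirty);
    }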
+static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
+ struct xgbe_ring *ring, unsigned int count)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+
+ if (count > xgbe_tx_avail_desc(ring)) {
+ DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(pdata->netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+
+ /* If we haven't notified the hardware because of xmit_more
+ * support, tell it now
+ */
+ if (ring->tx.xmit_more)
+ pdata->hw_if.tx_start_xmit(channel, ring);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return 0;
+}
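Callers use this twice per transmit, the usual pattern for BQL/xmit_more drivers; a sketch of the call sequence, mirroring xgbe_xmit() later in this patch:

    ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
    if (ret)
            return ret;                     /* NETDEV_TX_BUSY */

    /* ... map the skb and write the descriptors ... */

    /* stop early if a worst-case packet might not fit next time */
    xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
    return NETDEV_TX_OK;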
+
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
unsigned int rx_buf_size;
@@ -144,8 +257,8 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
}
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
- rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+ rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
~(XGBE_RX_BUF_ALIGN - 1);
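A worked example for the default MTU of 1500:

    /* rx_buf_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
     *             = 1522
     * XGBE_RX_MIN_BUF_SIZE = ETH_FRAME_LEN (1514) + 4 + 4 = 1522, so
     * clamp_val(1522, 1522, PAGE_SIZE) = 1522; aligning up to
     * XGBE_RX_BUF_ALIGN (64) gives (1522 + 63) & ~63 = 1536 bytes.
     */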
@@ -213,11 +326,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!dma_isr)
goto isr_done;
- DBGPR("-->xgbe_isr\n");
-
DBGPR(" DMA_ISR = %08x\n", dma_isr);
- DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
- DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
for (i = 0; i < pdata->channel_count; i++) {
if (!(dma_isr & (1 << i)))
@@ -228,6 +337,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+ /* If we get a TI or RI interrupt that means per channel DMA
+ * interrupts are not enabled, so we use the private data napi
+ * structure, not the per channel napi structure
+ */
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
if (napi_schedule_prep(&pdata->napi)) {
@@ -270,12 +383,28 @@ static irqreturn_t xgbe_isr(int irq, void *data)
DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
- DBGPR("<--xgbe_isr\n");
-
isr_done:
return IRQ_HANDLED;
}
+static irqreturn_t xgbe_dma_isr(int irq, void *data)
+{
+ struct xgbe_channel *channel = data;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule(&channel->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
struct xgbe_channel *channel = container_of(timer,
@@ -283,18 +412,24 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
tx_timer);
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_prv_data *pdata = channel->pdata;
+ struct napi_struct *napi;
unsigned long flags;
DBGPR("-->xgbe_tx_timer\n");
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
spin_lock_irqsave(&ring->lock, flags);
- if (napi_schedule_prep(&pdata->napi)) {
+ if (napi_schedule_prep(napi)) {
/* Disable Tx and Rx interrupts */
- xgbe_disable_rx_tx_ints(pdata);
+ if (pdata->per_channel_irq)
+ disable_irq(channel->dma_irq);
+ else
+ xgbe_disable_rx_tx_ints(pdata);
/* Turn on polling */
- __napi_schedule(&pdata->napi);
+ __napi_schedule(napi);
}
channel->tx_timer_active = 0;
@@ -430,18 +565,46 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
- if (add)
- netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
- NAPI_POLL_WEIGHT);
- napi_enable(&pdata->napi);
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xgbe_one_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&channel->napi);
+ }
+ } else {
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi,
+ xgbe_all_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&pdata->napi);
+ }
}
static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
- napi_disable(&pdata->napi);
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ napi_disable(&channel->napi);
- if (del)
- netif_napi_del(&pdata->napi);
+ if (del)
+ netif_napi_del(&channel->napi);
+ }
+ } else {
+ napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
+ }
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -472,7 +635,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_init_rx_coalesce\n");
}
-static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
@@ -480,7 +643,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
struct xgbe_ring_data *rdata;
unsigned int i, j;
- DBGPR("-->xgbe_free_tx_skbuff\n");
+ DBGPR("-->xgbe_free_tx_data\n");
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -490,14 +653,14 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
- desc_if->unmap_skb(pdata, rdata);
+ desc_if->unmap_rdata(pdata, rdata);
}
}
- DBGPR("<--xgbe_free_tx_skbuff\n");
+ DBGPR("<--xgbe_free_tx_data\n");
}
-static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
@@ -505,7 +668,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
struct xgbe_ring_data *rdata;
unsigned int i, j;
- DBGPR("-->xgbe_free_rx_skbuff\n");
+ DBGPR("-->xgbe_free_rx_data\n");
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
@@ -515,11 +678,11 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
- desc_if->unmap_skb(pdata, rdata);
+ desc_if->unmap_rdata(pdata, rdata);
}
}
- DBGPR("<--xgbe_free_rx_skbuff\n");
+ DBGPR("<--xgbe_free_rx_data\n");
}
static void xgbe_adjust_link(struct net_device *netdev)
@@ -735,7 +898,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+ unsigned int i;
DBGPR("-->xgbe_stop\n");
@@ -749,12 +915,23 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ continue;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ netdev_tx_reset_queue(txq);
+ }
+
DBGPR("<--xgbe_stop\n");
}
static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
+ struct xgbe_channel *channel;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int i;
DBGPR("-->xgbe_restart_dev\n");
@@ -763,10 +940,15 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
return;
xgbe_stop(pdata);
- synchronize_irq(pdata->irq_number);
+ synchronize_irq(pdata->dev_irq);
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ synchronize_irq(channel->dma_irq);
+ }
- xgbe_free_tx_skbuff(pdata);
- xgbe_free_rx_skbuff(pdata);
+ xgbe_free_tx_data(pdata);
+ xgbe_free_rx_data(pdata);
/* Issue software reset to device if requested */
if (reset)
@@ -1008,6 +1190,12 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
packet->tcp_header_len, packet->tcp_payload_len);
DBGPR(" packet->mss=%u\n", packet->mss);
+ /* Update the number of packets that will ultimately be transmitted
+ * along with the extra bytes for each extra packet
+ */
+ packet->tx_packets = skb_shinfo(skb)->gso_segs;
+ packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
+
return 0;
}
@@ -1033,17 +1221,22 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
unsigned int len;
unsigned int i;
+ packet->skb = skb;
+
context_desc = 0;
packet->rdesc_count = 0;
+ packet->tx_packets = 1;
+ packet->tx_bytes = skb->len;
+
if (xgbe_is_tso(skb)) {
- /* TSO requires an extra desriptor if mss is different */
+ /* TSO requires an extra descriptor if mss is different */
if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
context_desc = 1;
packet->rdesc_count++;
}
- /* TSO requires an extra desriptor for TSO header */
+ /* TSO requires an extra descriptor for TSO header */
packet->rdesc_count++;
XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
@@ -1091,6 +1284,8 @@ static int xgbe_open(struct net_device *netdev)
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel = NULL;
+ unsigned int i = 0;
int ret;
DBGPR("-->xgbe_open\n");
@@ -1119,24 +1314,48 @@ static int xgbe_open(struct net_device *netdev)
goto err_ptpclk;
pdata->rx_buf_size = ret;
+ /* Allocate the channel and ring structures */
+ ret = xgbe_alloc_channels(pdata);
+ if (ret)
+ goto err_ptpclk;
+
/* Allocate the ring descriptors and buffers */
ret = desc_if->alloc_ring_resources(pdata);
if (ret)
- goto err_ptpclk;
+ goto err_channels;
/* Initialize the device restart and Tx timestamp work struct */
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
/* Request interrupts */
- ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev->name, pdata);
if (ret) {
netdev_alert(netdev, "error requesting irq %d\n",
- pdata->irq_number);
- goto err_irq;
+ pdata->dev_irq);
+ goto err_rings;
+ }
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev,
+ "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
}
- pdata->irq_number = netdev->irq;
ret = xgbe_start(pdata);
if (ret)
@@ -1149,12 +1368,21 @@ static int xgbe_open(struct net_device *netdev)
err_start:
hw_if->exit(pdata);
- devm_free_irq(pdata->dev, pdata->irq_number, pdata);
- pdata->irq_number = 0;
-
err_irq:
+ if (pdata->per_channel_irq) {
+ /* Using an unsigned int, 'i' wraps to UINT_MAX after zero and the loop exits */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+err_rings:
desc_if->free_ring_resources(pdata);
+err_channels:
+ xgbe_free_channels(pdata);
+
err_ptpclk:
clk_disable_unprepare(pdata->ptpclk);
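The err_irq loop above relies on unsigned wraparound to terminate; a minimal standalone illustration of the idiom (struct resource_entry and release_entry() are hypothetical names):

    /* Free entries [0, n) in reverse after a failure at index n.  When
     * i is 0, i-- wraps to UINT_MAX, 'i < n' fails and the loop ends.
     */
    static void unwind(struct resource_entry *res, unsigned int n)
    {
            unsigned int i;

            for (i = n - 1; i < n; i--)
                    release_entry(&res[i]);
    }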
@@ -1172,6 +1400,8 @@ static int xgbe_close(struct net_device *netdev)
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
DBGPR("-->xgbe_close\n");
@@ -1181,15 +1411,20 @@ static int xgbe_close(struct net_device *netdev)
/* Issue software reset to device */
hw_if->exit(pdata);
- /* Free all the ring data */
+ /* Free the ring descriptors and buffers */
desc_if->free_ring_resources(pdata);
- /* Release the interrupt */
- if (pdata->irq_number != 0) {
- devm_free_irq(pdata->dev, pdata->irq_number, pdata);
- pdata->irq_number = 0;
+ /* Release the interrupts */
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
}
+ /* Free the channel and ring structures */
+ xgbe_free_channels(pdata);
+
/* Disable the clocks */
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
@@ -1210,12 +1445,14 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
struct xgbe_channel *channel;
struct xgbe_ring *ring;
struct xgbe_packet_data *packet;
+ struct netdev_queue *txq;
unsigned long flags;
int ret;
DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
channel = pdata->channel + skb->queue_mapping;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
ring = channel->tx_ring;
packet = &ring->packet_data;
@@ -1234,13 +1471,9 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
xgbe_packet_info(pdata, ring, skb, packet);
/* Check that there are enough descriptors available */
- if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
- DBGPR(" Tx queue stopped, not enough descriptors available\n");
- netif_stop_subqueue(netdev, channel->queue_index);
- ring->tx.queue_stopped = 1;
- ret = NETDEV_TX_BUSY;
+ ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
+ if (ret)
goto tx_netdev_return;
- }
ret = xgbe_prep_tso(skb, packet);
if (ret) {
@@ -1257,13 +1490,21 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
xgbe_prep_tx_tstamp(pdata, skb, packet);
+ /* Report on the actual number of bytes (to be) sent */
+ netdev_tx_sent_queue(txq, packet->tx_bytes);
+
/* Configure required descriptor fields for transmission */
- hw_if->pre_xmit(channel);
+ hw_if->dev_xmit(channel);
#ifdef XGMAC_ENABLE_TX_PKT_DUMP
xgbe_print_pkt(netdev, skb, true);
#endif
+ /* Stop the queue in advance if there may not be enough descriptors */
+ xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
+
+ ret = NETDEV_TX_OK;
+
tx_netdev_return:
spin_unlock_irqrestore(&ring->lock, flags);
@@ -1420,14 +1661,20 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
static void xgbe_poll_controller(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_channel *channel;
+ unsigned int i;
DBGPR("-->xgbe_poll_controller\n");
- disable_irq(pdata->irq_number);
-
- xgbe_isr(pdata->irq_number, pdata);
-
- enable_irq(pdata->irq_number);
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_dma_isr(channel->dma_irq, channel);
+ } else {
+ disable_irq(pdata->dev_irq);
+ xgbe_isr(pdata->dev_irq, pdata);
+ enable_irq(pdata->dev_irq);
+ }
DBGPR("<--xgbe_poll_controller\n");
}
@@ -1465,12 +1712,21 @@ static int xgbe_set_features(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- netdev_features_t rxcsum, rxvlan, rxvlan_filter;
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+ int ret = 0;
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_if->enable_rss(pdata);
+ else if (!(features & NETIF_F_RXHASH) && rxhash)
+ ret = hw_if->disable_rss(pdata);
+ if (ret)
+ return ret;
+
if ((features & NETIF_F_RXCSUM) && !rxcsum)
hw_if->enable_rx_csum(pdata);
else if (!(features & NETIF_F_RXCSUM) && rxcsum)
@@ -1524,7 +1780,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- desc_if->realloc_skb(channel);
+ desc_if->realloc_rx_buffer(channel);
/* Update the Rx Tail Pointer Register with address of
* the last cleaned entry */
@@ -1533,6 +1789,31 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
lower_32_bits(rdata->rdesc_dma));
}
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata,
+ unsigned int *len)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct sk_buff *skb;
+ u8 *packet;
+ unsigned int copy_len;
+
+ skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+ packet = page_address(rdata->rx.hdr.pa.pages) +
+ rdata->rx.hdr.pa.pages_offset;
+ copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+ copy_len = min(rdata->rx.hdr.dma_len, copy_len);
+ skb_copy_to_linear_data(skb, packet, copy_len);
+ skb_put(skb, copy_len);
+
+ *len -= copy_len;
+
+ return skb;
+}
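A worked example of how the receive path consumes this: if the hardware reported hdr_len = 54 (Ethernet + IP + TCP) for a 1400-byte packet, the header is copied into the skb linear area and the payload attached as a page fragment (the sizes are illustrative):

    skb = xgbe_create_skb(pdata, rdata, &put_len); /* put_len: 1400 -> 1346 */
    if (skb && put_len)
            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                            rdata->rx.buf.pa.pages,
                            rdata->rx.buf.pa.pages_offset,
                            put_len, rdata->rx.buf.dma_len);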
+
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -1542,8 +1823,10 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
unsigned long flags;
int processed = 0;
+ unsigned int tx_packets = 0, tx_bytes = 0;
DBGPR("-->xgbe_tx_poll\n");
@@ -1551,6 +1834,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
if (!ring)
return 0;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
spin_lock_irqsave(&ring->lock, flags);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
@@ -1561,26 +1846,41 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
if (!hw_if->tx_complete(rdesc))
break;
+ /* Make sure descriptor fields are read after reading the OWN
+ * bit
+ */
+ rmb();
+
#ifdef XGMAC_ENABLE_TX_DESC_DUMP
xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif
+ if (hw_if->is_last_desc(rdesc)) {
+ tx_packets += rdata->tx.packets;
+ tx_bytes += rdata->tx.bytes;
+ }
+
/* Free the SKB and reset the descriptor for re-use */
- desc_if->unmap_skb(pdata, rdata);
+ desc_if->unmap_rdata(pdata, rdata);
hw_if->tx_desc_reset(rdata);
processed++;
ring->dirty++;
}
+ if (!processed)
+ goto unlock;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
if ((ring->tx.queue_stopped == 1) &&
(xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
ring->tx.queue_stopped = 0;
- netif_wake_subqueue(netdev, channel->queue_index);
+ netif_tx_wake_queue(txq);
}
DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+unlock:
spin_unlock_irqrestore(&ring->lock, flags);
return processed;
@@ -1594,6 +1894,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
struct net_device *netdev = pdata->netdev;
+ struct napi_struct *napi;
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context;
@@ -1607,6 +1908,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
packet = &ring->packet_data;
while (packet_count < budget) {
@@ -1641,10 +1944,6 @@ read_again:
ring->cur++;
ring->dirty++;
- dma_unmap_single(pdata->dev, rdata->skb_dma,
- rdata->skb_dma_len, DMA_FROM_DEVICE);
- rdata->skb_dma = 0;
-
incomplete = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
INCOMPLETE);
@@ -1667,33 +1966,40 @@ read_again:
}
if (!context) {
- put_len = rdata->len - len;
- if (skb) {
- if (pskb_expand_head(skb, 0, put_len,
- GFP_ATOMIC)) {
- DBGPR("pskb_expand_head error\n");
- if (incomplete) {
- error = 1;
- goto read_again;
- }
-
- dev_kfree_skb(skb);
- goto next_packet;
+ put_len = rdata->rx.len - len;
+ len += put_len;
+
+ if (!skb) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx.hdr.dma,
+ rdata->rx.hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb = xgbe_create_skb(pdata, rdata, &put_len);
+ if (!skb) {
+ error = 1;
+ goto skip_data;
}
- memcpy(skb_tail_pointer(skb), rdata->skb->data,
- put_len);
- } else {
- skb = rdata->skb;
- rdata->skb = NULL;
}
- skb_put(skb, put_len);
- len += put_len;
+
+ if (put_len) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx.buf.dma,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx.buf.pa.pages,
+ rdata->rx.buf.pa.pages_offset,
+ put_len, rdata->rx.buf.dma_len);
+ rdata->rx.buf.pa.pages = NULL;
+ }
}
+skip_data:
if (incomplete || context_next)
goto read_again;
- /* Stray Context Descriptor? */
if (!skb)
goto next_packet;
@@ -1733,13 +2039,18 @@ read_again:
hwtstamps->hwtstamp = ns_to_ktime(nsec);
}
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RSS_HASH))
+ skb_set_hash(skb, packet->rss_hash,
+ packet->rss_hash_type);
+
skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, channel->queue_index);
- skb_mark_napi_id(skb, &pdata->napi);
+ skb_mark_napi_id(skb, napi);
netdev->last_rx = jiffies;
- napi_gro_receive(&pdata->napi, skb);
+ napi_gro_receive(napi, skb);
next_packet:
packet_count++;
@@ -1761,7 +2072,35 @@ next_packet:
return packet_count;
}
-static int xgbe_poll(struct napi_struct *napi, int budget)
+static int xgbe_one_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
+ napi);
+ int processed = 0;
+
+ DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = xgbe_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ enable_irq(channel->dma_irq);
+ }
+
+ DBGPR("<--xgbe_one_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
napi);
@@ -1770,7 +2109,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
int processed, last_processed;
unsigned int i;
- DBGPR("-->xgbe_poll: budget=%d\n", budget);
+ DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
processed = 0;
ring_budget = budget / pdata->rx_ring_count;
@@ -1798,7 +2137,7 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
xgbe_enable_rx_tx_ints(pdata);
}
- DBGPR("<--xgbe_poll: received = %d\n", processed);
+ DBGPR("<--xgbe_all_poll: received = %d\n", processed);
return processed;
}
@@ -1812,10 +2151,10 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
while (count--) {
rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
- DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
- (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
- le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
idx++;
}
}
@@ -1823,9 +2162,9 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
unsigned int idx)
{
- DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
- le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
- le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+ pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+ le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+ le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 49508ec98b72..ebf489351555 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -452,9 +452,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
rx_usecs);
return -EINVAL;
}
- if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
+ if (rx_frames > pdata->rx_desc_count) {
netdev_alert(netdev, "rx-frames is limited to %d frames\n",
- pdata->channel->rx_ring->rdesc_count);
+ pdata->rx_desc_count);
return -EINVAL;
}
@@ -462,9 +462,9 @@ static int xgbe_set_coalesce(struct net_device *netdev,
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
- if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
+ if (tx_frames > pdata->tx_desc_count) {
netdev_alert(netdev, "tx-frames is limited to %d frames\n",
- pdata->channel->tx_ring->rdesc_count);
+ pdata->tx_desc_count);
return -EINVAL;
}
@@ -481,6 +481,82 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return 0;
}
+static int xgbe_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = pdata->rx_ring_count;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return sizeof(pdata->rss_key);
+}
+
+static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return ARRAY_SIZE(pdata->rss_table);
+}
+
+static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+ MAC_RSSDR, DMCH);
+ }
+
+ if (key)
+ memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int ret;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ ret = hw_if->set_rss_lookup_table(pdata, indir);
+ if (ret)
+ return ret;
+ }
+
+ if (key) {
+ ret = hw_if->set_rss_hash_key(pdata, key);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int xgbe_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *ts_info)
{
@@ -526,6 +602,11 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_strings = xgbe_get_strings,
.get_ethtool_stats = xgbe_get_ethtool_stats,
.get_sset_count = xgbe_get_sset_count,
+ .get_rxnfc = xgbe_get_rxnfc,
+ .get_rxfh_key_size = xgbe_get_rxfh_key_size,
+ .get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
+ .get_rxfh = xgbe_get_rxfh,
+ .set_rxfh = xgbe_set_rxfh,
.get_ts_info = xgbe_get_ts_info,
};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index f5a8fa03921a..dbd3850b8b0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -133,60 +133,6 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
-static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
-{
- struct xgbe_channel *channel_mem, *channel;
- struct xgbe_ring *tx_ring, *rx_ring;
- unsigned int count, i;
-
- DBGPR("-->xgbe_alloc_rings\n");
-
- count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
-
- channel_mem = devm_kcalloc(pdata->dev, count,
- sizeof(struct xgbe_channel), GFP_KERNEL);
- if (!channel_mem)
- return NULL;
-
- tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
- sizeof(struct xgbe_ring), GFP_KERNEL);
- if (!tx_ring)
- return NULL;
-
- rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
- sizeof(struct xgbe_ring), GFP_KERNEL);
- if (!rx_ring)
- return NULL;
-
- for (i = 0, channel = channel_mem; i < count; i++, channel++) {
- snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
- channel->pdata = pdata;
- channel->queue_index = i;
- channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
- (DMA_CH_INC * i);
-
- if (i < pdata->tx_ring_count) {
- spin_lock_init(&tx_ring->lock);
- channel->tx_ring = tx_ring++;
- }
-
- if (i < pdata->rx_ring_count) {
- spin_lock_init(&rx_ring->lock);
- channel->rx_ring = rx_ring++;
- }
-
- DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
- channel->name, channel->queue_index, channel->dma_regs,
- channel->tx_ring, channel->rx_ring);
- }
-
- pdata->channel_count = count;
-
- DBGPR("<--xgbe_alloc_rings\n");
-
- return channel_mem;
-}
-
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@@ -224,6 +170,7 @@ static int xgbe_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
const u8 *mac_addr;
+ unsigned int i;
int ret;
DBGPR("--> xgbe_probe\n");
@@ -244,6 +191,7 @@ static int xgbe_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
mutex_init(&pdata->xpcs_mutex);
+ mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
/* Set and validate the number of descriptors for a ring */
@@ -318,12 +266,18 @@ static int xgbe_probe(struct platform_device *pdev)
pdata->awcache = XGBE_DMA_SYS_AWCACHE;
}
+ /* Check for per channel interrupt support */
+ if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
+ pdata->per_channel_irq = 1;
+
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- dev_err(dev, "platform_get_irq failed\n");
+ dev_err(dev, "platform_get_irq 0 failed\n");
goto err_io;
}
- netdev->irq = ret;
+ pdata->dev_irq = ret;
+
+ netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
/* Set all the function pointers */
@@ -383,13 +337,16 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_io;
}
- /* Allocate the rings for the DMA channels */
- pdata->channel = xgbe_alloc_rings(pdata);
- if (!pdata->channel) {
- dev_err(dev, "ring allocation failed\n");
- ret = -ENOMEM;
- goto err_io;
- }
+ /* Initialize RSS hash key and lookup table */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
+ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->rx_ring_count);
+
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
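For illustration, with rx_ring_count = 4 the modulo fill above spreads flows evenly across the rings; ethtool -X can reweight the table later:

    /* Resulting indirection table for rx_ring_count = 4 (first entries):
     *   rss_table[0..7] DMCH = 0, 1, 2, 3, 0, 1, 2, 3, ...
     * over all XGBE_RSS_MAX_TABLE_SIZE (256) entries.
     */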
/* Prepare to register with MDIO */
pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
@@ -421,6 +378,9 @@ static int xgbe_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
netdev->vlan_features |= NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 789957d43a13..f9ec762ac3f0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -140,11 +140,25 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for an SKB:
+ * - Maximum number of SKB frags
+ * - Maximum descriptors for contiguous TSO/GSO packet
+ * - Possible context descriptor
+ * - Possible TSO header descriptor
+ */
+#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
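Worked arithmetic for these limits, assuming 4 KB pages (MAX_SKB_FRAGS = 17) and GSO_MAX_SIZE = 65536:

    /* XGBE_TX_MAX_BUF_SIZE = 0x3fff & ~63      = 16320
     * XGBE_TX_MAX_SPLIT    = 65536 / 16320 + 1 = 5
     * XGBE_TX_MAX_DESCS    = 17 + 5 + 2        = 24 descriptors worst case
     */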
+
#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGBE_RX_BUF_ALIGN 64
+#define XGBE_SKB_ALLOC_SIZE 256
+#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with XGBE_SKB_ALLOC_SIZE */
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
+#define XGBE_DMA_STOP_TIMEOUT 5
/* DMA cache settings - Outer sharable, write-back, write-allocate */
#define XGBE_DMA_OS_AXDOMAIN 0x2
@@ -171,6 +185,7 @@
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
#define XGBE_PTP_CLOCK "ptp_clk"
+#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
/* Timestamp support - values based on 50MHz PTP clock
* 50MHz => 20 nsec
@@ -212,9 +227,17 @@
/* Maximum MAC address hash table size (256 bits = 8 bytes) */
#define XGBE_MAC_HASH_TABLE_SIZE 8
+/* Receive Side Scaling */
+#define XGBE_RSS_HASH_KEY_SIZE 40
+#define XGBE_RSS_MAX_TABLE_SIZE 256
+#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define XGBE_RSS_HASH_KEY_TYPE 1
+
struct xgbe_prv_data;
struct xgbe_packet_data {
+ struct sk_buff *skb;
+
unsigned int attributes;
unsigned int errors;
@@ -230,14 +253,53 @@ struct xgbe_packet_data {
unsigned short vlan_ctag;
u64 rx_tstamp;
+
+ u32 rss_hash;
+ enum pkt_hash_types rss_hash_type;
+
+ unsigned int tx_packets;
+ unsigned int tx_bytes;
};
/* Common Rx and Tx descriptor mapping */
struct xgbe_ring_desc {
- unsigned int desc0;
- unsigned int desc1;
- unsigned int desc2;
- unsigned int desc3;
+ __le32 desc0;
+ __le32 desc1;
+ __le32 desc2;
+ __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xgbe_page_alloc {
+ struct page *pages;
+ unsigned int pages_len;
+ unsigned int pages_offset;
+
+ dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+ struct xgbe_page_alloc pa;
+ struct xgbe_page_alloc pa_unmap;
+
+ dma_addr_t dma;
+ unsigned int dma_len;
+};
+
+/* Tx-related ring data */
+struct xgbe_tx_ring_data {
+ unsigned int packets; /* BQL packet count */
+ unsigned int bytes; /* BQL byte count */
+};
+
+/* Rx-related ring data */
+struct xgbe_rx_ring_data {
+ struct xgbe_buffer_data hdr; /* Header locations */
+ struct xgbe_buffer_data buf; /* Payload locations */
+
+ unsigned short hdr_len; /* Length of received header */
+ unsigned short len; /* Length of received packet */
};
/* Structure used to hold information related to the descriptor
@@ -251,9 +313,9 @@ struct xgbe_ring_data {
struct sk_buff *skb; /* Virtual address of SKB */
dma_addr_t skb_dma; /* DMA address of SKB data */
unsigned int skb_dma_len; /* Length of SKB DMA area */
- unsigned int tso_header; /* TSO header indicator */
- unsigned short len; /* Length of received Rx packet */
+ struct xgbe_tx_ring_data tx; /* Tx-related data */
+ struct xgbe_rx_ring_data rx; /* Rx-related data */
unsigned int interrupt; /* Interrupt indicator */
@@ -291,6 +353,10 @@ struct xgbe_ring {
*/
struct xgbe_ring_data *rdata;
+ /* Page allocation for RX buffers */
+ struct xgbe_page_alloc rx_hdr_pa;
+ struct xgbe_page_alloc rx_buf_pa;
+
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
* Rx: index of descriptor to check for packet availability
@@ -307,6 +373,7 @@ struct xgbe_ring {
union {
struct {
unsigned int queue_stopped;
+ unsigned int xmit_more;
unsigned short cur_mss;
unsigned short cur_vlan_ctag;
} tx;
@@ -331,6 +398,13 @@ struct xgbe_channel {
unsigned int queue_index;
void __iomem *dma_regs;
+ /* Per channel interrupt irq number */
+ int dma_irq;
+ char dma_irq_name[IFNAMSIZ + 32];
+
+ /* Netdev related settings */
+ struct napi_struct napi;
+
unsigned int saved_ier;
unsigned int tx_timer_active;
@@ -456,7 +530,7 @@ struct xgbe_hw_if {
int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
- void (*pre_xmit)(struct xgbe_channel *);
+ void (*dev_xmit)(struct xgbe_channel *);
int (*dev_read)(struct xgbe_channel *);
void (*tx_desc_init)(struct xgbe_channel *);
void (*rx_desc_init)(struct xgbe_channel *);
@@ -464,6 +538,7 @@ struct xgbe_hw_if {
void (*tx_desc_reset)(struct xgbe_ring_data *);
int (*is_last_desc)(struct xgbe_ring_desc *);
int (*is_context_desc)(struct xgbe_ring_desc *);
+ void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
/* For FLOW ctrl */
int (*config_tx_flow_control)(struct xgbe_prv_data *);
@@ -509,14 +584,20 @@ struct xgbe_hw_if {
/* For Data Center Bridging config */
void (*config_dcb_tc)(struct xgbe_prv_data *);
void (*config_dcb_pfc)(struct xgbe_prv_data *);
+
+ /* For Receive Side Scaling */
+ int (*enable_rss)(struct xgbe_prv_data *);
+ int (*disable_rss)(struct xgbe_prv_data *);
+ int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
+ int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
- void (*realloc_skb)(struct xgbe_channel *);
- void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*realloc_rx_buffer)(struct xgbe_channel *);
+ void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
};
@@ -581,7 +662,11 @@ struct xgbe_prv_data {
/* XPCS indirect addressing mutex */
struct mutex xpcs_mutex;
- int irq_number;
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+
+ int dev_irq;
+ unsigned int per_channel_irq;
struct xgbe_hw_if hw_if;
struct xgbe_desc_if desc_if;
@@ -624,7 +709,7 @@ struct xgbe_prv_data {
unsigned int rx_riwt;
unsigned int rx_frames;
- /* Current MTU */
+ /* Current Rx buffer size */
unsigned int rx_buf_size;
/* Flow control settings */
@@ -632,6 +717,11 @@ struct xgbe_prv_data {
unsigned int tx_pause;
unsigned int rx_pause;
+ /* Receive Side Scaling settings */
+ u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
+ u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
/* MDIO settings */
struct module *phy_module;
char *mii_bus_id;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 123669696184..83a50280bb70 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -761,10 +761,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
ndev = pdata->ndev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
- if (!res) {
- dev_err(dev, "Resource enet_csr not defined\n");
- return -ENODEV;
- }
pdata->base_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->base_addr)) {
dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
@@ -772,10 +768,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
- if (!res) {
- dev_err(dev, "Resource ring_csr not defined\n");
- return -ENODEV;
- }
pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ring_csr_addr)) {
dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
@@ -783,10 +775,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
- if (!res) {
- dev_err(dev, "Resource ring_cmd not defined\n");
- return -ENODEV;
- }
pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ring_cmd_addr)) {
dev_err(dev, "Unable to retrieve ENET Ring command region\n");
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index c3e260c21734..888247ad9068 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -62,7 +62,6 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
- depends on OF
select MII
select PHYLIB
select FIXED_PHY if BCMGENET=y
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 416620fa8fac..d86d6baf9681 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -836,7 +836,7 @@ static int b44_rx(struct b44 *bp, int budget)
struct sk_buff *copy_skb;
b44_recycle_rx(bp, cons, bp->rx_prod);
- copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
+ copy_skb = napi_alloc_skb(&bp->napi, len);
if (copy_skb == NULL)
goto drop_it_no_recycle;
@@ -2104,6 +2104,7 @@ static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
bp->flags &= ~B44_FLAG_WOL_ENABLE;
spin_unlock_irq(&bp->lock);
+ device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
return 0;
}
@@ -2452,6 +2453,7 @@ static int b44_init_one(struct ssb_device *sdev,
}
}
+ device_set_wakeup_capable(sdev->dev, true);
netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3e8d1a88ed3d..21206d33b638 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -385,7 +385,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
if (len < copybreak) {
struct sk_buff *nskb;
- nskb = netdev_alloc_skb_ip_align(dev, len);
+ nskb = napi_alloc_skb(&priv->napi, len);
if (!nskb) {
/* forget packet, just rearm desc */
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 531bb7c57531..a91a8c263391 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,6 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+ STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
+ STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -477,6 +480,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
RX_BUF_LENGTH, DMA_FROM_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.rx_dma_failed++;
bcm_sysport_free_cb(cb);
netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
return ret;
@@ -526,6 +530,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int p_index;
u16 len, status;
struct bcm_rsb *rsb;
+ int ret;
/* Determine how much we should process since last call */
p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -620,7 +625,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
napi_gro_receive(&priv->napi, skb);
refill:
- bcm_sysport_rx_refill(priv, cb);
+ ret = bcm_sysport_rx_refill(priv, cb);
+ if (ret)
+ priv->mib.alloc_rx_buff_failed++;
}
return processed;
@@ -731,9 +738,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
/* re-enable TX interrupt */
intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+
+ return 0;
}
- return 0;
+ return budget;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -971,6 +980,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
if (dma_mapping_error(kdev, mapping)) {
+ priv->mib.tx_dma_failed++;
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
@@ -1399,6 +1409,27 @@ static void topctrl_flush(struct bcm_sysport_priv *priv)
topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}
+static int bcm_sysport_change_mac(struct net_device *dev, void *p)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ /* The interface is down; the new MAC address will be applied on
+ * the next open call
+ */
+ if (!netif_running(dev))
+ return 0;
+
+ umac_set_hw_addr(priv, dev->dev_addr);
+
+ return 0;
+}
+
static void bcm_sysport_netif_start(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1618,6 +1649,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_stop = bcm_sysport_stop,
.ndo_set_features = bcm_sysport_set_features,
.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
+ .ndo_set_mac_address = bcm_sysport_change_mac,
};
#define REV_FMT "v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index b08dab828101..fc19417d82a5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -557,6 +557,9 @@ struct bcm_sysport_mib {
u32 rxchk_other_pkt_disc;
u32 rbuf_ovflow_cnt;
u32 rbuf_err_cnt;
+ u32 alloc_rx_buff_failed;
+ u32 rx_dma_failed;
+ u32 tx_dma_failed;
};
/* HW maintains a large list of counters */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 40beef5bca88..1d1147c93d59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1015,7 +1015,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
*/
if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
(len <= RX_COPY_THRESH)) {
- skb = netdev_alloc_skb_ip_align(bp->dev, len);
+ skb = napi_alloc_skb(&fp->napi, len);
if (skb == NULL) {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
@@ -1139,7 +1139,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
return IRQ_HANDLED;
}
@@ -2099,7 +2099,7 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
+ netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 1edc931b1458..ffe4e003e636 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3358,12 +3358,18 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+
/* Get the current configuration of the RSS indirection table */
bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
@@ -3383,11 +3389,21 @@ static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
}
static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
+ /* We require at least one supported parameter to be changed and no
+ * change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
* The same as in bnx2x_get_rxfh: we can't use a memcpy()
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 893cdb6a423e..691f0bf09ee1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1932,7 +1932,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
for_each_cos_in_tx_queue(fp, cos)
prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
}
}
@@ -3164,6 +3164,8 @@ static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
gen_init->mtu = bp->dev->mtu;
gen_init->cos = cos;
+
+ gen_init->fp_hsi = ETH_FP_HSI_VERSION;
}
static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
@@ -12538,7 +12540,7 @@ static int bnx2x_validate_addr(struct net_device *dev)
}
static int bnx2x_get_phys_port_id(struct net_device *netdev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
struct bnx2x *bp = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7bc2924a7e24..07cdf9bbffef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4336,7 +4336,7 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
test_bit(BNX2X_Q_FLG_FCOE, flags) ?
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
- gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION;
+ gen_data->fp_hsi_ver = params->fp_hsi;
DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index e97275f456c0..86baecb7c60c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -937,6 +937,8 @@ struct bnx2x_general_setup_params {
u8 spcl_id;
u16 mtu;
u8 cos;
+
+ u8 fp_hsi;
};
struct bnx2x_rxq_setup_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index c88b20af87df..e5aca2de1871 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -193,6 +193,7 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
/* Setup-op general parameters */
setup_p->gen_params.spcl_id = vf->sp_cl_id;
setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+ setup_p->gen_params.fp_hsi = vf->fp_hsi;
/* Setup-op pause params:
* Nothing to do, the pause thresholds are set by default to 0 which
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 01bafa4ac045..66ee62a0401a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -205,6 +205,8 @@ struct bnx2x_virtf {
/* slow-path operations */
struct mutex op_mutex; /* one vfop at a time mutex */
enum channel_tlvs op_current;
+
+ u8 fp_hsi;
};
#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index b1d9c44aa56c..be40eabc5304 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -224,6 +224,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
+ struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
u32 vf_id;
bool resources_acquired = false;
@@ -237,6 +238,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
req->vfdev_info.vf_id = vf_id;
req->vfdev_info.vf_os = 0;
+ req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;
req->resc_request.num_rxqs = rx_count;
req->resc_request.num_txqs = tx_count;
@@ -316,9 +318,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
memset(&bp->vf2pf_mbox->resp, 0,
sizeof(union pfvf_tlvs));
} else {
- /* PF reports error */
- BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
- bp->acquire_resp.hdr.status);
+ /* Determine why the PF failed the acquire process */
+ fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_FP_HSI_SUPPORT);
+ if (fp_hsi_resp && !fp_hsi_resp->is_supported)
+ BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
+ else
+ BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+ bp->acquire_resp.hdr.status);
rc = -EAGAIN;
goto out;
}
@@ -333,6 +340,25 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->flags |= HAS_PHYS_PORT_ID;
}
+ /* Old hypervisors might not even support the FP_HSI_SUPPORT TLV.
+ * If that's the case, we need to make certain the FW we require
+ * is one such a hypervisor supports [i.e., v0-v2].
+ */
+ fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
+ CHANNEL_TLV_FP_HSI_SUPPORT);
+ if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
+ BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");
+
+ /* Since acquire succeeded on the PF side, we need to send a
+ * release message in order to allow future probes.
+ */
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ bnx2x_vfpf_release(bp);
+
+ rc = -EINVAL;
+ goto out;
+ }
+
/* get HW info */
bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
bp->link_params.chip_id = bp->common.chip_id;
@@ -1125,6 +1151,26 @@ static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
}
+static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ void *buffer,
+ u16 *offset)
+{
+ struct vfpf_fp_hsi_resp_tlv *fp_hsi;
+
+ bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
+ sizeof(struct vfpf_fp_hsi_resp_tlv));
+
+ fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
+ (((u8 *)buffer) + *offset);
+ fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;
+
+ /* Offset should continue representing the offset to the tail
+ * of TLV data (outside this function scope)
+ */
+ *offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
+}
+
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx, int vfop_status)
{
@@ -1219,6 +1265,12 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
CHANNEL_TLV_PHYS_PORT_ID))
bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+ /* `New' VFs will want to know whether the fastpath HSI is supported,
+ * since if it is not they can log to the system log that their
+ * driver version must be updated.
+ */
+ bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);
+
bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1288,6 +1340,23 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
goto out;
}
+ /* Verify the VF fastpath HSI can be supported by the loaded FW.
+ * Linux VFs should be oblivious to changes between v0 and v2.
+ */
+ if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
+ vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
+ else
+ vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
+ ETH_FP_HSI_VER_2);
+ if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
+ DP(BNX2X_MSG_IOV,
+ "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
+ vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
+ ETH_FP_HSI_VERSION);
+ rc = -EINVAL;
+ goto out;
+ }
+
/* acquire the resources */
rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 15670c499a20..b86479fc0d2f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -124,7 +124,7 @@ struct vfpf_acquire_tlv {
#define VF_OS_UNDEFINED (0 << VF_OS_SHIFT)
#define VF_OS_WINDOWS (1 << VF_OS_SHIFT)
- u8 padding;
+ u8 fp_hsi_ver;
u8 caps;
#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0)
} vfdev_info;
@@ -204,6 +204,12 @@ struct vfpf_port_phys_id_resp_tlv {
u8 padding[2];
};
+struct vfpf_fp_hsi_resp_tlv {
+ struct channel_tlv tl;
+ u8 is_supported;
+ u8 padding[3];
+};
+
#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
* stats will be coalesced on
* the leading RSS queue
@@ -448,6 +454,7 @@ enum channel_tlvs {
CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_PHYS_PORT_ID,
CHANNEL_TLV_UPDATE_TPA,
+ CHANNEL_TLV_FP_HSI_SUPPORT,
CHANNEL_TLV_MAX
};
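
    Taken together, the vfpf hunks implement a two-sided fastpath HSI
    handshake: the VF advertises its compiled-in ETH_FP_HSI_VERSION in
    vfdev_info.fp_hsi_ver, the PF clamps Linux VFs up to at least v2 and
    answers with a CHANNEL_TLV_FP_HSI_SUPPORT TLV, and a missing TLV marks
    a pre-handshake hypervisor that can only serve v0-v2. The VF-side
    decision distills to roughly this (a sketch, not a drop-in function):

        fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
                                            CHANNEL_TLV_FP_HSI_SUPPORT);
        if (fp_hsi_resp) {
                /* New PF: it stated explicitly whether our HSI is served */
                if (!fp_hsi_resp->is_supported)
                        return -EINVAL; /* VF driver too new for this PF */
        } else if (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2) {
                /* Old PF: its silence implies it only knows HSI v0-v2;
                 * the real code also sends a release before bailing out */
                return -EINVAL;
        }
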
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index da1a2500c91c..7078bd386fb7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -42,6 +42,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
+#include <linux/platform_data/bcmgenet.h>
#include <asm/unaligned.h>
@@ -613,6 +614,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
UMAC_RBUF_OVFL_CNT),
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+ STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
+ STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -711,6 +715,98 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
}
}
+static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
+ u32 reg;
+
+ if (enable && !priv->clk_eee_enabled) {
+ clk_prepare_enable(priv->clk_eee);
+ priv->clk_eee_enabled = true;
+ }
+
+ reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
+ if (enable)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
+
+ /* Enable EEE and switch to a 27MHz clock automatically */
+ reg = __raw_readl(priv->base + off);
+ if (enable)
+ reg |= TBUF_EEE_EN | TBUF_PM_EN;
+ else
+ reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
+ __raw_writel(reg, priv->base + off);
+
+ /* Do the same thing for RBUF */
+ reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
+ if (enable)
+ reg |= RBUF_EEE_EN | RBUF_PM_EN;
+ else
+ reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
+ bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
+
+ if (!enable && priv->clk_eee_enabled) {
+ clk_disable_unprepare(priv->clk_eee);
+ priv->clk_eee_enabled = false;
+ }
+
+ priv->eee.eee_enabled = enable;
+ priv->eee.eee_active = enable;
+}
+
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct ethtool_eee *p = &priv->eee;
+
+ if (GENET_IS_V1(priv))
+ return -EOPNOTSUPP;
+
+ e->eee_enabled = p->eee_enabled;
+ e->eee_active = p->eee_active;
+ e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
+
+ return phy_ethtool_get_eee(priv->phydev, e);
+}
+
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct ethtool_eee *p = &priv->eee;
+ int ret = 0;
+
+ if (GENET_IS_V1(priv))
+ return -EOPNOTSUPP;
+
+ p->eee_enabled = e->eee_enabled;
+
+ if (!p->eee_enabled) {
+ bcmgenet_eee_enable_set(dev, false);
+ } else {
+ ret = phy_init_eee(priv->phydev, 0);
+ if (ret) {
+ netif_err(priv, hw, dev, "EEE initialization failed\n");
+ return ret;
+ }
+
+ bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
+ bcmgenet_eee_enable_set(dev, true);
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, e);
+}
+
+static int bcmgenet_nway_reset(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return genphy_restart_aneg(priv->phydev);
+}
+
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
.get_strings = bcmgenet_get_strings,
@@ -724,6 +820,9 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
.set_msglevel = bcmgenet_set_msglevel,
.get_wol = bcmgenet_get_wol,
.set_wol = bcmgenet_set_wol,
+ .get_eee = bcmgenet_get_eee,
+ .set_eee = bcmgenet_set_eee,
+ .nway_reset = bcmgenet_nway_reset,
};
/* Power down the unimac, based on mode. */
@@ -989,6 +1088,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.tx_dma_failed++;
netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
dev_kfree_skb(skb);
return ret;
@@ -1035,6 +1135,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
skb_frag_size(frag), DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.tx_dma_failed++;
netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
__func__);
return ret;
@@ -1231,6 +1332,7 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
priv->rx_buf_len, DMA_FROM_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
+ priv->mib.rx_dma_failed++;
bcmgenet_free_cb(cb);
netif_err(priv, rx_err, priv->dev,
"%s DMA map failed\n", __func__);
@@ -1397,8 +1499,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
/* refill RX path on the current control block */
refill:
err = bcmgenet_rx_refill(priv, cb);
- if (err)
+ if (err) {
+ priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, dev, "Rx refill failed\n");
+ }
rxpktprocessed++;
priv->rx_read_ptr++;
@@ -2400,6 +2504,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
+ u16 gphy_rev;
if (GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
@@ -2448,8 +2553,29 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
* to pass this information to the PHY driver. The PHY driver expects
* to find the PHY major revision in bits 15:8 while the GENET register
* stores that information in bits 7:0, account for that.
+ *
+ * On newer chips, starting with PHY revision G0, a new scheme is
+ * deployed, similar to the Starfighter 2 switch, with the GPHY major
+ * revision in bits 15:8 and the patch level in bits 7:0. Major
+ * revision 0 is reserved, as is the special value 0x01ff; we use a
+ * small heuristic to check for the new GPHY revision and re-arrange
+ * things so the GPHY driver is happy.
*/
- priv->gphy_rev = (reg & 0xffff) << 8;
+ gphy_rev = reg & 0xffff;
+
+ /* This is the good old scheme: just the GPHY major, no minor or patch */
+ if ((gphy_rev & 0xf0) != 0)
+ priv->gphy_rev = gphy_rev << 8;
+
+ /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
+ else if ((gphy_rev & 0xff00) != 0)
+ priv->gphy_rev = gphy_rev;
+
+ /* These values are reserved and require special treatment */
+ else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+ return;
+ }
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
@@ -2483,8 +2609,9 @@ static const struct of_device_id bcmgenet_match[] = {
static int bcmgenet_probe(struct platform_device *pdev)
{
+ struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
- const struct of_device_id *of_id;
+ const struct of_device_id *of_id = NULL;
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
@@ -2498,9 +2625,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
return -ENOMEM;
}
- of_id = of_match_node(bcmgenet_match, dn);
- if (!of_id)
- return -EINVAL;
+ if (dn) {
+ of_id = of_match_node(bcmgenet_match, dn);
+ if (!of_id)
+ return -EINVAL;
+ }
priv = netdev_priv(dev);
priv->irq0 = platform_get_irq(pdev, 0);
@@ -2512,11 +2641,15 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
- macaddr = of_get_mac_address(dn);
- if (!macaddr) {
- dev_err(&pdev->dev, "can't find MAC address\n");
- err = -EINVAL;
- goto err;
+ if (dn) {
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr) {
+ dev_err(&pdev->dev, "can't find MAC address\n");
+ err = -EINVAL;
+ goto err;
+ }
+ } else {
+ macaddr = pd->mac_address;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2556,7 +2689,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dev = dev;
priv->pdev = pdev;
- priv->version = (enum bcmgenet_version)of_id->data;
+ if (of_id)
+ priv->version = (enum bcmgenet_version)of_id->data;
+ else
+ priv->version = pd->genet_version;
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk))
@@ -2577,6 +2713,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_wol))
dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+ priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
+ if (IS_ERR(priv->clk_eee)) {
+ dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
+ priv->clk_eee = NULL;
+ }
+
err = reset_umac(priv);
if (err)
goto err_clk_disable;
@@ -2727,6 +2869,9 @@ static int bcmgenet_resume(struct device *d)
phy_resume(priv->phydev);
+ if (priv->eee.eee_enabled)
+ bcmgenet_eee_enable_set(dev, true);
+
bcmgenet_netif_start(dev);
return 0;
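
    The revision decode above can be read as a pure function of the 16-bit
    register value. A hypothetical helper condensing it; note that 0x01ff
    also satisfies the old-scheme test (0x01ff & 0xf0 != 0), so in the
    patch as written that value is caught by the first branch before the
    reserved check -- testing the reserved values first, as this sketch
    does, avoids mis-parsing them:

        static u16 gphy_rev_decode(u16 gphy_rev)
        {
                if (gphy_rev == 0 || gphy_rev == 0x01ff)
                        return 0;               /* reserved: caller warns */
                if (gphy_rev & 0xf0)
                        return gphy_rev << 8;   /* old scheme: major only,
                                                 * stored in the low byte */
                return gphy_rev;                /* new scheme: major.patch */
        }
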
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 31b2da5f9b82..b36ddec0cc0a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -143,6 +143,9 @@ struct bcmgenet_mib_counters {
u32 rbuf_ovflow_cnt;
u32 rbuf_err_cnt;
u32 mdf_err_cnt;
+ u32 alloc_rx_buff_failed;
+ u32 rx_dma_failed;
+ u32 tx_dma_failed;
};
#define UMAC_HD_BKP_CTRL 0x004
@@ -182,6 +185,21 @@ struct bcmgenet_mib_counters {
#define UMAC_MAC1 0x010
#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_EEE_CTRL 0x064
+#define EN_LPI_RX_PAUSE (1 << 0)
+#define EN_LPI_TX_PFC (1 << 1)
+#define EN_LPI_TX_PAUSE (1 << 2)
+#define EEE_EN (1 << 3)
+#define RX_FIFO_CHECK (1 << 4)
+#define EEE_TX_CLK_DIS (1 << 5)
+#define DIS_EEE_10M (1 << 6)
+#define LP_IDLE_PREDICTION_MODE (1 << 7)
+
+#define UMAC_EEE_LPI_TIMER 0x068
+#define UMAC_EEE_WAKE_TIMER 0x06C
+#define UMAC_EEE_REF_COUNT 0x070
+#define EEE_REFERENCE_COUNT_MASK 0xffff
+
#define UMAC_TX_FLUSH 0x334
#define UMAC_MIB_START 0x400
@@ -229,6 +247,10 @@ struct bcmgenet_mib_counters {
#define RBUF_RXCHK_EN (1 << 0)
#define RBUF_SKIP_FCS (1 << 4)
+#define RBUF_ENERGY_CTRL 0x9c
+#define RBUF_EEE_EN (1 << 0)
+#define RBUF_PM_EN (1 << 1)
+
#define RBUF_TBUF_SIZE_CTRL 0xb4
#define RBUF_HFB_CTRL_V1 0x38
@@ -244,6 +266,9 @@ struct bcmgenet_mib_counters {
#define TBUF_CTRL 0x00
#define TBUF_BP_MC 0x0C
+#define TBUF_ENERGY_CTRL 0x14
+#define TBUF_EEE_EN (1 << 0)
+#define TBUF_PM_EN (1 << 1)
#define TBUF_CTRL_V1 0x80
#define TBUF_BP_MC_V1 0xA0
@@ -548,6 +573,8 @@ struct bcmgenet_priv {
struct device_node *phy_dn;
struct mii_bus *mii_bus;
u16 gphy_rev;
+ struct clk *clk_eee;
+ bool clk_eee_enabled;
/* PHY device variables */
int old_link;
@@ -584,6 +611,8 @@ struct bcmgenet_priv {
u32 wolopts;
struct bcmgenet_mib_counters mib;
+
+ struct ethtool_eee eee;
};
#define GENET_IO_MACRO(name, offset) \
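
    For reference, bcmgenet_eee_enable_set() above toggles enable bits at
    three points -- UMAC_EEE_CTRL, TBUF_ENERGY_CTRL at tbuf_offset, and
    RBUF_ENERGY_CTRL -- with the same read-modify-write pattern. A minimal
    sketch of that pattern as a helper (hypothetical name; the UMAC and
    RBUF accesses actually go through the bcmgenet_umac_*/bcmgenet_rbuf_*
    wrappers, so this literal form only fits the TBUF register):

        static void genet_rmw(void __iomem *reg, u32 bits, bool set)
        {
                u32 v = __raw_readl(reg);

                if (set)
                        v |= bits;
                else
                        v &= ~bits;
                __raw_writel(v, reg);
        }

        /* e.g. the TBUF step of bcmgenet_eee_enable_set(): */
        genet_rmw(priv->base + priv->hw_params->tbuf_offset +
                  TBUF_ENERGY_CTRL, TBUF_EEE_EN | TBUF_PM_EN, enable);
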
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 933cd7e7cd33..446889cc3c6a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/platform_data/bcmgenet.h>
#include "bcmgenet.h"
@@ -312,22 +313,6 @@ static int bcmgenet_mii_probe(struct net_device *dev)
u32 phy_flags;
int ret;
- if (priv->phydev) {
- pr_info("PHY already attached\n");
- return 0;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
- ret = of_phy_register_fixed_link(dn);
- if (ret)
- return ret;
-
- priv->phy_dn = of_node_get(dn);
- }
-
/* Communicate the integrated PHY revision */
phy_flags = priv->gphy_rev;
@@ -337,11 +322,39 @@ static int bcmgenet_mii_probe(struct net_device *dev)
priv->old_duplex = -1;
priv->old_pause = -1;
- phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
- if (!phydev) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
+ if (dn) {
+ if (priv->phydev) {
+ pr_info("PHY already attached\n");
+ return 0;
+ }
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret)
+ return ret;
+
+ priv->phy_dn = of_node_get(dn);
+ }
+
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+ phy_flags, priv->phy_interface);
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ } else {
+ phydev = priv->phydev;
+ phydev->dev_flags = phy_flags;
+
+ ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+ if (ret) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
}
priv->phydev = phydev;
@@ -438,6 +451,75 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
return 0;
}
+static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct bcmgenet_platform_data *pd = kdev->platform_data;
+ struct mii_bus *mdio = priv->mii_bus;
+ struct phy_device *phydev;
+ int ret;
+
+ if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
+ /*
+ * Internal or external PHY with MDIO access
+ */
+ if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
+ mdio->phy_mask = ~(1 << pd->phy_address);
+ else
+ mdio->phy_mask = 0;
+
+ ret = mdiobus_register(mdio);
+ if (ret) {
+ dev_err(kdev, "failed to register MDIO bus\n");
+ return ret;
+ }
+
+ if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
+ phydev = mdio->phy_map[pd->phy_address];
+ else
+ phydev = phy_find_first(mdio);
+
+ if (!phydev) {
+ dev_err(kdev, "failed to register PHY device\n");
+ mdiobus_unregister(mdio);
+ return -ENODEV;
+ }
+ } else {
+ /*
+ * MoCA port or no MDIO access.
+ * Use fixed PHY to represent the link layer.
+ */
+ struct fixed_phy_status fphy_status = {
+ .link = 1,
+ .speed = pd->phy_speed,
+ .duplex = pd->phy_duplex,
+ .pause = 0,
+ .asym_pause = 0,
+ };
+
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ if (!phydev || IS_ERR(phydev)) {
+ dev_err(kdev, "failed to register fixed PHY device\n");
+ return -ENODEV;
+ }
+ }
+
+ priv->phydev = phydev;
+ priv->phy_interface = pd->phy_interface;
+
+ return 0;
+}
+
+static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
+{
+ struct device_node *dn = priv->pdev->dev.of_node;
+
+ if (dn)
+ return bcmgenet_mii_of_init(priv);
+ else
+ return bcmgenet_mii_pd_init(priv);
+}
+
int bcmgenet_mii_init(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -447,7 +529,7 @@ int bcmgenet_mii_init(struct net_device *dev)
if (ret)
return ret;
- ret = bcmgenet_mii_of_init(priv);
+ ret = bcmgenet_mii_bus_init(priv);
if (ret)
goto out_free;
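
    The bcmmii.c hunks split PHY setup into a DT path and a platform-data
    path, with bcmgenet_mii_bus_init() dispatching on whether an of_node
    exists. On the platform-data side, the non-obvious idiom is the MDIO
    probe mask: phy_mask is a bitmap of addresses to skip, so restricting
    the scan to one address means clearing exactly that bit:

        /* Restrict MDIO probing to pd->phy_address when it is valid;
         * an all-zero mask probes every address (0..PHY_MAX_ADDR-1). */
        if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
                mdio->phy_mask = ~(1 << pd->phy_address);
        else
                mdio->phy_mask = 0;
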
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 77f8f836cbbe..bb48a610b72a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10541,19 +10541,14 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
udelay(100);
if (tg3_flag(tp, ENABLE_RSS)) {
+ u32 rss_key[10];
+
tg3_rss_write_indir_tbl(tp);
- /* Setup the "secret" hash key. */
- tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
- tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
- tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
- tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
- tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
- tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
- tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
- tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
- tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
- tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
+ netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
+
+ for (i = 0; i < 10; i++)
+ tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
}
tp->rx_mode = RX_MODE_ENABLE;
@@ -12566,22 +12561,38 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
+static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
struct tg3 *tp = netdev_priv(dev);
int i;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
indir[i] = tp->rss_ind_tbl[i];
return 0;
}
-static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
+static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
+ const u8 hfunc)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
+ /* We require at least one supported parameter to be changed and no
+ * change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
tp->rss_ind_tbl[i] = indir[i];
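
    Three hunks in this section (bnx2x_rss(), tg3_reset_hw(), and the bnad
    change just below) converge on the same helper: netdev_rss_key_fill()
    copies from a single boot-time random Toeplitz key shared host-wide,
    so every NIC hashes consistently, replacing tg3's hardcoded constants
    and the prandom/get_random_bytes calls. A minimal usage sketch
    (foo_hw is hypothetical):

        static void foo_set_rss_key(struct foo_hw *hw)
        {
                u32 rss_key[10];        /* 40 bytes, as tg3 programs */

                netdev_rss_key_fill(rss_key, sizeof(rss_key));
                /* write rss_key[0..9] into the hash-key registers */
        }
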
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index c3861de9dc81..323721838cf9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2054,7 +2054,7 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
BFI_ENET_RSS_IPV4_TCP);
rx_config->rss_config.hash_mask =
bnad->num_rxp_per_rx - 1;
- get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
+ netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
sizeof(rx_config->rss_config.toeplitz_hash_key));
} else {
rx_config->rss_status = BNA_STATUS_T_DISABLED;
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 9e089d24466e..6932be08c62c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -35,8 +35,8 @@ config MACB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
- Ethernet MAC found in some ARM SoC devices). Note: the Gigabit mode
- is not yet supported. Say Y to include support for the MACB/GEM chip.
+ Ethernet MAC found in some ARM SoC devices). Say Y to include
+ support for the MACB/GEM chip.
To compile this driver as a module, choose M here: the module
will be called macb.
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 4c5879389003..babe2a915b00 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -301,7 +301,7 @@ unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
struct sched_port *p = &s->p[port];
unsigned int max_avail_segs;
- pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
+ pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
if (speed)
p->speed = speed;
if (mtu)
@@ -1025,7 +1025,7 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
/**
* get_packet - return the next ingress packet buffer
- * @pdev: the PCI device that received the packet
+ * @adapter: the adapter that received the packet
* @fl: the SGE free list holding the packet
* @len: the actual packet length, excluding any SGE padding
*
@@ -1037,14 +1037,15 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
* threshold and the packet is too big to copy, or (b) the packet should
* be copied but there is no memory for the copy.
*/
-static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+static inline struct sk_buff *get_packet(struct adapter *adapter,
struct freelQ *fl, unsigned int len)
{
- struct sk_buff *skb;
const struct freelQ_ce *ce = &fl->centries[fl->cidx];
+ struct pci_dev *pdev = adapter->pdev;
+ struct sk_buff *skb;
if (len < copybreak) {
- skb = netdev_alloc_skb_ip_align(NULL, len);
+ skb = napi_alloc_skb(&adapter->napi, len);
if (!skb)
goto use_orig_buf;
@@ -1357,7 +1358,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
struct sge_port_stats *st;
struct net_device *dev;
- skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
+ skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
if (unlikely(!skb)) {
sge->stats.rx_drops++;
return;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 1df65c915b99..b85280775997 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
+cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 0514b7431746..2c37e1bf253a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -222,6 +222,12 @@ struct tp_err_stats {
u32 ofldCongDefer;
};
+struct sge_params {
+ u32 hps; /* host page size for our PF/VF */
+ u32 eq_qpp; /* egress queues/page for our PF/VF */
+ u32 iq_qpp; /* ingress queues/page for our PF/VF */
+};
+
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
@@ -285,6 +291,7 @@ enum chip_type {
};
struct adapter_params {
+ struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
@@ -318,10 +325,10 @@ struct adapter_params {
#include "t4fw_api.h"
#define FW_VERSION(chip) ( \
- FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
- FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
- FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
- FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
+ FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
+ FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
+ FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
+ FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
struct fw_info {
@@ -354,7 +361,7 @@ struct link_config {
unsigned char link_ok; /* link up? */
};
-#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
@@ -431,7 +438,8 @@ struct sge_fl { /* SGE free-buffer queue state */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
__be64 *desc; /* address of HW Rx descriptor ring */
dma_addr_t addr; /* bus address of HW ring start */
- u64 udb; /* BAR2 offset of User Doorbell area */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
};
/* A packet gather list */
@@ -461,7 +469,8 @@ struct sge_rspq { /* state for an SGE response queue */
u16 abs_id; /* absolute SGE id for the response q */
__be64 *desc; /* address of HW response ring */
dma_addr_t phys_addr; /* physical address of the ring */
- u64 udb; /* BAR2 offset of User Doorbell area */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
unsigned int iqe_len; /* entry size */
unsigned int size; /* capacity of response queue */
struct adapter *adap;
@@ -519,7 +528,8 @@ struct sge_txq {
int db_disabled;
unsigned short db_pidx;
unsigned short db_pidx_inc;
- u64 udb; /* BAR2 offset of User Doorbell area */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
};
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
@@ -995,6 +1005,15 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
+int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
@@ -1085,4 +1104,5 @@ void t4_db_dropped(struct adapter *adapter);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
+void t4_free_mem(void *addr);
#endif /* __CXGB4_H__ */
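
    The queue structures above trade the old raw u64 udb doorbell offsets
    for a cached ioremapped address plus a BAR2 queue id, both produced by
    t4_bar2_sge_qregs(). A sketch of the setup call against the API as
    declared (the adap->bar2 field name is an assumption for the
    ioremapped BAR2 base):

        u64 bar2_qoffset;
        unsigned int bar2_qid;
        int ret;

        ret = t4_bar2_sge_qregs(adap, txq->q.cntxt_id, T4_BAR2_QTYPE_EGRESS,
                                &bar2_qoffset, &bar2_qid);
        if (!ret) {
                txq->q.bar2_addr = adap->bar2 + bar2_qoffset;
                txq->q.bar2_qid  = bar2_qid;
        }
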
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4fe33606f372..a35d1ec6950e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -243,7 +243,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
const struct fw_port_cmd *pcmd)
{
const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
- int port = FW_PORT_CMD_PORTID_GET(be32_to_cpu(pcmd->op_to_portid));
+ int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
struct net_device *dev = adap->port[port];
struct port_info *pi = netdev_priv(dev);
struct port_dcb_info *dcb = &pi->dcb;
@@ -256,12 +256,12 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) {
enum cxgb4_dcb_state_input input =
((pcmd->u.dcb.control.all_syncd_pkd &
- FW_PORT_CMD_ALL_SYNCD)
+ FW_PORT_CMD_ALL_SYNCD_F)
? CXGB4_DCB_STATE_FW_ALLSYNCED
: CXGB4_DCB_STATE_FW_INCOMPLETE);
if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
- dcb_running_version = FW_PORT_CMD_DCB_VERSION_GET(
+ dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
be16_to_cpu(
pcmd->u.dcb.control.dcb_version_to_app_state));
if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 ||
@@ -519,7 +519,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
- pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+ pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
if (err != FW_PORT_DCB_CFG_SUCCESS)
@@ -583,7 +583,7 @@ static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid,
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
- pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+ pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
@@ -623,7 +623,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
- pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+ pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC;
pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
@@ -842,7 +842,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
/* write out new app table entry */
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
- pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+ pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);
pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 2a6aa88984f4..31ce425616c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -42,12 +42,12 @@
do { \
memset(&(__pcmd), 0, sizeof(__pcmd)); \
(__pcmd).op_to_portid = \
- cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | \
- FW_CMD_REQUEST | \
- FW_CMD_##__op | \
- FW_PORT_CMD_PORTID(__port)); \
+ cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | \
+ FW_CMD_REQUEST_F | \
+ FW_CMD_##__op##_F | \
+ FW_PORT_CMD_PORTID_V(__port)); \
(__pcmd).action_to_len16 = \
- cpu_to_be32(FW_PORT_CMD_ACTION(__action) | \
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(__action) | \
FW_LEN16(pcmd)); \
} while (0)
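
    The _G/_V/_F renames running through the cxgb4 hunks follow one
    convention for firmware/register field accessors: _S is the shift, _M
    the unshifted mask, _V(x) places a value into the field, _G(x)
    extracts it, and single-bit fields get a preshifted _F flag.
    Illustrative definitions (the PORTID layout here is for shape only,
    not the real field width):

        #define FW_PORT_CMD_PORTID_S    0
        #define FW_PORT_CMD_PORTID_M    0xf
        #define FW_PORT_CMD_PORTID_V(x) ((x) << FW_PORT_CMD_PORTID_S)
        #define FW_PORT_CMD_PORTID_G(x) \
                (((x) >> FW_PORT_CMD_PORTID_S) & FW_PORT_CMD_PORTID_M)

        /* single-bit fields additionally get a preshifted flag: */
        #define FW_CMD_REQUEST_F        FW_CMD_REQUEST_V(1U)
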
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
new file mode 100644
index 000000000000..c98a350d857e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -0,0 +1,158 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+#include <linux/sort.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+#include "cxgb4_debugfs.h"
+#include "l2t.h"
+
+static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file_inode(file)->i_size;
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct adapter *adap = file->private_data - mem;
+ __be32 *data;
+ int ret;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ data = t4_alloc_mem(count);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock(&adap->win0_lock);
+ ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
+ spin_unlock(&adap->win0_lock);
+ if (ret) {
+ t4_free_mem(data);
+ return ret;
+ }
+ ret = copy_to_user(buf, data, count);
+
+ t4_free_mem(data);
+ if (ret)
+ return -EFAULT;
+
+ *ppos = pos + count;
+ return count;
+}
+
+static const struct file_operations mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mem_read,
+ .llseek = default_llseek,
+};
+
+static void add_debugfs_mem(struct adapter *adap, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
+ (void *)adap + idx, &mem_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = size_mb << 20;
+}
+
+/* Add an array of debugfs files. */
+void add_debugfs_files(struct adapter *adap,
+ struct t4_debugfs_entry *files,
+ unsigned int nfiles)
+{
+ int i;
+
+ /* debugfs support is best effort */
+ for (i = 0; i < nfiles; i++)
+ debugfs_create_file(files[i].name, files[i].mode,
+ adap->debugfs_root,
+ (void *)adap + files[i].data,
+ files[i].ops);
+}
+
+int t4_setup_debugfs(struct adapter *adap)
+{
+ int i;
+ u32 size;
+
+ static struct t4_debugfs_entry t4_debugfs_files[] = {
+ { "l2t", &t4_l2t_fops, S_IRUSR, 0},
+ };
+
+ add_debugfs_files(adap,
+ t4_debugfs_files,
+ ARRAY_SIZE(t4_debugfs_files));
+
+ i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (i & EDRAM0_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size));
+ }
+ if (i & EDRAM1_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
+ }
+ if (is_t4(adap->params.chip)) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+ if (i & EXT_MEM_ENABLE_F)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_G(size));
+ } else {
+ if (i & EXT_MEM0_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ add_debugfs_mem(adap, "mc0", MEM_MC0,
+ EXT_MEM0_SIZE_G(size));
+ }
+ if (i & EXT_MEM1_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ add_debugfs_mem(adap, "mc1", MEM_MC1,
+ EXT_MEM1_SIZE_G(size));
+ }
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
new file mode 100644
index 000000000000..a3d8867efd3d
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_DEBUGFS_H
+#define __CXGB4_DEBUGFS_H
+
+#include <linux/export.h>
+
+struct t4_debugfs_entry {
+ const char *name;
+ const struct file_operations *ops;
+ mode_t mode;
+ unsigned char data;
+};
+
+int t4_setup_debugfs(struct adapter *adap);
+void add_debugfs_files(struct adapter *adap,
+ struct t4_debugfs_entry *files,
+ unsigned int nfiles);
+
+#endif
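
    Registration is table-driven: each t4_debugfs_entry names a file, its
    fops and mode, plus a small 'data' byte that is OR'd into the adapter
    pointer as private data -- which is why mem_read() above recovers it
    with '& 3'. A sketch of registering files through the helper (my_files
    is a hypothetical caller-side table):

        static struct t4_debugfs_entry my_files[] = {
                { "l2t", &t4_l2t_fops, S_IRUSR, 0 },
        };

        add_debugfs_files(adap, my_files, ARRAY_SIZE(my_files));
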
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 279873cb6e3a..c8c5b3d36d4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -61,6 +61,7 @@
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
+#include <net/bonding.h>
#include <asm/uaccess.h>
#include "cxgb4.h"
@@ -68,10 +69,9 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_dcb.h"
+#include "cxgb4_debugfs.h"
#include "l2t.h"
-#include <../drivers/net/bonding/bonding.h>
-
#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
@@ -141,7 +141,7 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
* Give PF's access to all of the ports.
*/
if (vf == 0)
- return FW_PFVF_CMD_PMASK_MASK;
+ return FW_PFVF_CMD_PMASK_M;
/*
* For VFs, we'll assign them access to the ports based purely on the
@@ -210,114 +210,25 @@ struct filter_entry {
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
-#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
-
-static const struct pci_device_id cxgb4_pci_tbl[] = {
- CH_DEVICE(0xa000, 0), /* PE10K */
- CH_DEVICE(0x4001, -1),
- CH_DEVICE(0x4002, -1),
- CH_DEVICE(0x4003, -1),
- CH_DEVICE(0x4004, -1),
- CH_DEVICE(0x4005, -1),
- CH_DEVICE(0x4006, -1),
- CH_DEVICE(0x4007, -1),
- CH_DEVICE(0x4008, -1),
- CH_DEVICE(0x4009, -1),
- CH_DEVICE(0x400a, -1),
- CH_DEVICE(0x400d, -1),
- CH_DEVICE(0x400e, -1),
- CH_DEVICE(0x4080, -1),
- CH_DEVICE(0x4081, -1),
- CH_DEVICE(0x4082, -1),
- CH_DEVICE(0x4083, -1),
- CH_DEVICE(0x4084, -1),
- CH_DEVICE(0x4085, -1),
- CH_DEVICE(0x4086, -1),
- CH_DEVICE(0x4087, -1),
- CH_DEVICE(0x4088, -1),
- CH_DEVICE(0x4401, 4),
- CH_DEVICE(0x4402, 4),
- CH_DEVICE(0x4403, 4),
- CH_DEVICE(0x4404, 4),
- CH_DEVICE(0x4405, 4),
- CH_DEVICE(0x4406, 4),
- CH_DEVICE(0x4407, 4),
- CH_DEVICE(0x4408, 4),
- CH_DEVICE(0x4409, 4),
- CH_DEVICE(0x440a, 4),
- CH_DEVICE(0x440d, 4),
- CH_DEVICE(0x440e, 4),
- CH_DEVICE(0x4480, 4),
- CH_DEVICE(0x4481, 4),
- CH_DEVICE(0x4482, 4),
- CH_DEVICE(0x4483, 4),
- CH_DEVICE(0x4484, 4),
- CH_DEVICE(0x4485, 4),
- CH_DEVICE(0x4486, 4),
- CH_DEVICE(0x4487, 4),
- CH_DEVICE(0x4488, 4),
- CH_DEVICE(0x5001, 4),
- CH_DEVICE(0x5002, 4),
- CH_DEVICE(0x5003, 4),
- CH_DEVICE(0x5004, 4),
- CH_DEVICE(0x5005, 4),
- CH_DEVICE(0x5006, 4),
- CH_DEVICE(0x5007, 4),
- CH_DEVICE(0x5008, 4),
- CH_DEVICE(0x5009, 4),
- CH_DEVICE(0x500A, 4),
- CH_DEVICE(0x500B, 4),
- CH_DEVICE(0x500C, 4),
- CH_DEVICE(0x500D, 4),
- CH_DEVICE(0x500E, 4),
- CH_DEVICE(0x500F, 4),
- CH_DEVICE(0x5010, 4),
- CH_DEVICE(0x5011, 4),
- CH_DEVICE(0x5012, 4),
- CH_DEVICE(0x5013, 4),
- CH_DEVICE(0x5014, 4),
- CH_DEVICE(0x5015, 4),
- CH_DEVICE(0x5080, 4),
- CH_DEVICE(0x5081, 4),
- CH_DEVICE(0x5082, 4),
- CH_DEVICE(0x5083, 4),
- CH_DEVICE(0x5084, 4),
- CH_DEVICE(0x5085, 4),
- CH_DEVICE(0x5086, 4),
- CH_DEVICE(0x5087, 4),
- CH_DEVICE(0x5088, 4),
- CH_DEVICE(0x5401, 4),
- CH_DEVICE(0x5402, 4),
- CH_DEVICE(0x5403, 4),
- CH_DEVICE(0x5404, 4),
- CH_DEVICE(0x5405, 4),
- CH_DEVICE(0x5406, 4),
- CH_DEVICE(0x5407, 4),
- CH_DEVICE(0x5408, 4),
- CH_DEVICE(0x5409, 4),
- CH_DEVICE(0x540A, 4),
- CH_DEVICE(0x540B, 4),
- CH_DEVICE(0x540C, 4),
- CH_DEVICE(0x540D, 4),
- CH_DEVICE(0x540E, 4),
- CH_DEVICE(0x540F, 4),
- CH_DEVICE(0x5410, 4),
- CH_DEVICE(0x5411, 4),
- CH_DEVICE(0x5412, 4),
- CH_DEVICE(0x5413, 4),
- CH_DEVICE(0x5414, 4),
- CH_DEVICE(0x5415, 4),
- CH_DEVICE(0x5480, 4),
- CH_DEVICE(0x5481, 4),
- CH_DEVICE(0x5482, 4),
- CH_DEVICE(0x5483, 4),
- CH_DEVICE(0x5484, 4),
- CH_DEVICE(0x5485, 4),
- CH_DEVICE(0x5486, 4),
- CH_DEVICE(0x5487, 4),
- CH_DEVICE(0x5488, 4),
- { 0, }
-};
+/* Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static struct pci_device_id cxgb4_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+
+/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
+ * called for both.
+ */
+#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ {PCI_VDEVICE(CHELSIO, (devid)), 4}
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { 0, } \
+ }
+
+#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
@@ -512,9 +423,10 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
u32 name, value;
int err;
- name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
- FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
+ name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(
+ FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
+ FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
value = enable ? i : 0xffffffff;
/* Since we can be called while atomic (from "interrupt
@@ -709,7 +621,7 @@ EXPORT_SYMBOL(cxgb4_dcb_enabled);
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
- int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
+ int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
struct net_device *dev = adap->port[port];
int old_dcb_enabled = cxgb4_dcb_enabled(dev);
int new_dcb_enabled;
@@ -832,17 +744,17 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
#ifdef CONFIG_CHELSIO_T4_DCB
const struct fw_port_cmd *pcmd = (const void *)p->data;
- unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
+ unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
unsigned int action =
- FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
+ FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
if (cmd == FW_PORT_CMD &&
action == FW_PORT_ACTION_GET_PORT_INFO) {
- int port = FW_PORT_CMD_PORTID_GET(
+ int port = FW_PORT_CMD_PORTID_G(
be32_to_cpu(pcmd->op_to_portid));
struct net_device *dev = q->adap->port[port];
int state_input = ((pcmd->u.info.dcbxdis_pkd &
- FW_PORT_CMD_DCBXDIS)
+ FW_PORT_CMD_DCBXDIS_F)
? CXGB4_DCB_INPUT_FW_DISABLED
: CXGB4_DCB_INPUT_FW_ENABLED);
@@ -1287,7 +1199,7 @@ void *t4_alloc_mem(size_t size)
/*
* Free memory allocated through alloc_mem().
*/
-static void t4_free_mem(void *addr)
+void t4_free_mem(void *addr)
{
if (is_vmalloc_addr(addr))
vfree(addr);
@@ -1339,52 +1251,52 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
* filter specification structure but for now it's easiest to simply
* put this fairly direct code in line ...
*/
- fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
- fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
+ fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
fwr->tid_to_iq =
- htonl(V_FW_FILTER_WR_TID(ftid) |
- V_FW_FILTER_WR_RQTYPE(f->fs.type) |
- V_FW_FILTER_WR_NOREPLY(0) |
- V_FW_FILTER_WR_IQ(f->fs.iq));
+ htonl(FW_FILTER_WR_TID_V(ftid) |
+ FW_FILTER_WR_RQTYPE_V(f->fs.type) |
+ FW_FILTER_WR_NOREPLY_V(0) |
+ FW_FILTER_WR_IQ_V(f->fs.iq));
fwr->del_filter_to_l2tix =
- htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
- V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
- V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
- V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
- V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
- V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
- V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
- V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
- V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
+ htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
+ FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
+ FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
+ FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
+ FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+ FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+ FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+ FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
- V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
+ FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
- V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
- V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
- V_FW_FILTER_WR_PRIO(f->fs.prio) |
- V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
+ FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
+ FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
+ FW_FILTER_WR_PRIO_V(f->fs.prio) |
+ FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
fwr->ethtype = htons(f->fs.val.ethtype);
fwr->ethtypem = htons(f->fs.mask.ethtype);
fwr->frag_to_ovlan_vldm =
- (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
- V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
- V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
- V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
- V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
- V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
+ (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
+ FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
+ FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
fwr->smac_sel = 0;
fwr->rx_chan_rx_rpl_iq =
- htons(V_FW_FILTER_WR_RX_CHAN(0) |
- V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
+ htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
fwr->maci_to_matchtypem =
- htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
- V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
- V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
- V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
- V_FW_FILTER_WR_PORT(f->fs.val.iport) |
- V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
- V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
- V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
+ htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
+ FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
+ FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
+ FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
+ FW_FILTER_WR_PORT_V(f->fs.val.iport) |
+ FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
+ FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
+ FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
fwr->ptcl = f->fs.val.proto;
fwr->ptclm = f->fs.mask.proto;
fwr->ttyp = f->fs.val.tos;
@@ -1615,14 +1527,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
- FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
- FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
- FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
- FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
- FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
- FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
- FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
- FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2721,9 +2633,10 @@ static int set_rspq_intr_params(struct sge_rspq *q,
new_idx = closest_thres(&adap->sge, cnt);
if (q->desc && q->pktcnt_idx != new_idx) {
/* the queue has already been created, update it */
- v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
- FW_PARAMS_PARAM_YZ(q->cntxt_id);
+ v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
&new_idx);
if (err)
@@ -2937,7 +2850,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
int ret;
const struct firmware *fw;
struct adapter *adap = netdev2adap(netdev);
- unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
+ unsigned int mbox = PCIE_FW_MASTER_M + 1;
ef->data[sizeof(ef->data) - 1] = '\0';
ret = request_firmware(&fw, ef->data, adap->pdev_dev);
@@ -3014,21 +2927,35 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
-static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
+static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!p)
+ return 0;
while (n--)
p[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
+static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
+ const u8 hfunc)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
+ /* We require at least one supported parameter to be changed and no
+ * change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!p)
+ return 0;
+
for (i = 0; i < pi->rss_size; i++)
pi->rss[i] = p[i];
if (pi->adapter->flags & FULL_INIT_DONE)
@@ -3048,45 +2975,45 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
info->data = 0;
switch (info->flow_type) {
case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
info->data = RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
info->data = RXH_IP_SRC | RXH_IP_DST;
break;
}
@@ -3131,102 +3058,14 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.flash_device = set_flash,
};
-/*
- * debugfs support
- */
-static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- loff_t pos = *ppos;
- loff_t avail = file_inode(file)->i_size;
- unsigned int mem = (uintptr_t)file->private_data & 3;
- struct adapter *adap = file->private_data - mem;
- __be32 *data;
- int ret;
-
- if (pos < 0)
- return -EINVAL;
- if (pos >= avail)
- return 0;
- if (count > avail - pos)
- count = avail - pos;
-
- data = t4_alloc_mem(count);
- if (!data)
- return -ENOMEM;
-
- spin_lock(&adap->win0_lock);
- ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
- spin_unlock(&adap->win0_lock);
- if (ret) {
- t4_free_mem(data);
- return ret;
- }
- ret = copy_to_user(buf, data, count);
-
- t4_free_mem(data);
- if (ret)
- return -EFAULT;
-
- *ppos = pos + count;
- return count;
-}
-
-static const struct file_operations mem_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = mem_read,
- .llseek = default_llseek,
-};
-
-static void add_debugfs_mem(struct adapter *adap, const char *name,
- unsigned int idx, unsigned int size_mb)
-{
- struct dentry *de;
-
- de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
- (void *)adap + idx, &mem_debugfs_fops);
- if (de && de->d_inode)
- de->d_inode->i_size = size_mb << 20;
-}
-
static int setup_debugfs(struct adapter *adap)
{
- int i;
- u32 size;
-
if (IS_ERR_OR_NULL(adap->debugfs_root))
return -1;
- i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
- if (i & EDRAM0_ENABLE) {
- size = t4_read_reg(adap, MA_EDRAM0_BAR);
- add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
- }
- if (i & EDRAM1_ENABLE) {
- size = t4_read_reg(adap, MA_EDRAM1_BAR);
- add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
- }
- if (is_t4(adap->params.chip)) {
- size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
- if (i & EXT_MEM_ENABLE)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(size));
- } else {
- if (i & EXT_MEM_ENABLE) {
- size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
- add_debugfs_mem(adap, "mc0", MEM_MC0,
- EXT_MEM_SIZE_GET(size));
- }
- if (i & EXT_MEM1_ENABLE) {
- size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
- add_debugfs_mem(adap, "mc1", MEM_MC1,
- EXT_MEM_SIZE_GET(size));
- }
- }
- if (adap->l2t)
- debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
- &t4_l2t_fops);
+#ifdef CONFIG_DEBUG_FS
+ t4_setup_debugfs(adap);
+#endif
return 0;
}
@@ -3508,9 +3347,9 @@ int cxgb4_clip_get(const struct net_device *dev,
adap = netdev2adap(dev);
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
- c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
c.ip_hi = *(__be64 *)(lip->s6_addr);
c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
@@ -3525,9 +3364,9 @@ int cxgb4_clip_release(const struct net_device *dev,
adap = netdev2adap(dev);
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
- c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
c.ip_hi = *(__be64 *)(lip->s6_addr);
c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
@@ -3568,7 +3407,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->local_ip = sip;
req->peer_ip = htonl(0);
chan = rxq_to_chan(&adap->sge, queue);
- req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
ret = t4_mgmt_tx(adap, skb);
@@ -3611,7 +3450,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
req->peer_ip_hi = cpu_to_be64(0);
req->peer_ip_lo = cpu_to_be64(0);
chan = rxq_to_chan(&adap->sge, queue);
- req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
ret = t4_mgmt_tx(adap, skb);
@@ -3893,7 +3732,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
struct adapter *adap;
u32 offset, memtype, memaddr;
- u32 edc0_size, edc1_size, mc0_size, mc1_size;
+ u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
u32 edc0_end, edc1_end, mc0_end, mc1_end;
int ret;
@@ -3907,9 +3746,12 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
* and EDC1. Some cards will have neither MC0 nor MC1, most cards have
* MC0, and some have both MC0 and MC1.
*/
- edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
- edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
- mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
+ size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ edc0_size = EDRAM0_SIZE_G(size) << 20;
+ size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ edc1_size = EDRAM1_SIZE_G(size) << 20;
+ size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ mc0_size = EXT_MEM0_SIZE_G(size) << 20;
edc0_end = edc0_size;
edc1_end = edc0_end + edc1_size;
@@ -3929,9 +3771,8 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
/* T4 only has a single memory channel */
goto err;
} else {
- mc1_size = EXT_MEM_SIZE_GET(
- t4_read_reg(adap,
- MA_EXT_MEMORY1_BAR)) << 20;
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ mc1_size = EXT_MEM1_SIZE_G(size) << 20;
mc1_end = mc0_end + mc1_size;
if (offset < mc1_end) {
memtype = MEM_MC1;
@@ -3968,6 +3809,22 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
+int cxgb4_bar2_sge_qregs(struct net_device *dev,
+ unsigned int qid,
+ enum cxgb4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ return t4_bar2_sge_qregs(netdev2adap(dev),
+ qid,
+ (qtype == CXGB4_BAR2_QTYPE_EGRESS
+ ? T4_BAR2_QTYPE_EGRESS
+ : T4_BAR2_QTYPE_INGRESS),
+ pbar2_qoffset,
+ pbar2_qid);
+}
+EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
+
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
@@ -4150,31 +4007,18 @@ static void process_db_drop(struct work_struct *work)
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
u16 pidx_inc = dropped_db & 0x1fff;
- unsigned int s_qpp;
- unsigned short udb_density;
- unsigned long qpshift;
- int page;
- u32 udb;
+ u64 bar2_qoffset;
+ unsigned int bar2_qid;
+ int ret;
- dev_warn(adap->pdev_dev,
- "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
- dropped_db, qid,
- (dropped_db >> 14) & 1,
- (dropped_db >> 13) & 1,
- pidx_inc);
-
- drain_db_fifo(adap, 1);
-
- s_qpp = QUEUESPERPAGEPF1 * adap->fn;
- udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
- SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
- qpshift = PAGE_SHIFT - ilog2(udb_density);
- udb = qid << qpshift;
- udb &= PAGE_MASK;
- page = udb / PAGE_SIZE;
- udb += (qid - (page * udb_density)) * 128;
-
- writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
+ ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+ &bar2_qoffset, &bar2_qid);
+ if (ret)
+ dev_err(adap->pdev_dev, "doorbell drop recovery: "
+ "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
+ else
+ writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
+ adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
/* Re-enable BAR2 WC */
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
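The deleted lines above were the last open-coded copy of the User Doorbell
address math that t4_bar2_sge_qregs() now centralizes. Condensed from those
lines (qpp_reg stands for the SGE_EGRESS_QUEUES_PER_PAGE_PF register value),
the computation was roughly:

    s_qpp       = QUEUESPERPAGEPF1 * adap->fn;
    udb_density = 1 << QUEUESPERPAGEPF0_GET(qpp_reg >> s_qpp);
    qpshift     = PAGE_SHIFT - ilog2(udb_density);
    udb         = (qid << qpshift) & PAGE_MASK;      /* doorbell page */
    page        = udb / PAGE_SIZE;
    udb        += (qid - page * udb_density) * 128;  /* queue's slot in page */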
@@ -4232,12 +4076,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.adapter_type = adap->params.chip;
lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
- lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
- t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
- (adap->fn * 4));
- lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
- t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
- (adap->fn * 4));
+ lli.udb_density = 1 << adap->params.sge.eq_qpp;
+ lli.ucq_density = 1 << adap->params.sge.iq_qpp;
lli.filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
@@ -4399,8 +4239,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
if (cxgb4_netdev(event_dev)) {
switch (event) {
case NETDEV_UP:
- ret = cxgb4_clip_get(event_dev,
- (const struct in6_addr *)ifa->addr.s6_addr);
+ ret = cxgb4_clip_get(event_dev, &ifa->addr);
if (ret < 0) {
rcu_read_unlock();
return ret;
@@ -4408,8 +4247,7 @@ static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
ret = NOTIFY_OK;
break;
case NETDEV_DOWN:
- cxgb4_clip_release(event_dev,
- (const struct in6_addr *)ifa->addr.s6_addr);
+ cxgb4_clip_release(event_dev, &ifa->addr);
ret = NOTIFY_OK;
break;
default:
@@ -4478,8 +4316,7 @@ static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
- ret = cxgb4_clip_get(dev,
- (const struct in6_addr *)ifa->addr.s6_addr);
+ ret = cxgb4_clip_get(dev, &ifa->addr);
if (ret < 0)
break;
}
@@ -4960,14 +4797,14 @@ static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
*/
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
ldst_cmd.op_to_addrspace =
- htonl(FW_CMD_OP(FW_LDST_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
- FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+ htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+ ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
ldst_cmd.u.pcie.ctrl_to_fn =
- (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
+ (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
ldst_cmd.u.pcie.r = reg;
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
&ldst_cmd);
@@ -5054,8 +4891,8 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
/* get device capabilities */
memset(c, 0, sizeof(*c));
- c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
+ c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
if (ret < 0)
@@ -5071,16 +4908,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
dev_err(adap->pdev_dev, "virtualization ACLs not supported");
return ret;
}
- c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
+ c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
if (ret < 0)
return ret;
ret = t4_config_glbl_rss(adap, adap->fn,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
if (ret < 0)
return ret;
@@ -5241,8 +5078,8 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (cf->size >= FLASH_CFG_MAX_SIZE)
ret = -ENOMEM;
else {
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adapter, adapter->mbox,
adapter->fn, 0, 1, params, val);
if (ret == 0) {
@@ -5260,8 +5097,8 @@ static int adap_init0_config(struct adapter *adapter, int reset)
size_t size = cf->size & ~0x3;
__be32 *data = (__be32 *)cf->data;
- mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
- maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
+ mtype = FW_PARAMS_PARAM_Y_G(val[0]);
+ maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
spin_lock(&adapter->win0_lock);
ret = t4_memory_rw(adapter, 0, mtype, maddr,
@@ -5298,13 +5135,13 @@ static int adap_init0_config(struct adapter *adapter, int reset)
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
caps_cmd.cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
@@ -5318,9 +5155,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret == -ENOENT) {
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
sizeof(caps_cmd), &caps_cmd);
@@ -5343,9 +5180,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* And now tell the firmware to use the configuration we just loaded.
*/
caps_cmd.op_to_write =
- htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
+ htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
@@ -5416,8 +5253,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
* Get device capabilities and select which we'll be using.
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
+ caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
@@ -5433,8 +5270,8 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
goto bye;
}
- caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
+ caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
if (ret < 0)
@@ -5456,10 +5293,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
adapter->flags |= RSS_TNLALLLOOKUP;
ret = t4_config_glbl_rss(adapter, adapter->mbox,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
- FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
+ FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
((adapter->flags & RSS_TNLALLLOOKUP) ?
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
if (ret < 0)
goto bye;
@@ -5470,7 +5307,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
PFRES_NEQ, PFRES_NETHCTRL,
PFRES_NIQFLINT, PFRES_NIQ,
PFRES_TC, PFRES_NVI,
- FW_PFVF_CMD_CMASK_MASK,
+ FW_PFVF_CMD_CMASK_M,
pfvfres_pmask(adapter, adapter->fn, 0),
PFRES_NEXACTF,
PFRES_R_CAPS, PFRES_WX_CAPS);
@@ -5515,7 +5352,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
VFRES_NEQ, VFRES_NETHCTRL,
VFRES_NIQFLINT, VFRES_NIQ,
VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_MASK,
+ FW_PFVF_CMD_CMASK_M,
pfvfres_pmask(
adapter, pf, vf),
VFRES_NEXACTF,
@@ -5779,8 +5616,8 @@ static int adap_init0(struct adapter *adap)
* and portvec ...
*/
v =
- FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;
@@ -5802,7 +5639,6 @@ static int adap_init0(struct adapter *adap)
} else {
dev_info(adap->pdev_dev, "Coming up as MASTER: "\
"Initializing adapter\n");
-
/*
* If the firmware doesn't support Configuration
* Files warn user and exit,
@@ -5817,8 +5653,9 @@ static int adap_init0(struct adapter *adap)
* Find out whether we're dealing with a version of
* the firmware which has configuration file support.
*/
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(
+ FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
params, val);
@@ -5878,14 +5715,14 @@ static int adap_init0(struct adapter *adap)
* Grab some of our basic fundamental operating parameters.
*/
#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
- FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
- FW_PARAMS_PARAM_Y(0) | \
- FW_PARAMS_PARAM_Z(0)
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0)
params[0] = FW_PARAM_PFVF(EQ_START);
params[1] = FW_PARAM_PFVF(L2T_START);
@@ -5945,8 +5782,8 @@ static int adap_init0(struct adapter *adap)
* to manage.
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
+ caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
@@ -6092,6 +5929,7 @@ static int adap_init0(struct adapter *adap)
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
}
+ t4_init_sge_params(adap);
t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1366ba620c87..152b4c4c7809 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -52,10 +52,10 @@ enum {
};
#define INIT_TP_WR(w, tid) do { \
- (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
- FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
- (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
- FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \
+ FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ FW_WR_FLOWID_V(tid)); \
(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
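INIT_TP_WR() assumes the work request begins with the wr_hi/wr_mid/wr_lo
header. A hedged usage sketch (the WR layout and skb handling are illustrative;
struct work_request_hdr is the header type from t4_msg.h):

    struct my_tp_wr {                       /* hypothetical WR layout */
            struct work_request_hdr wr;     /* must come first */
            __be32 params[4];
    } *req;

    req = (struct my_tp_wr *)__skb_put(skb, sizeof(*req));
    memset(req, 0, sizeof(*req));
    INIT_TP_WR(req, tid);   /* fills wr_hi (op/immdlen), wr_mid (len16/flowid) */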
@@ -65,9 +65,10 @@ enum {
} while (0)
#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
- (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
- (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
- FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \
+ FW_WR_ATOMIC_V(atomic)); \
+ (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \
+ FW_WR_FLOWID_V(tid)); \
(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
@@ -304,4 +305,11 @@ void cxgb4_enable_db_coalescing(struct net_device *dev);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev);
+enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS };
+int cxgb4_bar2_sge_qregs(struct net_device *dev,
+ unsigned int qid,
+ enum cxgb4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
#endif /* !__CXGB4_OFLD_H */
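A sketch of how an upper-layer driver might consume the new export (the
variable names and error handling here are illustrative, not from a real ULD):

    u64 bar2_qoffset;
    unsigned int bar2_qid;
    int ret;

    ret = cxgb4_bar2_sge_qregs(netdev, qid, CXGB4_BAR2_QTYPE_EGRESS,
                               &bar2_qoffset, &bar2_qid);
    if (!ret) {
            /* Doorbell writes then target the ULD's BAR2 mapping at
             * bar2_qoffset, tagging each write with bar2_qid when the
             * queue shares a doorbell page (bar2_qid != 0).
             */
            db_addr = bar2_base + bar2_qoffset;     /* hypothetical mapping */
    }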
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 96041397ee15..a047baa9fd04 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -435,9 +435,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
if (tp->vnic_shift >= 0) {
u32 viid = cxgb4_port_viid(dev);
- u32 vf = FW_VIID_VIN_GET(viid);
- u32 pf = FW_VIID_PFN_GET(viid);
- u32 vld = FW_VIID_VIVLD_GET(viid);
+ u32 vf = FW_VIID_VIN_G(viid);
+ u32 pf = FW_VIID_PFN_G(viid);
+ u32 vld = FW_VIID_VIVLD_G(viid);
ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
V_FT_VNID_ID_PF(pf) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 39f2b13e66c7..f12debd98dac 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -527,14 +527,16 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
val |= DBPRIO(1);
wmb();
- /* If we're on T4, use the old doorbell mechanism; otherwise
- * use the new BAR2 mechanism.
+ /* If we don't have access to the new User Doorbell (T5+), use
+ * the old doorbell mechanism; otherwise use the new BAR2
+ * mechanism.
*/
- if (is_t4(adap->params.chip)) {
+ if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
val | QID(q->cntxt_id));
} else {
- writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL);
+ writel(val | QID(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to
* the User Doorbell area to be flushed.
@@ -576,7 +578,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
- gfp |= __GFP_NOWARN | __GFP_COLD;
+ gfp |= __GFP_NOWARN;
if (s->fl_pg_order == 0)
goto alloc_small_pages;
@@ -585,7 +587,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
* Prefer large buffers
*/
while (n) {
- pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
+ pg = __dev_alloc_pages(gfp, s->fl_pg_order);
if (unlikely(!pg)) {
q->large_alloc_failed++;
break; /* fall back to single pages */
@@ -615,7 +617,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
alloc_small_pages:
while (n--) {
- pg = __skb_alloc_page(gfp, NULL);
+ pg = __dev_alloc_page(gfp);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
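Dropping __GFP_COLD and __GFP_COMP at these call sites is safe because the new
helpers fold those flags in themselves; at the time of this series
__dev_alloc_pages() was essentially (paraphrased sketch, not the authoritative
definition):

    static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
                                                 unsigned int order)
    {
            gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
            return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
    }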
@@ -816,7 +818,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
sgl->addr0 = cpu_to_be64(addr[1]);
}
- sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
if (likely(--nfrags == 0))
return;
/*
@@ -850,14 +852,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*end = 0;
}
-/* This function copies a tx_desc struct to memory mapped BAR2 space(user space
- * writes). For coalesced WR SGE, fetches data from the FIFO instead of from
- * Host.
+/* This function copies a 64-byte coalesced work request to memory-mapped
+ * BAR2 space. For a coalesced WR, the SGE fetches data from the FIFO
+ * instead of from the host.
*/
-static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc)
+static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
- int count = sizeof(*desc) / sizeof(u64);
- u64 *src = (u64 *)desc;
+ int count = 8;
while (count) {
writeq(*src, dst);
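The hunk cuts the loop short; written out in full, the helper is presumably
just eight 64-bit MMIO writes:

    static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
    {
            int count = 8;          /* 64-byte WR = 8 x u64 */

            while (count) {
                    writeq(*src, dst);
                    src++;
                    dst++;
                    count--;
            }
    }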
@@ -879,7 +880,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
wmb(); /* write descriptors before telling HW */
- if (is_t4(adap->params.chip)) {
+ /* If we don't have access to the new User Doorbell (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(q->bar2_addr == NULL)) {
u32 val = PIDX(n);
unsigned long flags;
@@ -905,21 +909,22 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
*/
WARN_ON(val & DBPRIO(1));
- /* For T5 and later we use the Write-Combine mapped BAR2 User
- * Doorbell mechanism. If we're only writing a single TX
- * Descriptor and TX Write Combining hasn't been disabled, we
- * can use the Write Combining Gather Buffer; otherwise we use
- * the simple doorbell.
+ /* If we're only writing a single TX Descriptor and Inferred QID
+ * registers are available, we can use the Write Combining Gather
+ * Buffer; otherwise we use the simple doorbell.
*/
- if (n == 1) {
+ if (n == 1 && q->bar2_qid == 0) {
int index = (q->pidx
? (q->pidx - 1)
: (q->size - 1));
+ u64 *wr = (u64 *)&q->desc[index];
- cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL,
- q->desc + index);
+ cxgb_pio_copy((u64 __iomem *)
+ (q->bar2_addr + SGE_UDB_WCDOORBELL),
+ wr);
} else {
- writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL);
+ writel(val | QID(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_KDOORBELL);
}
/* This Write Memory Barrier will force the write to the User
@@ -1092,10 +1097,10 @@ out_free: dev_kfree_skb_any(skb);
goto out_free;
}
- wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1112,8 +1117,8 @@ out_free: dev_kfree_skb_any(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
len += sizeof(*lso);
- wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(len));
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(len));
lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE | LSO_LAST_SLICE |
LSO_IPV6(v6) |
@@ -1135,8 +1140,8 @@ out_free: dev_kfree_skb_any(skb);
q->tx_cso += ssi->gso_segs;
} else {
len += sizeof(*cpl);
- wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(len));
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
@@ -1224,7 +1229,7 @@ static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
reclaim_completed_tx_imm(&q->q);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
- wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
q->q.stops++;
q->full = 1;
}
@@ -1406,7 +1411,7 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
- wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
q->q.stops++;
q->full = 1;
}
@@ -1997,11 +2002,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
params = QINTR_TIMER_IDX(7);
val = CIDXINC(work_done) | SEINTARM(params);
- if (is_t4(q->adap->params.chip)) {
+
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
val | INGRESSQID((u32)q->cntxt_id));
} else {
- writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS);
+ writel(val | INGRESSQID(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_GTS);
wmb();
}
return work_done;
@@ -2047,11 +2057,16 @@ static unsigned int process_intrq(struct adapter *adap)
}
val = CIDXINC(credits) | SEINTARM(q->intr_params);
- if (is_t4(adap->params.chip)) {
+
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(q->bar2_addr == NULL)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
val | INGRESSQID(q->cntxt_id));
} else {
- writel(val, adap->bar2 + q->udb + SGE_UDB_GTS);
+ writel(val | INGRESSQID(q->bar2_qid),
+ q->bar2_addr + SGE_UDB_GTS);
wmb();
}
spin_unlock(&adap->sge.intrq_lock);
@@ -2235,48 +2250,32 @@ static void sge_tx_timer_cb(unsigned long data)
}
/**
- * udb_address - return the BAR2 User Doorbell address for a Queue
- * @adap: the adapter
- * @cntxt_id: the Queue Context ID
- * @qpp: Queues Per Page (for all PFs)
+ * bar2_address - return the BAR2 address for an SGE Queue's Registers
+ * @adapter: the adapter
+ * @qid: the SGE Queue ID
+ * @qtype: the SGE Queue Type (Egress or Ingress)
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
*
- * Returns the BAR2 address of the user Doorbell associated with the
- * indicated Queue Context ID. Note that this is only applicable
- * for T5 and later.
- */
-static u64 udb_address(struct adapter *adap, unsigned int cntxt_id,
- unsigned int qpp)
-{
- u64 udb;
- unsigned int s_qpp;
- unsigned short udb_density;
- unsigned long qpshift;
- int page;
-
- BUG_ON(is_t4(adap->params.chip));
-
- s_qpp = (QUEUESPERPAGEPF0 +
- (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn);
- udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
- qpshift = PAGE_SHIFT - ilog2(udb_density);
- udb = (u64)cntxt_id << qpshift;
- udb &= PAGE_MASK;
- page = udb / PAGE_SIZE;
- udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE;
-
- return udb;
-}
+ * Returns the BAR2 address for the SGE Queue Registers associated with
+ * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
+ * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ * Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ unsigned int *pbar2_qid)
+{
+ u64 bar2_qoffset;
+ int ret;
-static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id)
-{
- return udb_address(adap, cntxt_id,
- t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
-}
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype,
+ &bar2_qoffset, pbar2_qid);
+ if (ret)
+ return NULL;
-static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id)
-{
- return udb_address(adap, cntxt_id,
- t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
+ return adapter->bar2 + bar2_qoffset;
}
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
@@ -2297,20 +2296,20 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
return -ENOMEM;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
FW_LEN16(c));
- c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
- FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
- FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
- FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
+ FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+ FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
- c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
- FW_IQ_CMD_IQGTSMODE |
- FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
- FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+ c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
+ FW_IQ_CMD_IQGTSMODE_F |
+ FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
@@ -2323,12 +2322,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
- FW_IQ_CMD_FL0FETCHRO(1) |
- FW_IQ_CMD_FL0DATARO(1) |
- FW_IQ_CMD_FL0PADEN(1));
- c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
- FW_IQ_CMD_FL0FBMAX(3));
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0FETCHRO_F |
+ FW_IQ_CMD_FL0DATARO_F |
+ FW_IQ_CMD_FL0PADEN_F);
+ c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
+ FW_IQ_CMD_FL0FBMAX_V(3));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
@@ -2344,8 +2343,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->next_intr_params = iq->intr_params;
iq->cntxt_id = ntohs(c.iqid);
iq->abs_id = ntohs(c.physiqid);
- if (!is_t4(adap->params.chip))
- iq->udb = udb_address_iq(adap, iq->cntxt_id);
+ iq->bar2_addr = bar2_address(adap,
+ iq->cntxt_id,
+ T4_BAR2_QTYPE_INGRESS,
+ &iq->bar2_qid);
iq->size--; /* subtract status entry */
iq->netdev = dev;
iq->handler = hnd;
@@ -2362,11 +2363,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
- /* Note, we must initialize the Free List User Doorbell
- * address before refilling the Free List!
+ /* Note, we must initialize the BAR2 Free List User Doorbell
+ * information before refilling the Free List!
*/
- if (!is_t4(adap->params.chip))
- fl->udb = udb_address_eq(adap, fl->cntxt_id);
+ fl->bar2_addr = bar2_address(adap,
+ fl->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &fl->bar2_qid);
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
return 0;
@@ -2392,9 +2395,10 @@ err:
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
q->cntxt_id = id;
- if (!is_t4(adap->params.chip))
- q->udb = udb_address_eq(adap, q->cntxt_id);
-
+ q->bar2_addr = bar2_address(adap,
+ q->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &q->bar2_qid);
q->in_use = 0;
q->cidx = q->pidx = 0;
q->stops = q->restarts = 0;
@@ -2423,21 +2427,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return -ENOMEM;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
- FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
- c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE |
- FW_EQ_ETH_CMD_VIID(pi->viid));
- c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
- FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_ETH_CMD_FETCHRO(1) |
- FW_EQ_ETH_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
- FW_EQ_ETH_CMD_FBMAX(3) |
- FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
- FW_EQ_ETH_CMD_EQSIZE(nentries));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_EQ_ETH_CMD_PFN_V(adap->fn) |
+ FW_EQ_ETH_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
+ FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
+ c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ FW_EQ_ETH_CMD_VIID_V(pi->viid));
+ c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO_V(1) |
+ FW_EQ_ETH_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
+ FW_EQ_ETH_CMD_FBMAX_V(3) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2451,7 +2456,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->txq = netdevq;
txq->tso = txq->tx_cso = txq->vlan_ins = 0;
txq->mapping_err = 0;
@@ -2476,22 +2481,22 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
if (!txq->q.desc)
return -ENOMEM;
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_CTRL_CMD_PFN(adap->fn) |
- FW_EQ_CTRL_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
- FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
- c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
+ FW_EQ_CTRL_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
+ FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
c.physeqid_pkd = htonl(0);
- c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
- FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_CTRL_CMD_FETCHRO |
- FW_EQ_CTRL_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
- FW_EQ_CTRL_CMD_FBMAX(3) |
- FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
- FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
+ FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO_F |
+ FW_EQ_CTRL_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
+ FW_EQ_CTRL_CMD_FBMAX_V(3) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
+ FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2503,7 +2508,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
+ init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
@@ -2530,20 +2535,20 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return -ENOMEM;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_EQ_OFLD_CMD_PFN(adap->fn) |
- FW_EQ_OFLD_CMD_VFN(0));
- c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
- FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
- c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
- FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
- FW_EQ_OFLD_CMD_FETCHRO(1) |
- FW_EQ_OFLD_CMD_IQID(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
- FW_EQ_OFLD_CMD_FBMAX(3) |
- FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
- FW_EQ_OFLD_CMD_EQSIZE(nentries));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
+ FW_EQ_OFLD_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
+ FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
+ c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO_F |
+ FW_EQ_OFLD_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
+ FW_EQ_OFLD_CMD_FBMAX_V(3) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
@@ -2557,7 +2562,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index c623f1fc2e3d..67345c73e570 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -188,9 +188,9 @@ static void t4_report_fw_error(struct adapter *adap)
u32 pcie_fw;
pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
- if (pcie_fw & FW_PCIE_FW_ERR)
+ if (pcie_fw & PCIE_FW_ERR)
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
- reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
+ reason[PCIE_FW_EVAL_G(pcie_fw)]);
}
/*
@@ -310,16 +310,17 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
}
res = t4_read_reg64(adap, data_reg);
- if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
+ if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
fw_asrt(adap, data_reg);
- res = FW_CMD_RETVAL(EIO);
- } else if (rpl)
+ res = FW_CMD_RETVAL_V(EIO);
+ } else if (rpl) {
get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ }
- if (FW_CMD_RETVAL_GET((int)res))
+ if (FW_CMD_RETVAL_G((int)res))
dump_mbox(adap, mbox, data_reg);
t4_write_reg(adap, ctl_reg, 0);
- return -FW_CMD_RETVAL_GET((int)res);
+ return -FW_CMD_RETVAL_G((int)res);
}
}
@@ -483,12 +484,12 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* MEM_MC0 = 2 -- For T5
* MEM_MC1 = 3 -- For T5
*/
- edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
+ edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
memoffset = (mtype * (edc_size * 1024 * 1024));
else {
- mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
- MA_EXT_MEMORY_BAR));
+ mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
+ MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
}
@@ -710,8 +711,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
* Ask firmware for the Core Clock since it knows how to translate the
* Reference Clock ('V2') VPD field into a Core Clock value ...
*/
- cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
ret = t4_query_params(adapter, adapter->mbox, 0, 0,
1, &cclk_param, &cclk_val);
@@ -992,10 +993,10 @@ static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
install:
dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
"installing firmware %u.%u.%u.%u on card.\n",
- FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
- FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
- FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
- FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
return 1;
}
@@ -1067,12 +1068,12 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
"driver compiled with %d.%d.%d.%d, "
"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
state,
- FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
- FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
- FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
- FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
- FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
- FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
+ FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+ FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+ FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
ret = EINVAL;
goto bye;
}
@@ -1235,6 +1236,8 @@ out:
if (ret)
dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
ret);
+ else
+ ret = t4_get_fw_version(adap, &adap->params.fw_vers);
return ret;
}
@@ -1259,7 +1262,7 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
- unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+ unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
lc->link_ok = 0;
if (lc->requested_fc & PAUSE_RX)
@@ -1268,9 +1271,9 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
fc |= FW_PORT_CAP_FC_TX;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
FW_LEN16(c));
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
@@ -1298,9 +1301,9 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
FW_LEN16(c));
c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -1586,7 +1589,7 @@ static void cim_intr_handler(struct adapter *adapter)
int fat;
- if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR)
t4_report_fw_error(adapter);
fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
@@ -2094,9 +2097,9 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
struct fw_rss_ind_tbl_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE |
- FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_RSS_IND_TBL_CMD_VIID_V(viid));
cmd.retval_len16 = htonl(FW_LEN16(cmd));
/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
@@ -2113,13 +2116,13 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
while (nq > 0) {
unsigned int v;
- v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
+ v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
- v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
+ v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
- v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
+ v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
if (++rsp >= rsp_end)
rsp = rspq;
@@ -2149,14 +2152,14 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
struct fw_rss_glb_config_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
+ c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
c.retval_len16 = htonl(FW_LEN16(c));
if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
- c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
c.u.basicvirtual.mode_pkd =
- htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
} else
return -EINVAL;
@@ -2576,18 +2579,18 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
memset(wr, 0, sizeof(*wr));
- wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
- wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
- wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
- V_FW_FILTER_WR_NOREPLY(qid < 0));
- wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+ wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
+ wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
+ FW_FILTER_WR_NOREPLY_V(qid < 0));
+ wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
if (qid >= 0)
- wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+ wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
#define INIT_CMD(var, cmd, rd_wr) do { \
- (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
- FW_CMD_REQUEST | FW_CMD_##rd_wr); \
+ (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
+ FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
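The token pasting keeps call sites compact; for example, INIT_CMD(c, PARAMS,
READ) expands to roughly:

    c.op_to_write  = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) |
                           FW_CMD_REQUEST_F | FW_CMD_READ_F);
    c.retval_len16 = htonl(FW_LEN16(c));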
@@ -2597,9 +2600,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
+ c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
c.cycles_to_len16 = htonl(FW_LEN16(c));
c.u.addrval.addr = htonl(addr);
c.u.addrval.val = htonl(val);
@@ -2625,11 +2628,11 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
- FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
- FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
c.u.mdio.raddr = htons(reg);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -2655,11 +2658,11 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
- FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
c.u.mdio.raddr = htons(reg);
c.u.mdio.rval = htons(val);
@@ -2796,13 +2799,13 @@ retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
c.err_to_clearinit = htonl(
- FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
- FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
- FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
- FW_HELLO_CMD_MBMASTER_MASK) |
- FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
- FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
- FW_HELLO_CMD_CLEARINIT);
+ FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
+ FW_HELLO_CMD_MBMASTER_M) |
+ FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
+ FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT_F);
/*
* Issue the HELLO command to the firmware. If it's not successful
@@ -2815,17 +2818,17 @@ retry:
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
- if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR)
t4_report_fw_error(adap);
return ret;
}
v = ntohl(c.err_to_clearinit);
- master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
+ master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
if (state) {
- if (v & FW_HELLO_CMD_ERR)
+ if (v & FW_HELLO_CMD_ERR_F)
*state = DEV_STATE_ERR;
- else if (v & FW_HELLO_CMD_INIT)
+ else if (v & FW_HELLO_CMD_INIT_F)
*state = DEV_STATE_INIT;
else
*state = DEV_STATE_UNINIT;
@@ -2840,9 +2843,9 @@ retry:
* and we wouldn't want to fail pointlessly. (This can happen when an
* OS loads lots of different drivers rapidly at the same time). In
* this case, the Master PF returned by the firmware will be
- * FW_PCIE_FW_MASTER_MASK so the test below will work ...
+ * PCIE_FW_MASTER_M so the test below will work ...
*/
- if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
+ if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
master_mbox != mbox) {
int waiting = FW_CMD_HELLO_TIMEOUT;
@@ -2866,7 +2869,7 @@ retry:
* our retries ...
*/
pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
- if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
+ if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
if (waiting <= 0) {
if (retries-- > 0)
goto retry;
@@ -2881,9 +2884,9 @@ retry:
* report errors preferentially.
*/
if (state) {
- if (pcie_fw & FW_PCIE_FW_ERR)
+ if (pcie_fw & PCIE_FW_ERR)
*state = DEV_STATE_ERR;
- else if (pcie_fw & FW_PCIE_FW_INIT)
+ else if (pcie_fw & PCIE_FW_INIT)
*state = DEV_STATE_INIT;
}
@@ -2892,9 +2895,9 @@ retry:
* there's not a valid Master PF, grab its identity
* for our caller.
*/
- if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
- (pcie_fw & FW_PCIE_FW_MASTER_VLD))
- master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
+ if (master_mbox == PCIE_FW_MASTER_M &&
+ (pcie_fw & PCIE_FW_MASTER_VLD))
+ master_mbox = PCIE_FW_MASTER_G(pcie_fw);
break;
}
}
@@ -2962,7 +2965,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
* Issues a RESET command to firmware (if desired) with a HALT indication
* and then puts the microprocessor into RESET state. The RESET command
* will only be issued if a legitimate mailbox is provided (mbox <=
- * FW_PCIE_FW_MASTER_MASK).
+ * PCIE_FW_MASTER_M).
*
* This is generally used in order for the host to safely manipulate the
* adapter without fear of conflicting with whatever the firmware might
@@ -2977,13 +2980,13 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
* If a legitimate mailbox is provided, issue a RESET command
* with a HALT indication.
*/
- if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+ if (mbox <= PCIE_FW_MASTER_M) {
struct fw_reset_cmd c;
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
c.val = htonl(PIORST | PIORSTMODE);
- c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
+ c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3002,8 +3005,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
*/
if (ret == 0 || force) {
t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
- t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
- FW_PCIE_FW_HALT);
+ t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F,
+ PCIE_FW_HALT_F);
}
/*
@@ -3042,7 +3045,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* doing it automatically, we need to clear the PCIE_FW.HALT
* bit.
*/
- t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
+ t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0);
/*
* If we've been given a valid mailbox, first try to get the
@@ -3051,7 +3054,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
* valid mailbox or the RESET command failed, fall back to
* hitting the chip with a hammer.
*/
- if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+ if (mbox <= PCIE_FW_MASTER_M) {
t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
msleep(100);
if (t4_fw_reset(adap, mbox,
@@ -3066,7 +3069,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
- if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
+ if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F))
return 0;
msleep(100);
ms += 100;
@@ -3276,9 +3279,9 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
- FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
- FW_PARAMS_CMD_VFN(vf));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = htonl(FW_LEN16(c));
for (i = 0; i < nparams; i++, p += 2)
*p = htonl(*params++);
@@ -3316,10 +3319,10 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE |
- FW_PARAMS_CMD_PFN(pf) |
- FW_PARAMS_CMD_VFN(vf));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
while (nparams--) {
@@ -3354,9 +3357,9 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
- FW_PARAMS_CMD_VFN(vf));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = htonl(FW_LEN16(c));
while (nparams--) {
*p++ = htonl(*params++);
@@ -3396,20 +3399,20 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_pfvf_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
- FW_PFVF_CMD_VFN(vf));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
+ FW_PFVF_CMD_VFN_V(vf));
c.retval_len16 = htonl(FW_LEN16(c));
- c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
- FW_PFVF_CMD_NIQ(rxq));
- c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
- FW_PFVF_CMD_PMASK(pmask) |
- FW_PFVF_CMD_NEQ(txq));
- c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
- FW_PFVF_CMD_NEXACTF(nexact));
- c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
- FW_PFVF_CMD_WX_CAPS(wxcaps) |
- FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+ c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
+ FW_PFVF_CMD_NIQ_V(rxq));
+ c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
+ FW_PFVF_CMD_PMASK_V(pmask) |
+ FW_PFVF_CMD_NEQ_V(txq));
+ c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
+ FW_PFVF_CMD_NEXACTF_V(nexact));
+ c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
+ FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3438,11 +3441,11 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
struct fw_vi_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_CMD_EXEC |
- FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
- c.portid_pkd = FW_VI_CMD_PORTID(port);
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
+ c.portid_pkd = FW_VI_CMD_PORTID_V(port);
c.nmac = nmac - 1;
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -3463,8 +3466,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
}
if (rss_size)
- *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
- return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
+ *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
+ return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
}
/**
@@ -3491,23 +3494,23 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
if (mtu < 0)
mtu = FW_RXMODE_MTU_NO_CHG;
if (promisc < 0)
- promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+ promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
if (all_multi < 0)
- all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+ all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
if (bcast < 0)
- bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
if (vlanex < 0)
- vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
c.retval_len16 = htonl(FW_LEN16(c));
- c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
- FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -3548,15 +3551,15 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
- FW_VI_MAC_CMD_VIID(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
- FW_CMD_LEN16((naddr + 2) / 2));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
+ FW_CMD_LEN16_V((naddr + 2) / 2));
for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
}
@@ -3565,7 +3568,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
return ret;
for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+ u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
if (idx)
idx[i] = index >= max_naddr ? 0xffff : index;
@@ -3611,17 +3614,17 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
- c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_SMAC_RESULT(mode) |
- FW_VI_MAC_CMD_IDX(idx));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
+ FW_VI_MAC_CMD_IDX_V(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
- ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+ ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
}
@@ -3645,11 +3648,11 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_mac_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
- FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
- FW_VI_MAC_CMD_HASHUNIEN(ucast) |
- FW_CMD_LEN16(1));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
+ FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+ FW_CMD_LEN16_V(1));
c.u.hash.hashvec = cpu_to_be64(vec);
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -3672,12 +3675,12 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
- FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
- FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
+ c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+ FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
+ FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3712,9 +3715,9 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+ c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
c.blinkdur = htons(nblinks);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3739,11 +3742,11 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_iq_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
- FW_IQ_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
- c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
c.iqid = htons(iqid);
c.fl0id = htons(fl0id);
c.fl1id = htons(fl1id);
@@ -3766,11 +3769,11 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_eth_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
- FW_EQ_ETH_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
+ FW_EQ_ETH_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3790,11 +3793,11 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ctrl_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
- FW_EQ_CTRL_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
- c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
+ FW_EQ_CTRL_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3814,11 +3817,11 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ofld_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
- FW_EQ_OFLD_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+ c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
+ FW_EQ_OFLD_CMD_VFN_V(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3836,25 +3839,25 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (opcode == FW_PORT_CMD) { /* link/module state change message */
int speed = 0, fc = 0;
const struct fw_port_cmd *p = (void *)rpl;
- int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
+ int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
int port = adap->chan_map[chan];
struct port_info *pi = adap2pinfo(adap, port);
struct link_config *lc = &pi->link_cfg;
u32 stat = ntohl(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
- u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
+ int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+ u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
- if (stat & FW_PORT_CMD_RXPAUSE)
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
- if (stat & FW_PORT_CMD_TXPAUSE)
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
- if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
- else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
- else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
- else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
if (link_ok != lc->link_ok || speed != lc->speed ||
@@ -4028,6 +4031,126 @@ int t4_prep_adapter(struct adapter *adapter)
}
/**
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * @adapter: the adapter
+ * @qid: the Queue ID
+ * @qtype: the Ingress or Egress type for @qid
+ * @pbar2_qoffset: BAR2 Queue Offset
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 SGE Queue Registers information associated with the
+ * indicated Absolute Queue ID. These are passed back in return value
+ * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queues
+ * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ * This may return an error which indicates that BAR2 SGE Queue
+ * registers aren't available. If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID (or 0) for @qid
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" registers may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /* T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /* Get our SGE Page Size parameters.
+ */
+ page_shift = adapter->params.sge.hps + 10;
+ page_size = 1 << page_shift;
+
+ /* Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
+ ? adapter->params.sge.eq_qpp
+ : adapter->params.sge.iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /* Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /* If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+ * One important consequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
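To make the inference rule above concrete, here is a small worked example of the same arithmetic with hypothetical values (4KB SGE host pages, 64 egress queues per page, and SGE_UDB_SIZE assumed to be the usual 128-byte doorbell stride):

	/* page_shift = 12 (4KB pages), qpp_shift = 6 (64 queues per page) */
	unsigned int qid = 100;
	u64 bar2_page_offset = (u64)(qid >> 6) << 12;	/* page 1 -> 0x1000 */
	unsigned int bar2_qid = qid & 63;		/* queue 36 in that page */
	unsigned int bar2_qid_offset = bar2_qid * 128;	/* 4608 */

	/* 4608 >= 4096 (the page size), so the Queue ID cannot be inferred
	 * from the write offset alone: the caller gets bar2_qoffset = 0x1000
	 * and must place bar2_qid = 36 in the register's Queue ID field.
	 * For qid = 70 the offset would be (70 & 63) * 128 = 768 < 4096, so
	 * the hardware can infer the Absolute Queue ID: bar2_qoffset becomes
	 * 0x1000 + 768 and bar2_qid comes back as 0.
	 */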
+
+/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 hps, qpp;
+ unsigned int s_hps, s_qpp;
+
+ /* Extract the SGE Page Size for our PF.
+ */
+ hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE);
+ s_hps = (HOSTPAGESIZEPF0_S +
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+ sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
+
+ /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
+ */
+ s_qpp = (QUEUESPERPAGEPF0_S +
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+ qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+ qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+
+ return 0;
+}
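The per-PF extraction above follows one pattern: each register packs one small field per PF, so the shift for PF n is the PF0 shift plus n times the per-PF stride. A minimal sketch with a made-up register value:

	/* HOSTPAGESIZEPF fields are 4 bits wide, one per PF, starting at
	 * bit 0, so the stride HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S is 4.
	 * Hypothetical register value, read for PF 2:
	 */
	u32 hps = 0x00000420;
	unsigned int fn = 2;
	unsigned int s_hps = 0 + 4 * fn;		/* shift = 8 */
	unsigned int pf_hps = (hps >> s_hps) & 0xf;	/* field = 0x4 */

	/* The page size itself is 2^(10 + field) = 16KB here, which is why
	 * t4_bar2_sge_qregs() computes page_shift = sge.hps + 10.
	 */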
+
+/**
* t4_init_tp_params - initialize adap->params.tp
* @adap: the adapter
*
@@ -4147,11 +4270,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ |
- FW_PORT_CMD_PORTID(j));
+ c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(j));
c.action_to_len16 = htonl(
- FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(c));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret)
@@ -4169,13 +4292,13 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
adap->port[i]->dev_port = j;
ret = ntohl(c.u.info.lstatus_to_modtype);
- p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
- FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
- p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
+ p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1;
+ p->port_type = FW_PORT_CMD_PTYPE_G(ret);
p->mod_type = FW_PORT_MOD_TYPE_NA;
- rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ |
+ rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
rvc.retval_len16 = htonl(FW_LEN16(rvc));
ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 5f4db2398c71..0f89f68948ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -205,16 +205,62 @@ struct work_request_hdr {
#define WR_HDR struct work_request_hdr wr
/* option 0 fields */
-#define S_MSS_IDX 60
-#define M_MSS_IDX 0xF
-#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
-#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+#define TX_CHAN_S 2
+#define TX_CHAN_V(x) ((x) << TX_CHAN_S)
+
+#define ULP_MODE_S 8
+#define ULP_MODE_V(x) ((x) << ULP_MODE_S)
+
+#define RCV_BUFSIZ_S 12
+#define RCV_BUFSIZ_M 0x3FFU
+#define RCV_BUFSIZ_V(x) ((x) << RCV_BUFSIZ_S)
+
+#define SMAC_SEL_S 28
+#define SMAC_SEL_V(x) ((__u64)(x) << SMAC_SEL_S)
+
+#define L2T_IDX_S 36
+#define L2T_IDX_V(x) ((__u64)(x) << L2T_IDX_S)
+
+#define WND_SCALE_S 50
+#define WND_SCALE_V(x) ((__u64)(x) << WND_SCALE_S)
+
+#define KEEP_ALIVE_S 54
+#define KEEP_ALIVE_V(x) ((__u64)(x) << KEEP_ALIVE_S)
+#define KEEP_ALIVE_F KEEP_ALIVE_V(1ULL)
+
+#define MSS_IDX_S 60
+#define MSS_IDX_M 0xF
+#define MSS_IDX_V(x) ((__u64)(x) << MSS_IDX_S)
+#define MSS_IDX_G(x) (((x) >> MSS_IDX_S) & MSS_IDX_M)
/* option 2 fields */
-#define S_RSS_QUEUE 0
-#define M_RSS_QUEUE 0x3FF
-#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
-#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+#define RSS_QUEUE_S 0
+#define RSS_QUEUE_M 0x3FF
+#define RSS_QUEUE_V(x) ((x) << RSS_QUEUE_S)
+#define RSS_QUEUE_G(x) (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M)
+
+#define RSS_QUEUE_VALID_S 10
+#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S)
+#define RSS_QUEUE_VALID_F RSS_QUEUE_VALID_V(1U)
+
+#define RX_FC_DISABLE_S 20
+#define RX_FC_DISABLE_V(x) ((x) << RX_FC_DISABLE_S)
+#define RX_FC_DISABLE_F RX_FC_DISABLE_V(1U)
+
+#define RX_FC_VALID_S 22
+#define RX_FC_VALID_V(x) ((x) << RX_FC_VALID_S)
+#define RX_FC_VALID_F RX_FC_VALID_V(1U)
+
+#define RX_CHANNEL_S 26
+#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S)
+
+#define WND_SCALE_EN_S 28
+#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S)
+#define WND_SCALE_EN_F WND_SCALE_EN_V(1U)
+
+#define T5_OPT_2_VALID_S 31
+#define T5_OPT_2_VALID_V(x) ((x) << T5_OPT_2_VALID_S)
+#define T5_OPT_2_VALID_F T5_OPT_2_VALID_V(1U)
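The suffix convention introduced here is uniform across these headers: _S is the bit shift, _M the unshifted mask, _V(x) places a value, _G(x) extracts one, and _F is the pre-shifted single-bit flag. A short sketch of how an opt2 word might be built and decoded with the macros defined above:

	/* Compose: RSS queue 5, marked valid, window scaling enabled. */
	__be32 opt2 = htonl(RSS_QUEUE_V(5) |
			    RSS_QUEUE_VALID_F |
			    WND_SCALE_EN_F);

	/* Decode on the read side. */
	u32 v = ntohl(opt2);
	unsigned int rss_q = RSS_QUEUE_G(v);		/* 5 */
	bool wscale_en = (v & WND_SCALE_EN_F) != 0;	/* true */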
struct cpl_pass_open_req {
WR_HDR;
@@ -224,20 +270,11 @@ struct cpl_pass_open_req {
__be32 local_ip;
__be32 peer_ip;
__be64 opt0;
-#define TX_CHAN(x) ((x) << 2)
#define NO_CONG(x) ((x) << 4)
#define DELACK(x) ((x) << 5)
-#define ULP_MODE(x) ((x) << 8)
-#define RCV_BUFSIZ(x) ((x) << 12)
-#define RCV_BUFSIZ_MASK 0x3FFU
#define DSCP(x) ((x) << 22)
-#define SMAC_SEL(x) ((u64)(x) << 28)
-#define L2T_IDX(x) ((u64)(x) << 36)
#define TCAM_BYPASS(x) ((u64)(x) << 48)
#define NAGLE(x) ((u64)(x) << 49)
-#define WND_SCALE(x) ((u64)(x) << 50)
-#define KEEP_ALIVE(x) ((u64)(x) << 54)
-#define MSS_IDX(x) ((u64)(x) << 60)
__be64 opt1;
#define SYN_RSS_ENABLE (1 << 0)
#define SYN_RSS_QUEUE(x) ((x) << 2)
@@ -267,20 +304,13 @@ struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
__be32 opt2;
-#define RSS_QUEUE(x) ((x) << 0)
-#define RSS_QUEUE_VALID (1 << 10)
#define RX_COALESCE_VALID(x) ((x) << 11)
#define RX_COALESCE(x) ((x) << 12)
#define PACE(x) ((x) << 16)
-#define RX_FC_VALID ((1U) << 19)
-#define RX_FC_DISABLE ((1U) << 20)
#define TX_QUEUE(x) ((x) << 23)
-#define RX_CHANNEL(x) ((x) << 26)
#define CCTRL_ECN(x) ((x) << 27)
-#define WND_SCALE_EN(x) ((x) << 28)
#define TSTAMPS_EN(x) ((x) << 29)
#define SACK_EN(x) ((x) << 30)
-#define T5_OPT_2_VALID ((1U) << 31)
__be64 opt0;
};
@@ -305,10 +335,10 @@ struct cpl_act_open_req {
__be32 opt2;
};
-#define S_FILTER_TUPLE 24
-#define M_FILTER_TUPLE 0xFFFFFFFFFF
-#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
-#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+#define FILTER_TUPLE_S 24
+#define FILTER_TUPLE_M 0xFFFFFFFFFF
+#define FILTER_TUPLE_V(x) ((x) << FILTER_TUPLE_S)
+#define FILTER_TUPLE_G(x) (((x) >> FILTER_TUPLE_S) & FILTER_TUPLE_M)
struct cpl_t5_act_open_req {
WR_HDR;
union opcode_tid ot;
@@ -579,10 +609,16 @@ struct cpl_rx_data_ack {
WR_HDR;
union opcode_tid ot;
__be32 credit_dack;
-#define RX_CREDITS(x) ((x) << 0)
-#define RX_FORCE_ACK(x) ((x) << 28)
};
+/* cpl_rx_data_ack.credit_dack fields */
+#define RX_CREDITS_S 0
+#define RX_CREDITS_V(x) ((x) << RX_CREDITS_S)
+
+#define RX_FORCE_ACK_S 28
+#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
+#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U)
+
struct cpl_rx_pkt {
struct rss_header rsshdr;
u8 opcode;
@@ -803,6 +839,9 @@ enum {
ULP_TX_SC_ISGL = 0x83
};
+#define ULPTX_CMD_S 24
+#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
+
struct ulptx_sge_pair {
__be32 len[2];
__be64 addr[2];
@@ -810,7 +849,6 @@ struct ulptx_sge_pair {
struct ulptx_sgl {
__be32 cmd_nsge;
-#define ULPTX_CMD(x) ((x) << 24)
#define ULPTX_NSGE(x) ((x) << 0)
#define ULPTX_MORE (1U << 23)
__be32 len0;
@@ -821,15 +859,21 @@ struct ulptx_sgl {
struct ulp_mem_io {
WR_HDR;
__be32 cmd;
-#define ULP_MEMIO_ORDER(x) ((x) << 23)
__be32 len16; /* command length */
__be32 dlen; /* data length in 32-byte units */
-#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
__be32 lock_addr;
-#define ULP_MEMIO_ADDR(x) ((x) << 0)
#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
+/* additional ulp_mem_io.cmd fields */
+#define ULP_MEMIO_ORDER_S 23
+#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
+#define ULP_MEMIO_ORDER_F ULP_MEMIO_ORDER_V(1U)
+
+#define T5_ULP_MEMIO_IMM_S 23
+#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
+#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U)
+
#define S_T5_ULP_MEMIO_IMM 23
#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
@@ -838,4 +882,12 @@ struct ulp_mem_io {
#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
+/* ulp_mem_io.lock_addr fields */
+#define ULP_MEMIO_ADDR_S 0
+#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S)
+
+/* ulp_mem_io.dlen fields */
+#define ULP_MEMIO_DATA_LEN_S 0
+#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
+
#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
new file mode 100644
index 000000000000..9e4f95a91fb4
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -0,0 +1,160 @@
+/*
+ * This file is part of the Chelsio T4/T5 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/* The code can define cpp macros for creating a PCI Device ID Table. This is
+ * useful because it allows the PCI ID Table to be maintained in a single place.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ * -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ * -- The PCI Function Number to use in the PCI Device ID Table. "0"
+ * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ * -- If defined, create a PCI Device ID Table with both
+ * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ * -- Used for the individual PCI Device ID entries. Note that we will
+ * -- be adding a trailing comma (",") after all of the entries (and
+ * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ * -- Used to finish the definition of the PCI ID Table. Note that we
+ * -- will be adding a trailing semi-colon (";") here.
+ */
+#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+
+#ifndef CH_PCI_DEVICE_ID_FUNCTION
+#error CH_PCI_DEVICE_ID_FUNCTION not defined!
+#endif
+#ifndef CH_PCI_ID_TABLE_ENTRY
+#error CH_PCI_ID_TABLE_ENTRY not defined!
+#endif
+#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
+#endif
+
+/* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * We use this consistency in order to create the proper PCI Device IDs
+ * for the specified CH_PCI_DEVICE_ID_FUNCTION.
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION2
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8))
+#else
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION2) << 8))
+#endif
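For illustration, a consumer of this header might instantiate the table roughly as below before the #include; the table name and the PCI_VDEVICE() wrapper are assumptions of this sketch, not requirements of the header:

	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
		static const struct pci_device_id cxgb4_pci_tbl[] = {
	#define CH_PCI_DEVICE_ID_FUNCTION 0x4	/* attach to PF4 */
	#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }

	#include "t4_pci_id_tbl.h"

	/* CH_PCI_ID_TABLE_FENTRY(0x5001) then expands to an entry for Device
	 * ID 0x5401: product 0x01 on PF4 of a T5, per the 0xVFPP scheme
	 * described above.
	 */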
+
+CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ /* T4 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x4000), /* T440-dbg */
+ CH_PCI_ID_TABLE_FENTRY(0x4001), /* T420-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4002), /* T422-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4003), /* T440-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4004), /* T420-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x4005), /* T440-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x4006), /* T440-ch */
+ CH_PCI_ID_TABLE_FENTRY(0x4007), /* T420-so */
+ CH_PCI_ID_TABLE_FENTRY(0x4008), /* T420-cx */
+ CH_PCI_ID_TABLE_FENTRY(0x4009), /* T420-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x400a), /* T404-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x400b), /* B420-sr */
+ CH_PCI_ID_TABLE_FENTRY(0x400c), /* B404-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x400d), /* T480-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x400e), /* T440-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4080), /* Custom T480-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4081), /* Custom T440-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4082), /* Custom T420-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4083), /* Custom T420-xaui */
+ CH_PCI_ID_TABLE_FENTRY(0x4084), /* Custom T440-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4085), /* Custom T420-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4086), /* Custom T440-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x4087), /* Custom T440-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x4088), /* Custom T440 2-xaui, 2-xfi */
+
+ /* T5 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */
+ CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */
+ CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */
+ CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */
+ CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
+ CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
+
+#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
+
+#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 8d2de1006b08..d7bd34ee65bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -155,17 +155,22 @@
#define HOSTPAGESIZEPF2_SHIFT 8
#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT)
-#define HOSTPAGESIZEPF1_MASK 0x0000000fU
-#define HOSTPAGESIZEPF1_SHIFT 4
-#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT)
+#define HOSTPAGESIZEPF1_M 0x0000000fU
+#define HOSTPAGESIZEPF1_S 4
+#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S)
-#define HOSTPAGESIZEPF0_MASK 0x0000000fU
-#define HOSTPAGESIZEPF0_SHIFT 0
-#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
+#define HOSTPAGESIZEPF0_M 0x0000000fU
+#define HOSTPAGESIZEPF0_S 0
+#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S)
#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
-#define QUEUESPERPAGEPF0_MASK 0x0000000fU
-#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
+
+#define QUEUESPERPAGEPF1_S 4
+
+#define QUEUESPERPAGEPF0_S 0
+#define QUEUESPERPAGEPF0_MASK 0x0000000fU
+#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
#define QUEUESPERPAGEPF0 0
#define QUEUESPERPAGEPF1 4
@@ -323,6 +328,7 @@
#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
#define S_HP_INT_THRESH 28
#define M_HP_INT_THRESH 0xfU
@@ -511,21 +517,62 @@
#define MC_BIST_STATUS_RDATA 0x7688
-#define MA_EDRAM0_BAR 0x77c0
-#define MA_EDRAM1_BAR 0x77c4
-#define EDRAM_SIZE_MASK 0xfffU
-#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK)
+#define MA_EDRAM0_BAR_A 0x77c0
+
+#define EDRAM0_SIZE_S 0
+#define EDRAM0_SIZE_M 0xfffU
+#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
+#define EDRAM0_SIZE_G(x) (((x) >> EDRAM0_SIZE_S) & EDRAM0_SIZE_M)
+
+#define MA_EDRAM1_BAR_A 0x77c4
+
+#define EDRAM1_SIZE_S 0
+#define EDRAM1_SIZE_M 0xfffU
+#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
+#define EDRAM1_SIZE_G(x) (((x) >> EDRAM1_SIZE_S) & EDRAM1_SIZE_M)
+
+#define MA_EXT_MEMORY_BAR_A 0x77c8
+
+#define EXT_MEM_SIZE_S 0
+#define EXT_MEM_SIZE_M 0xfffU
+#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
+#define EXT_MEM_SIZE_G(x) (((x) >> EXT_MEM_SIZE_S) & EXT_MEM_SIZE_M)
+
+#define MA_EXT_MEMORY1_BAR_A 0x7808
+
+#define EXT_MEM1_SIZE_S 0
+#define EXT_MEM1_SIZE_M 0xfffU
+#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
+#define EXT_MEM1_SIZE_G(x) (((x) >> EXT_MEM1_SIZE_S) & EXT_MEM1_SIZE_M)
+
+#define MA_EXT_MEMORY0_BAR_A 0x77c8
+
+#define EXT_MEM0_SIZE_S 0
+#define EXT_MEM0_SIZE_M 0xfffU
+#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
+#define EXT_MEM0_SIZE_G(x) (((x) >> EXT_MEM0_SIZE_S) & EXT_MEM0_SIZE_M)
+
+#define MA_TARGET_MEM_ENABLE_A 0x77d8
+
+#define EXT_MEM_ENABLE_S 2
+#define EXT_MEM_ENABLE_V(x) ((x) << EXT_MEM_ENABLE_S)
+#define EXT_MEM_ENABLE_F EXT_MEM_ENABLE_V(1U)
+
+#define EDRAM1_ENABLE_S 1
+#define EDRAM1_ENABLE_V(x) ((x) << EDRAM1_ENABLE_S)
+#define EDRAM1_ENABLE_F EDRAM1_ENABLE_V(1U)
+
+#define EDRAM0_ENABLE_S 0
+#define EDRAM0_ENABLE_V(x) ((x) << EDRAM0_ENABLE_S)
+#define EDRAM0_ENABLE_F EDRAM0_ENABLE_V(1U)
-#define MA_EXT_MEMORY_BAR 0x77c8
-#define EXT_MEM_SIZE_MASK 0x00000fffU
-#define EXT_MEM_SIZE_SHIFT 0
-#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
+#define EXT_MEM1_ENABLE_S 4
+#define EXT_MEM1_ENABLE_V(x) ((x) << EXT_MEM1_ENABLE_S)
+#define EXT_MEM1_ENABLE_F EXT_MEM1_ENABLE_V(1U)
-#define MA_TARGET_MEM_ENABLE 0x77d8
-#define EXT_MEM1_ENABLE 0x00000010U
-#define EXT_MEM_ENABLE 0x00000004U
-#define EDRAM1_ENABLE 0x00000002U
-#define EDRAM0_ENABLE 0x00000001U
+#define EXT_MEM0_ENABLE_S 2
+#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
+#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U)
#define MA_INT_CAUSE 0x77e0
#define MEM_PERR_INT_CAUSE 0x00000002U
@@ -542,7 +589,6 @@
#define MA_PARITY_ERROR_STATUS 0x77f4
#define MA_PARITY_ERROR_STATUS2 0x7804
-#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
#define EDC_BIST_CMD 0x7904
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 3409756a85b9..beaf80a6214b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -109,18 +109,49 @@ struct fw_wr_hdr {
__be32 lo;
};
-#define FW_WR_OP(x) ((x) << 24)
-#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff)
-#define FW_WR_ATOMIC(x) ((x) << 23)
-#define FW_WR_FLUSH(x) ((x) << 22)
-#define FW_WR_COMPL(x) ((x) << 21)
-#define FW_WR_IMMDLEN_MASK 0xff
-#define FW_WR_IMMDLEN(x) ((x) << 0)
-
-#define FW_WR_EQUIQ (1U << 31)
-#define FW_WR_EQUEQ (1U << 30)
-#define FW_WR_FLOWID(x) ((x) << 8)
-#define FW_WR_LEN16(x) ((x) << 0)
+/* work request opcode (hi) */
+#define FW_WR_OP_S 24
+#define FW_WR_OP_M 0xff
+#define FW_WR_OP_V(x) ((x) << FW_WR_OP_S)
+#define FW_WR_OP_G(x) (((x) >> FW_WR_OP_S) & FW_WR_OP_M)
+
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */
+#define FW_WR_ATOMIC_S 23
+#define FW_WR_ATOMIC_V(x) ((x) << FW_WR_ATOMIC_S)
+
+/* flush flag (hi) - firmware flushes flushable work requests buffered
+ * in the flow context.
+ */
+#define FW_WR_FLUSH_S 22
+#define FW_WR_FLUSH_V(x) ((x) << FW_WR_FLUSH_S)
+
+/* completion flag (hi) - firmware generates a cpl_fw6_ack */
+#define FW_WR_COMPL_S 21
+#define FW_WR_COMPL_V(x) ((x) << FW_WR_COMPL_S)
+#define FW_WR_COMPL_F FW_WR_COMPL_V(1U)
+
+/* work request immediate data length (hi) */
+#define FW_WR_IMMDLEN_S 0
+#define FW_WR_IMMDLEN_M 0xff
+#define FW_WR_IMMDLEN_V(x) ((x) << FW_WR_IMMDLEN_S)
+
+/* egress queue status update to associated ingress queue entry (lo) */
+#define FW_WR_EQUIQ_S 31
+#define FW_WR_EQUIQ_V(x) ((x) << FW_WR_EQUIQ_S)
+#define FW_WR_EQUIQ_F FW_WR_EQUIQ_V(1U)
+
+/* egress queue status update to egress queue status entry (lo) */
+#define FW_WR_EQUEQ_S 30
+#define FW_WR_EQUEQ_V(x) ((x) << FW_WR_EQUEQ_S)
+#define FW_WR_EQUEQ_F FW_WR_EQUEQ_V(1U)
+
+/* flow context identifier (lo) */
+#define FW_WR_FLOWID_S 8
+#define FW_WR_FLOWID_V(x) ((x) << FW_WR_FLOWID_S)
+
+/* length in units of 16-bytes (lo) */
+#define FW_WR_LEN16_S 0
+#define FW_WR_LEN16_V(x) ((x) << FW_WR_LEN16_S)
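Taken together, a work request header is just these fields OR'd into the hi and lo words. A minimal sketch (the FW_ULPTX_WR opcode and the flow id value are assumptions for illustration):

	struct fw_wr_hdr wr;
	unsigned int tid = 0x1234;	/* assumed flow context id */

	wr.hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |	/* opcode (hi) */
		      FW_WR_COMPL_F);		/* request a cpl_fw6_ack */
	wr.lo = htonl(FW_WR_FLOWID_V(tid) |	/* flow context id (lo) */
		      FW_WR_LEN16_V(2));	/* length = 2 * 16 bytes */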
#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
#define HW_TPL_FR_MT_PR_OV_P_FC 0X327
@@ -166,239 +197,239 @@ struct fw_filter_wr {
__u8 sma[6];
};
-#define S_FW_FILTER_WR_TID 12
-#define M_FW_FILTER_WR_TID 0xfffff
-#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
-#define G_FW_FILTER_WR_TID(x) \
- (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)
-
-#define S_FW_FILTER_WR_RQTYPE 11
-#define M_FW_FILTER_WR_RQTYPE 0x1
-#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
-#define G_FW_FILTER_WR_RQTYPE(x) \
- (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE)
-#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U)
-
-#define S_FW_FILTER_WR_NOREPLY 10
-#define M_FW_FILTER_WR_NOREPLY 0x1
-#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
-#define G_FW_FILTER_WR_NOREPLY(x) \
- (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY)
-#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U)
-
-#define S_FW_FILTER_WR_IQ 0
-#define M_FW_FILTER_WR_IQ 0x3ff
-#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
-#define G_FW_FILTER_WR_IQ(x) \
- (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)
-
-#define S_FW_FILTER_WR_DEL_FILTER 31
-#define M_FW_FILTER_WR_DEL_FILTER 0x1
-#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
-#define G_FW_FILTER_WR_DEL_FILTER(x) \
- (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER)
-#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
-
-#define S_FW_FILTER_WR_RPTTID 25
-#define M_FW_FILTER_WR_RPTTID 0x1
-#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
-#define G_FW_FILTER_WR_RPTTID(x) \
- (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID)
-#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U)
-
-#define S_FW_FILTER_WR_DROP 24
-#define M_FW_FILTER_WR_DROP 0x1
-#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
-#define G_FW_FILTER_WR_DROP(x) \
- (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP)
-#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)
-
-#define S_FW_FILTER_WR_DIRSTEER 23
-#define M_FW_FILTER_WR_DIRSTEER 0x1
-#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
-#define G_FW_FILTER_WR_DIRSTEER(x) \
- (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER)
-#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U)
-
-#define S_FW_FILTER_WR_MASKHASH 22
-#define M_FW_FILTER_WR_MASKHASH 0x1
-#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
-#define G_FW_FILTER_WR_MASKHASH(x) \
- (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH)
-#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U)
-
-#define S_FW_FILTER_WR_DIRSTEERHASH 21
-#define M_FW_FILTER_WR_DIRSTEERHASH 0x1
-#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
-#define G_FW_FILTER_WR_DIRSTEERHASH(x) \
- (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH)
-#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U)
-
-#define S_FW_FILTER_WR_LPBK 20
-#define M_FW_FILTER_WR_LPBK 0x1
-#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
-#define G_FW_FILTER_WR_LPBK(x) \
- (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK)
-#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U)
-
-#define S_FW_FILTER_WR_DMAC 19
-#define M_FW_FILTER_WR_DMAC 0x1
-#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
-#define G_FW_FILTER_WR_DMAC(x) \
- (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC)
-#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U)
-
-#define S_FW_FILTER_WR_SMAC 18
-#define M_FW_FILTER_WR_SMAC 0x1
-#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
-#define G_FW_FILTER_WR_SMAC(x) \
- (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC)
-#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U)
-
-#define S_FW_FILTER_WR_INSVLAN 17
-#define M_FW_FILTER_WR_INSVLAN 0x1
-#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
-#define G_FW_FILTER_WR_INSVLAN(x) \
- (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN)
-#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U)
-
-#define S_FW_FILTER_WR_RMVLAN 16
-#define M_FW_FILTER_WR_RMVLAN 0x1
-#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
-#define G_FW_FILTER_WR_RMVLAN(x) \
- (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN)
-#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U)
-
-#define S_FW_FILTER_WR_HITCNTS 15
-#define M_FW_FILTER_WR_HITCNTS 0x1
-#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
-#define G_FW_FILTER_WR_HITCNTS(x) \
- (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS)
-#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)
-
-#define S_FW_FILTER_WR_TXCHAN 13
-#define M_FW_FILTER_WR_TXCHAN 0x3
-#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
-#define G_FW_FILTER_WR_TXCHAN(x) \
- (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN)
-
-#define S_FW_FILTER_WR_PRIO 12
-#define M_FW_FILTER_WR_PRIO 0x1
-#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
-#define G_FW_FILTER_WR_PRIO(x) \
- (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO)
-#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U)
-
-#define S_FW_FILTER_WR_L2TIX 0
-#define M_FW_FILTER_WR_L2TIX 0xfff
-#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
-#define G_FW_FILTER_WR_L2TIX(x) \
- (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX)
-
-#define S_FW_FILTER_WR_FRAG 7
-#define M_FW_FILTER_WR_FRAG 0x1
-#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
-#define G_FW_FILTER_WR_FRAG(x) \
- (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG)
-#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U)
-
-#define S_FW_FILTER_WR_FRAGM 6
-#define M_FW_FILTER_WR_FRAGM 0x1
-#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
-#define G_FW_FILTER_WR_FRAGM(x) \
- (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM)
-#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U)
-
-#define S_FW_FILTER_WR_IVLAN_VLD 5
-#define M_FW_FILTER_WR_IVLAN_VLD 0x1
-#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
-#define G_FW_FILTER_WR_IVLAN_VLD(x) \
- (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD)
-#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U)
-
-#define S_FW_FILTER_WR_OVLAN_VLD 4
-#define M_FW_FILTER_WR_OVLAN_VLD 0x1
-#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
-#define G_FW_FILTER_WR_OVLAN_VLD(x) \
- (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
-#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)
-
-#define S_FW_FILTER_WR_IVLAN_VLDM 3
-#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
-#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
-#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
- (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
-#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)
-
-#define S_FW_FILTER_WR_OVLAN_VLDM 2
-#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
-#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
-#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
- (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
-#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)
-
-#define S_FW_FILTER_WR_RX_CHAN 15
-#define M_FW_FILTER_WR_RX_CHAN 0x1
-#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
-#define G_FW_FILTER_WR_RX_CHAN(x) \
- (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
-#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)
-
-#define S_FW_FILTER_WR_RX_RPL_IQ 0
-#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
-#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
-#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
- (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
-
-#define S_FW_FILTER_WR_MACI 23
-#define M_FW_FILTER_WR_MACI 0x1ff
-#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
-#define G_FW_FILTER_WR_MACI(x) \
- (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)
-
-#define S_FW_FILTER_WR_MACIM 14
-#define M_FW_FILTER_WR_MACIM 0x1ff
-#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
-#define G_FW_FILTER_WR_MACIM(x) \
- (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)
-
-#define S_FW_FILTER_WR_FCOE 13
-#define M_FW_FILTER_WR_FCOE 0x1
-#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
-#define G_FW_FILTER_WR_FCOE(x) \
- (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
-#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)
-
-#define S_FW_FILTER_WR_FCOEM 12
-#define M_FW_FILTER_WR_FCOEM 0x1
-#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
-#define G_FW_FILTER_WR_FCOEM(x) \
- (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
-#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)
-
-#define S_FW_FILTER_WR_PORT 9
-#define M_FW_FILTER_WR_PORT 0x7
-#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
-#define G_FW_FILTER_WR_PORT(x) \
- (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)
-
-#define S_FW_FILTER_WR_PORTM 6
-#define M_FW_FILTER_WR_PORTM 0x7
-#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
-#define G_FW_FILTER_WR_PORTM(x) \
- (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)
-
-#define S_FW_FILTER_WR_MATCHTYPE 3
-#define M_FW_FILTER_WR_MATCHTYPE 0x7
-#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
-#define G_FW_FILTER_WR_MATCHTYPE(x) \
- (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)
-
-#define S_FW_FILTER_WR_MATCHTYPEM 0
-#define M_FW_FILTER_WR_MATCHTYPEM 0x7
-#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
-#define G_FW_FILTER_WR_MATCHTYPEM(x) \
- (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
+#define FW_FILTER_WR_TID_S 12
+#define FW_FILTER_WR_TID_M 0xfffff
+#define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S)
+#define FW_FILTER_WR_TID_G(x) \
+ (((x) >> FW_FILTER_WR_TID_S) & FW_FILTER_WR_TID_M)
+
+#define FW_FILTER_WR_RQTYPE_S 11
+#define FW_FILTER_WR_RQTYPE_M 0x1
+#define FW_FILTER_WR_RQTYPE_V(x) ((x) << FW_FILTER_WR_RQTYPE_S)
+#define FW_FILTER_WR_RQTYPE_G(x) \
+ (((x) >> FW_FILTER_WR_RQTYPE_S) & FW_FILTER_WR_RQTYPE_M)
+#define FW_FILTER_WR_RQTYPE_F FW_FILTER_WR_RQTYPE_V(1U)
+
+#define FW_FILTER_WR_NOREPLY_S 10
+#define FW_FILTER_WR_NOREPLY_M 0x1
+#define FW_FILTER_WR_NOREPLY_V(x) ((x) << FW_FILTER_WR_NOREPLY_S)
+#define FW_FILTER_WR_NOREPLY_G(x) \
+ (((x) >> FW_FILTER_WR_NOREPLY_S) & FW_FILTER_WR_NOREPLY_M)
+#define FW_FILTER_WR_NOREPLY_F FW_FILTER_WR_NOREPLY_V(1U)
+
+#define FW_FILTER_WR_IQ_S 0
+#define FW_FILTER_WR_IQ_M 0x3ff
+#define FW_FILTER_WR_IQ_V(x) ((x) << FW_FILTER_WR_IQ_S)
+#define FW_FILTER_WR_IQ_G(x) \
+ (((x) >> FW_FILTER_WR_IQ_S) & FW_FILTER_WR_IQ_M)
+
+#define FW_FILTER_WR_DEL_FILTER_S 31
+#define FW_FILTER_WR_DEL_FILTER_M 0x1
+#define FW_FILTER_WR_DEL_FILTER_V(x) ((x) << FW_FILTER_WR_DEL_FILTER_S)
+#define FW_FILTER_WR_DEL_FILTER_G(x) \
+ (((x) >> FW_FILTER_WR_DEL_FILTER_S) & FW_FILTER_WR_DEL_FILTER_M)
+#define FW_FILTER_WR_DEL_FILTER_F FW_FILTER_WR_DEL_FILTER_V(1U)
+
+#define FW_FILTER_WR_RPTTID_S 25
+#define FW_FILTER_WR_RPTTID_M 0x1
+#define FW_FILTER_WR_RPTTID_V(x) ((x) << FW_FILTER_WR_RPTTID_S)
+#define FW_FILTER_WR_RPTTID_G(x) \
+ (((x) >> FW_FILTER_WR_RPTTID_S) & FW_FILTER_WR_RPTTID_M)
+#define FW_FILTER_WR_RPTTID_F FW_FILTER_WR_RPTTID_V(1U)
+
+#define FW_FILTER_WR_DROP_S 24
+#define FW_FILTER_WR_DROP_M 0x1
+#define FW_FILTER_WR_DROP_V(x) ((x) << FW_FILTER_WR_DROP_S)
+#define FW_FILTER_WR_DROP_G(x) \
+ (((x) >> FW_FILTER_WR_DROP_S) & FW_FILTER_WR_DROP_M)
+#define FW_FILTER_WR_DROP_F FW_FILTER_WR_DROP_V(1U)
+
+#define FW_FILTER_WR_DIRSTEER_S 23
+#define FW_FILTER_WR_DIRSTEER_M 0x1
+#define FW_FILTER_WR_DIRSTEER_V(x) ((x) << FW_FILTER_WR_DIRSTEER_S)
+#define FW_FILTER_WR_DIRSTEER_G(x) \
+ (((x) >> FW_FILTER_WR_DIRSTEER_S) & FW_FILTER_WR_DIRSTEER_M)
+#define FW_FILTER_WR_DIRSTEER_F FW_FILTER_WR_DIRSTEER_V(1U)
+
+#define FW_FILTER_WR_MASKHASH_S 22
+#define FW_FILTER_WR_MASKHASH_M 0x1
+#define FW_FILTER_WR_MASKHASH_V(x) ((x) << FW_FILTER_WR_MASKHASH_S)
+#define FW_FILTER_WR_MASKHASH_G(x) \
+ (((x) >> FW_FILTER_WR_MASKHASH_S) & FW_FILTER_WR_MASKHASH_M)
+#define FW_FILTER_WR_MASKHASH_F FW_FILTER_WR_MASKHASH_V(1U)
+
+#define FW_FILTER_WR_DIRSTEERHASH_S 21
+#define FW_FILTER_WR_DIRSTEERHASH_M 0x1
+#define FW_FILTER_WR_DIRSTEERHASH_V(x) ((x) << FW_FILTER_WR_DIRSTEERHASH_S)
+#define FW_FILTER_WR_DIRSTEERHASH_G(x) \
+ (((x) >> FW_FILTER_WR_DIRSTEERHASH_S) & FW_FILTER_WR_DIRSTEERHASH_M)
+#define FW_FILTER_WR_DIRSTEERHASH_F FW_FILTER_WR_DIRSTEERHASH_V(1U)
+
+#define FW_FILTER_WR_LPBK_S 20
+#define FW_FILTER_WR_LPBK_M 0x1
+#define FW_FILTER_WR_LPBK_V(x) ((x) << FW_FILTER_WR_LPBK_S)
+#define FW_FILTER_WR_LPBK_G(x) \
+ (((x) >> FW_FILTER_WR_LPBK_S) & FW_FILTER_WR_LPBK_M)
+#define FW_FILTER_WR_LPBK_F FW_FILTER_WR_LPBK_V(1U)
+
+#define FW_FILTER_WR_DMAC_S 19
+#define FW_FILTER_WR_DMAC_M 0x1
+#define FW_FILTER_WR_DMAC_V(x) ((x) << FW_FILTER_WR_DMAC_S)
+#define FW_FILTER_WR_DMAC_G(x) \
+ (((x) >> FW_FILTER_WR_DMAC_S) & FW_FILTER_WR_DMAC_M)
+#define FW_FILTER_WR_DMAC_F FW_FILTER_WR_DMAC_V(1U)
+
+#define FW_FILTER_WR_SMAC_S 18
+#define FW_FILTER_WR_SMAC_M 0x1
+#define FW_FILTER_WR_SMAC_V(x) ((x) << FW_FILTER_WR_SMAC_S)
+#define FW_FILTER_WR_SMAC_G(x) \
+ (((x) >> FW_FILTER_WR_SMAC_S) & FW_FILTER_WR_SMAC_M)
+#define FW_FILTER_WR_SMAC_F FW_FILTER_WR_SMAC_V(1U)
+
+#define FW_FILTER_WR_INSVLAN_S 17
+#define FW_FILTER_WR_INSVLAN_M 0x1
+#define FW_FILTER_WR_INSVLAN_V(x) ((x) << FW_FILTER_WR_INSVLAN_S)
+#define FW_FILTER_WR_INSVLAN_G(x) \
+ (((x) >> FW_FILTER_WR_INSVLAN_S) & FW_FILTER_WR_INSVLAN_M)
+#define FW_FILTER_WR_INSVLAN_F FW_FILTER_WR_INSVLAN_V(1U)
+
+#define FW_FILTER_WR_RMVLAN_S 16
+#define FW_FILTER_WR_RMVLAN_M 0x1
+#define FW_FILTER_WR_RMVLAN_V(x) ((x) << FW_FILTER_WR_RMVLAN_S)
+#define FW_FILTER_WR_RMVLAN_G(x) \
+ (((x) >> FW_FILTER_WR_RMVLAN_S) & FW_FILTER_WR_RMVLAN_M)
+#define FW_FILTER_WR_RMVLAN_F FW_FILTER_WR_RMVLAN_V(1U)
+
+#define FW_FILTER_WR_HITCNTS_S 15
+#define FW_FILTER_WR_HITCNTS_M 0x1
+#define FW_FILTER_WR_HITCNTS_V(x) ((x) << FW_FILTER_WR_HITCNTS_S)
+#define FW_FILTER_WR_HITCNTS_G(x) \
+ (((x) >> FW_FILTER_WR_HITCNTS_S) & FW_FILTER_WR_HITCNTS_M)
+#define FW_FILTER_WR_HITCNTS_F FW_FILTER_WR_HITCNTS_V(1U)
+
+#define FW_FILTER_WR_TXCHAN_S 13
+#define FW_FILTER_WR_TXCHAN_M 0x3
+#define FW_FILTER_WR_TXCHAN_V(x) ((x) << FW_FILTER_WR_TXCHAN_S)
+#define FW_FILTER_WR_TXCHAN_G(x) \
+ (((x) >> FW_FILTER_WR_TXCHAN_S) & FW_FILTER_WR_TXCHAN_M)
+
+#define FW_FILTER_WR_PRIO_S 12
+#define FW_FILTER_WR_PRIO_M 0x1
+#define FW_FILTER_WR_PRIO_V(x) ((x) << FW_FILTER_WR_PRIO_S)
+#define FW_FILTER_WR_PRIO_G(x) \
+ (((x) >> FW_FILTER_WR_PRIO_S) & FW_FILTER_WR_PRIO_M)
+#define FW_FILTER_WR_PRIO_F FW_FILTER_WR_PRIO_V(1U)
+
+#define FW_FILTER_WR_L2TIX_S 0
+#define FW_FILTER_WR_L2TIX_M 0xfff
+#define FW_FILTER_WR_L2TIX_V(x) ((x) << FW_FILTER_WR_L2TIX_S)
+#define FW_FILTER_WR_L2TIX_G(x) \
+ (((x) >> FW_FILTER_WR_L2TIX_S) & FW_FILTER_WR_L2TIX_M)
+
+#define FW_FILTER_WR_FRAG_S 7
+#define FW_FILTER_WR_FRAG_M 0x1
+#define FW_FILTER_WR_FRAG_V(x) ((x) << FW_FILTER_WR_FRAG_S)
+#define FW_FILTER_WR_FRAG_G(x) \
+ (((x) >> FW_FILTER_WR_FRAG_S) & FW_FILTER_WR_FRAG_M)
+#define FW_FILTER_WR_FRAG_F FW_FILTER_WR_FRAG_V(1U)
+
+#define FW_FILTER_WR_FRAGM_S 6
+#define FW_FILTER_WR_FRAGM_M 0x1
+#define FW_FILTER_WR_FRAGM_V(x) ((x) << FW_FILTER_WR_FRAGM_S)
+#define FW_FILTER_WR_FRAGM_G(x) \
+ (((x) >> FW_FILTER_WR_FRAGM_S) & FW_FILTER_WR_FRAGM_M)
+#define FW_FILTER_WR_FRAGM_F FW_FILTER_WR_FRAGM_V(1U)
+
+#define FW_FILTER_WR_IVLAN_VLD_S 5
+#define FW_FILTER_WR_IVLAN_VLD_M 0x1
+#define FW_FILTER_WR_IVLAN_VLD_V(x) ((x) << FW_FILTER_WR_IVLAN_VLD_S)
+#define FW_FILTER_WR_IVLAN_VLD_G(x) \
+ (((x) >> FW_FILTER_WR_IVLAN_VLD_S) & FW_FILTER_WR_IVLAN_VLD_M)
+#define FW_FILTER_WR_IVLAN_VLD_F FW_FILTER_WR_IVLAN_VLD_V(1U)
+
+#define FW_FILTER_WR_OVLAN_VLD_S 4
+#define FW_FILTER_WR_OVLAN_VLD_M 0x1
+#define FW_FILTER_WR_OVLAN_VLD_V(x) ((x) << FW_FILTER_WR_OVLAN_VLD_S)
+#define FW_FILTER_WR_OVLAN_VLD_G(x) \
+ (((x) >> FW_FILTER_WR_OVLAN_VLD_S) & FW_FILTER_WR_OVLAN_VLD_M)
+#define FW_FILTER_WR_OVLAN_VLD_F FW_FILTER_WR_OVLAN_VLD_V(1U)
+
+#define FW_FILTER_WR_IVLAN_VLDM_S 3
+#define FW_FILTER_WR_IVLAN_VLDM_M 0x1
+#define FW_FILTER_WR_IVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_IVLAN_VLDM_S)
+#define FW_FILTER_WR_IVLAN_VLDM_G(x) \
+ (((x) >> FW_FILTER_WR_IVLAN_VLDM_S) & FW_FILTER_WR_IVLAN_VLDM_M)
+#define FW_FILTER_WR_IVLAN_VLDM_F FW_FILTER_WR_IVLAN_VLDM_V(1U)
+
+#define FW_FILTER_WR_OVLAN_VLDM_S 2
+#define FW_FILTER_WR_OVLAN_VLDM_M 0x1
+#define FW_FILTER_WR_OVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_OVLAN_VLDM_S)
+#define FW_FILTER_WR_OVLAN_VLDM_G(x) \
+ (((x) >> FW_FILTER_WR_OVLAN_VLDM_S) & FW_FILTER_WR_OVLAN_VLDM_M)
+#define FW_FILTER_WR_OVLAN_VLDM_F FW_FILTER_WR_OVLAN_VLDM_V(1U)
+
+#define FW_FILTER_WR_RX_CHAN_S 15
+#define FW_FILTER_WR_RX_CHAN_M 0x1
+#define FW_FILTER_WR_RX_CHAN_V(x) ((x) << FW_FILTER_WR_RX_CHAN_S)
+#define FW_FILTER_WR_RX_CHAN_G(x) \
+ (((x) >> FW_FILTER_WR_RX_CHAN_S) & FW_FILTER_WR_RX_CHAN_M)
+#define FW_FILTER_WR_RX_CHAN_F FW_FILTER_WR_RX_CHAN_V(1U)
+
+#define FW_FILTER_WR_RX_RPL_IQ_S 0
+#define FW_FILTER_WR_RX_RPL_IQ_M 0x3ff
+#define FW_FILTER_WR_RX_RPL_IQ_V(x) ((x) << FW_FILTER_WR_RX_RPL_IQ_S)
+#define FW_FILTER_WR_RX_RPL_IQ_G(x) \
+ (((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M)
+
+#define FW_FILTER_WR_MACI_S 23
+#define FW_FILTER_WR_MACI_M 0x1ff
+#define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S)
+#define FW_FILTER_WR_MACI_G(x) \
+ (((x) >> FW_FILTER_WR_MACI_S) & FW_FILTER_WR_MACI_M)
+
+#define FW_FILTER_WR_MACIM_S 14
+#define FW_FILTER_WR_MACIM_M 0x1ff
+#define FW_FILTER_WR_MACIM_V(x) ((x) << FW_FILTER_WR_MACIM_S)
+#define FW_FILTER_WR_MACIM_G(x) \
+ (((x) >> FW_FILTER_WR_MACIM_S) & FW_FILTER_WR_MACIM_M)
+
+#define FW_FILTER_WR_FCOE_S 13
+#define FW_FILTER_WR_FCOE_M 0x1
+#define FW_FILTER_WR_FCOE_V(x) ((x) << FW_FILTER_WR_FCOE_S)
+#define FW_FILTER_WR_FCOE_G(x) \
+ (((x) >> FW_FILTER_WR_FCOE_S) & FW_FILTER_WR_FCOE_M)
+#define FW_FILTER_WR_FCOE_F FW_FILTER_WR_FCOE_V(1U)
+
+#define FW_FILTER_WR_FCOEM_S 12
+#define FW_FILTER_WR_FCOEM_M 0x1
+#define FW_FILTER_WR_FCOEM_V(x) ((x) << FW_FILTER_WR_FCOEM_S)
+#define FW_FILTER_WR_FCOEM_G(x) \
+ (((x) >> FW_FILTER_WR_FCOEM_S) & FW_FILTER_WR_FCOEM_M)
+#define FW_FILTER_WR_FCOEM_F FW_FILTER_WR_FCOEM_V(1U)
+
+#define FW_FILTER_WR_PORT_S 9
+#define FW_FILTER_WR_PORT_M 0x7
+#define FW_FILTER_WR_PORT_V(x) ((x) << FW_FILTER_WR_PORT_S)
+#define FW_FILTER_WR_PORT_G(x) \
+ (((x) >> FW_FILTER_WR_PORT_S) & FW_FILTER_WR_PORT_M)
+
+#define FW_FILTER_WR_PORTM_S 6
+#define FW_FILTER_WR_PORTM_M 0x7
+#define FW_FILTER_WR_PORTM_V(x) ((x) << FW_FILTER_WR_PORTM_S)
+#define FW_FILTER_WR_PORTM_G(x) \
+ (((x) >> FW_FILTER_WR_PORTM_S) & FW_FILTER_WR_PORTM_M)
+
+#define FW_FILTER_WR_MATCHTYPE_S 3
+#define FW_FILTER_WR_MATCHTYPE_M 0x7
+#define FW_FILTER_WR_MATCHTYPE_V(x) ((x) << FW_FILTER_WR_MATCHTYPE_S)
+#define FW_FILTER_WR_MATCHTYPE_G(x) \
+ (((x) >> FW_FILTER_WR_MATCHTYPE_S) & FW_FILTER_WR_MATCHTYPE_M)
+
+#define FW_FILTER_WR_MATCHTYPEM_S 0
+#define FW_FILTER_WR_MATCHTYPEM_M 0x7
+#define FW_FILTER_WR_MATCHTYPEM_V(x) ((x) << FW_FILTER_WR_MATCHTYPEM_S)
+#define FW_FILTER_WR_MATCHTYPEM_G(x) \
+ (((x) >> FW_FILTER_WR_MATCHTYPEM_S) & FW_FILTER_WR_MATCHTYPEM_M)
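For readers new to this series' convention: for a field FOO, FOO_S is its bit shift, FOO_M the post-shift mask, FOO_V(x) places a value, FOO_G(x) extracts one, and FOO_F is shorthand for the single-bit flag set to 1. A minimal sketch of how they compose (the scratch variable v is illustrative only, not part of the patch):

	u32 v = 0;

	v |= FW_FILTER_WR_FRAG_F;		/* set a one-bit flag */
	v |= FW_FILTER_WR_PORT_V(2);		/* place a multi-bit value */
	if (FW_FILTER_WR_FRAG_G(v))		/* read a field back out */
		pr_debug("filter matches IP fragments\n");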
struct fw_ulptx_wr {
__be32 op_to_compl;
@@ -460,65 +491,65 @@ struct fw_ofld_connection_wr {
} tcb;
};
-#define S_FW_OFLD_CONNECTION_WR_VERSION 31
-#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1
-#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_VERSION)
-#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \
- M_FW_OFLD_CONNECTION_WR_VERSION)
-#define F_FW_OFLD_CONNECTION_WR_VERSION \
- V_FW_OFLD_CONNECTION_WR_VERSION(1U)
-
-#define S_FW_OFLD_CONNECTION_WR_CPL 30
-#define M_FW_OFLD_CONNECTION_WR_CPL 0x1
-#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL)
-#define G_FW_OFLD_CONNECTION_WR_CPL(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL)
-#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U)
-
-#define S_FW_OFLD_CONNECTION_WR_T_STATE 28
-#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf
-#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)
-#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \
- M_FW_OFLD_CONNECTION_WR_T_STATE)
-
-#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
-#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf
-#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)
-#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \
- M_FW_OFLD_CONNECTION_WR_RCV_SCALE)
-
-#define S_FW_OFLD_CONNECTION_WR_ASTID 0
-#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff
-#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_ASTID)
-#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID)
-
-#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15
-#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1
-#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
-#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \
- M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
-#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \
- V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U)
-
-#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14
-#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1
-#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
- ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
-#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
- (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \
- M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
-#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \
- V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U)
+#define FW_OFLD_CONNECTION_WR_VERSION_S 31
+#define FW_OFLD_CONNECTION_WR_VERSION_M 0x1
+#define FW_OFLD_CONNECTION_WR_VERSION_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_VERSION_S)
+#define FW_OFLD_CONNECTION_WR_VERSION_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_VERSION_S) & \
+ FW_OFLD_CONNECTION_WR_VERSION_M)
+#define FW_OFLD_CONNECTION_WR_VERSION_F \
+ FW_OFLD_CONNECTION_WR_VERSION_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_CPL_S 30
+#define FW_OFLD_CONNECTION_WR_CPL_M 0x1
+#define FW_OFLD_CONNECTION_WR_CPL_V(x) ((x) << FW_OFLD_CONNECTION_WR_CPL_S)
+#define FW_OFLD_CONNECTION_WR_CPL_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_CPL_S) & FW_OFLD_CONNECTION_WR_CPL_M)
+#define FW_OFLD_CONNECTION_WR_CPL_F FW_OFLD_CONNECTION_WR_CPL_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_T_STATE_S 28
+#define FW_OFLD_CONNECTION_WR_T_STATE_M 0xf
+#define FW_OFLD_CONNECTION_WR_T_STATE_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_T_STATE_S)
+#define FW_OFLD_CONNECTION_WR_T_STATE_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_T_STATE_S) & \
+ FW_OFLD_CONNECTION_WR_T_STATE_M)
+
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_S 24
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_M 0xf
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_RCV_SCALE_S)
+#define FW_OFLD_CONNECTION_WR_RCV_SCALE_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_RCV_SCALE_S) & \
+ FW_OFLD_CONNECTION_WR_RCV_SCALE_M)
+
+#define FW_OFLD_CONNECTION_WR_ASTID_S 0
+#define FW_OFLD_CONNECTION_WR_ASTID_M 0xffffff
+#define FW_OFLD_CONNECTION_WR_ASTID_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_ASTID_S)
+#define FW_OFLD_CONNECTION_WR_ASTID_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_ASTID_S) & FW_OFLD_CONNECTION_WR_ASTID_M)
+
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S 15
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M 0x1
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S)
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) & \
+ FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M)
+#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F \
+ FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(1U)
+
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S 14
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M 0x1
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(x) \
+ ((x) << FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S)
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_G(x) \
+ (((x) >> FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) & \
+ FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M)
+#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_F \
+ FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(1U)
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
@@ -539,33 +570,56 @@ struct fw_flowc_mnemval {
struct fw_flowc_wr {
__be32 op_to_nparams;
-#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0)
__be32 flowid_len16;
struct fw_flowc_mnemval mnemval[0];
};
+#define FW_FLOWC_WR_NPARAMS_S 0
+#define FW_FLOWC_WR_NPARAMS_V(x) ((x) << FW_FLOWC_WR_NPARAMS_S)
+
struct fw_ofld_tx_data_wr {
__be32 op_to_immdlen;
__be32 flowid_len16;
__be32 plen;
__be32 tunnel_to_proxy;
-#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19)
-#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18)
-#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17)
-#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16)
-#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15)
-#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14)
-#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10)
-#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6)
};
+#define FW_OFLD_TX_DATA_WR_TUNNEL_S 19
+#define FW_OFLD_TX_DATA_WR_TUNNEL_V(x) ((x) << FW_OFLD_TX_DATA_WR_TUNNEL_S)
+
+#define FW_OFLD_TX_DATA_WR_SAVE_S 18
+#define FW_OFLD_TX_DATA_WR_SAVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SAVE_S)
+
+#define FW_OFLD_TX_DATA_WR_FLUSH_S 17
+#define FW_OFLD_TX_DATA_WR_FLUSH_V(x) ((x) << FW_OFLD_TX_DATA_WR_FLUSH_S)
+#define FW_OFLD_TX_DATA_WR_FLUSH_F FW_OFLD_TX_DATA_WR_FLUSH_V(1U)
+
+#define FW_OFLD_TX_DATA_WR_URGENT_S 16
+#define FW_OFLD_TX_DATA_WR_URGENT_V(x) ((x) << FW_OFLD_TX_DATA_WR_URGENT_S)
+
+#define FW_OFLD_TX_DATA_WR_MORE_S 15
+#define FW_OFLD_TX_DATA_WR_MORE_V(x) ((x) << FW_OFLD_TX_DATA_WR_MORE_S)
+
+#define FW_OFLD_TX_DATA_WR_SHOVE_S 14
+#define FW_OFLD_TX_DATA_WR_SHOVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S)
+#define FW_OFLD_TX_DATA_WR_SHOVE_F FW_OFLD_TX_DATA_WR_SHOVE_V(1U)
+
+#define FW_OFLD_TX_DATA_WR_ULPMODE_S 10
+#define FW_OFLD_TX_DATA_WR_ULPMODE_V(x) ((x) << FW_OFLD_TX_DATA_WR_ULPMODE_S)
+
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_S 6
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(x) \
+ ((x) << FW_OFLD_TX_DATA_WR_ULPSUBMODE_S)
+
struct fw_cmd_wr {
__be32 op_dma;
-#define FW_CMD_WR_DMA (1U << 17)
__be32 len16_pkd;
__be64 cookie_daddr;
};
+#define FW_CMD_WR_DMA_S 17
+#define FW_CMD_WR_DMA_V(x) ((x) << FW_CMD_WR_DMA_S)
+
struct fw_eth_tx_pkt_vm_wr {
__be32 op_immdlen;
__be32 equiq_to_len16;
@@ -641,18 +695,39 @@ struct fw_cmd_hdr {
__be32 lo;
};
-#define FW_CMD_OP(x) ((x) << 24)
-#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
-#define FW_CMD_REQUEST (1U << 23)
-#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1)
-#define FW_CMD_READ (1U << 22)
-#define FW_CMD_WRITE (1U << 21)
-#define FW_CMD_EXEC (1U << 20)
-#define FW_CMD_RAMASK(x) ((x) << 20)
-#define FW_CMD_RETVAL(x) ((x) << 8)
-#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
-#define FW_CMD_LEN16(x) ((x) << 0)
-#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+#define FW_CMD_OP_S 24
+#define FW_CMD_OP_M 0xff
+#define FW_CMD_OP_V(x) ((x) << FW_CMD_OP_S)
+#define FW_CMD_OP_G(x) (((x) >> FW_CMD_OP_S) & FW_CMD_OP_M)
+
+#define FW_CMD_REQUEST_S 23
+#define FW_CMD_REQUEST_V(x) ((x) << FW_CMD_REQUEST_S)
+#define FW_CMD_REQUEST_F FW_CMD_REQUEST_V(1U)
+
+#define FW_CMD_READ_S 22
+#define FW_CMD_READ_V(x) ((x) << FW_CMD_READ_S)
+#define FW_CMD_READ_F FW_CMD_READ_V(1U)
+
+#define FW_CMD_WRITE_S 21
+#define FW_CMD_WRITE_V(x) ((x) << FW_CMD_WRITE_S)
+#define FW_CMD_WRITE_F FW_CMD_WRITE_V(1U)
+
+#define FW_CMD_EXEC_S 20
+#define FW_CMD_EXEC_V(x) ((x) << FW_CMD_EXEC_S)
+#define FW_CMD_EXEC_F FW_CMD_EXEC_V(1U)
+
+#define FW_CMD_RAMASK_S 20
+#define FW_CMD_RAMASK_V(x) ((x) << FW_CMD_RAMASK_S)
+
+#define FW_CMD_RETVAL_S 8
+#define FW_CMD_RETVAL_M 0xff
+#define FW_CMD_RETVAL_V(x) ((x) << FW_CMD_RETVAL_S)
+#define FW_CMD_RETVAL_G(x) (((x) >> FW_CMD_RETVAL_S) & FW_CMD_RETVAL_M)
+
+#define FW_CMD_LEN16_S 0
+#define FW_CMD_LEN16_V(x) ((x) << FW_CMD_LEN16_S)
+
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
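For context, these FW_CMD_* helpers compose the common header carried by every firmware mailbox command; a typical write command is built roughly like this (sketch, following the driver's usual pattern):

	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));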
enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
@@ -685,7 +760,8 @@ enum fw_ldst_func_mod_index {
struct fw_ldst_cmd {
__be32 op_to_addrspace;
-#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0)
+#define FW_LDST_CMD_ADDRSPACE_S 0
+#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
__be32 cycles_to_len16;
union fw_ldst {
struct fw_ldst_addrval {
@@ -741,15 +817,33 @@ struct fw_ldst_cmd {
} u;
};
-#define FW_LDST_CMD_MSG(x) ((x) << 31)
-#define FW_LDST_CMD_PADDR(x) ((x) << 8)
-#define FW_LDST_CMD_MMD(x) ((x) << 0)
-#define FW_LDST_CMD_FID(x) ((x) << 15)
-#define FW_LDST_CMD_CTL(x) ((x) << 0)
-#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
-#define FW_LDST_CMD_LC (1U << 4)
-#define FW_LDST_CMD_NACCESS(x) ((x) << 0)
-#define FW_LDST_CMD_FN(x) ((x) << 0)
+#define FW_LDST_CMD_MSG_S 31
+#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
+
+#define FW_LDST_CMD_PADDR_S 8
+#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S)
+
+#define FW_LDST_CMD_MMD_S 0
+#define FW_LDST_CMD_MMD_V(x) ((x) << FW_LDST_CMD_MMD_S)
+
+#define FW_LDST_CMD_FID_S 15
+#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S)
+
+#define FW_LDST_CMD_CTL_S 0
+#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S)
+
+#define FW_LDST_CMD_RPLCPF_S 0
+#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S)
+
+#define FW_LDST_CMD_LC_S 4
+#define FW_LDST_CMD_LC_V(x) ((x) << FW_LDST_CMD_LC_S)
+#define FW_LDST_CMD_LC_F FW_LDST_CMD_LC_V(1U)
+
+#define FW_LDST_CMD_FN_S 0
+#define FW_LDST_CMD_FN_V(x) ((x) << FW_LDST_CMD_FN_S)
+
+#define FW_LDST_CMD_NACCESS_S 0
+#define FW_LDST_CMD_NACCESS_V(x) ((x) << FW_LDST_CMD_NACCESS_S)
struct fw_reset_cmd {
__be32 op_to_write;
@@ -758,11 +852,12 @@ struct fw_reset_cmd {
__be32 halt_pkd;
};
-#define FW_RESET_CMD_HALT_SHIFT 31
-#define FW_RESET_CMD_HALT_MASK 0x1
-#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT)
-#define FW_RESET_CMD_HALT_GET(x) \
- (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK)
+#define FW_RESET_CMD_HALT_S 31
+#define FW_RESET_CMD_HALT_M 0x1
+#define FW_RESET_CMD_HALT_V(x) ((x) << FW_RESET_CMD_HALT_S)
+#define FW_RESET_CMD_HALT_G(x) \
+ (((x) >> FW_RESET_CMD_HALT_S) & FW_RESET_CMD_HALT_M)
+#define FW_RESET_CMD_HALT_F FW_RESET_CMD_HALT_V(1U)
enum fw_hellow_cmd {
fw_hello_cmd_stage_os = 0x0
@@ -772,22 +867,42 @@ struct fw_hello_cmd {
__be32 op_to_write;
__be32 retval_len16;
__be32 err_to_clearinit;
-#define FW_HELLO_CMD_ERR (1U << 31)
-#define FW_HELLO_CMD_INIT (1U << 30)
-#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
-#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
-#define FW_HELLO_CMD_MBMASTER_MASK 0xfU
-#define FW_HELLO_CMD_MBMASTER_SHIFT 24
-#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
-#define FW_HELLO_CMD_MBMASTER_GET(x) \
- (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
-#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23)
-#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
-#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
-#define FW_HELLO_CMD_CLEARINIT (1U << 16)
__be32 fwrev;
};
+#define FW_HELLO_CMD_ERR_S 31
+#define FW_HELLO_CMD_ERR_V(x) ((x) << FW_HELLO_CMD_ERR_S)
+#define FW_HELLO_CMD_ERR_F FW_HELLO_CMD_ERR_V(1U)
+
+#define FW_HELLO_CMD_INIT_S 30
+#define FW_HELLO_CMD_INIT_V(x) ((x) << FW_HELLO_CMD_INIT_S)
+#define FW_HELLO_CMD_INIT_F FW_HELLO_CMD_INIT_V(1U)
+
+#define FW_HELLO_CMD_MASTERDIS_S 29
+#define FW_HELLO_CMD_MASTERDIS_V(x) ((x) << FW_HELLO_CMD_MASTERDIS_S)
+
+#define FW_HELLO_CMD_MASTERFORCE_S 28
+#define FW_HELLO_CMD_MASTERFORCE_V(x) ((x) << FW_HELLO_CMD_MASTERFORCE_S)
+
+#define FW_HELLO_CMD_MBMASTER_S 24
+#define FW_HELLO_CMD_MBMASTER_M 0xfU
+#define FW_HELLO_CMD_MBMASTER_V(x) ((x) << FW_HELLO_CMD_MBMASTER_S)
+#define FW_HELLO_CMD_MBMASTER_G(x) \
+ (((x) >> FW_HELLO_CMD_MBMASTER_S) & FW_HELLO_CMD_MBMASTER_M)
+
+#define FW_HELLO_CMD_MBASYNCNOTINT_S 23
+#define FW_HELLO_CMD_MBASYNCNOTINT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOTINT_S)
+
+#define FW_HELLO_CMD_MBASYNCNOT_S 20
+#define FW_HELLO_CMD_MBASYNCNOT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOT_S)
+
+#define FW_HELLO_CMD_STAGE_S 17
+#define FW_HELLO_CMD_STAGE_V(x) ((x) << FW_HELLO_CMD_STAGE_S)
+
+#define FW_HELLO_CMD_CLEARINIT_S 16
+#define FW_HELLO_CMD_CLEARINIT_V(x) ((x) << FW_HELLO_CMD_CLEARINIT_S)
+#define FW_HELLO_CMD_CLEARINIT_F FW_HELLO_CMD_CLEARINIT_V(1U)
+
struct fw_bye_cmd {
__be32 op_to_write;
__be32 retval_len16;
@@ -898,9 +1013,17 @@ struct fw_caps_config_cmd {
__be32 finicsum;
};
-#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27)
-#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24)
-#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16)
+#define FW_CAPS_CONFIG_CMD_CFVALID_S 27
+#define FW_CAPS_CONFIG_CMD_CFVALID_V(x) ((x) << FW_CAPS_CONFIG_CMD_CFVALID_S)
+#define FW_CAPS_CONFIG_CMD_CFVALID_F FW_CAPS_CONFIG_CMD_CFVALID_V(1U)
+
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S 24
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(x) \
+ ((x) << FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S)
+
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S 16
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(x) \
+ ((x) << FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S)
/*
* params command mnemonics
@@ -996,20 +1119,29 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
};
-#define FW_PARAMS_MNEM(x) ((x) << 24)
-#define FW_PARAMS_PARAM_X(x) ((x) << 16)
-#define FW_PARAMS_PARAM_Y_SHIFT 8
-#define FW_PARAMS_PARAM_Y_MASK 0xffU
-#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT)
-#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\
- FW_PARAMS_PARAM_Y_MASK)
-#define FW_PARAMS_PARAM_Z_SHIFT 0
-#define FW_PARAMS_PARAM_Z_MASK 0xffu
-#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT)
-#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\
- FW_PARAMS_PARAM_Z_MASK)
-#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
-#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
+#define FW_PARAMS_MNEM_S 24
+#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S)
+
+#define FW_PARAMS_PARAM_X_S 16
+#define FW_PARAMS_PARAM_X_V(x) ((x) << FW_PARAMS_PARAM_X_S)
+
+#define FW_PARAMS_PARAM_Y_S 8
+#define FW_PARAMS_PARAM_Y_M 0xffU
+#define FW_PARAMS_PARAM_Y_V(x) ((x) << FW_PARAMS_PARAM_Y_S)
+#define FW_PARAMS_PARAM_Y_G(x) (((x) >> FW_PARAMS_PARAM_Y_S) &\
+ FW_PARAMS_PARAM_Y_M)
+
+#define FW_PARAMS_PARAM_Z_S 0
+#define FW_PARAMS_PARAM_Z_M 0xffu
+#define FW_PARAMS_PARAM_Z_V(x) ((x) << FW_PARAMS_PARAM_Z_S)
+#define FW_PARAMS_PARAM_Z_G(x) (((x) >> FW_PARAMS_PARAM_Z_S) &\
+ FW_PARAMS_PARAM_Z_M)
+
+#define FW_PARAMS_PARAM_XYZ_S 0
+#define FW_PARAMS_PARAM_XYZ_V(x) ((x) << FW_PARAMS_PARAM_XYZ_S)
+
+#define FW_PARAMS_PARAM_YZ_S 0
+#define FW_PARAMS_PARAM_YZ_V(x) ((x) << FW_PARAMS_PARAM_YZ_S)
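A parameter identifier packs the mnemonic and the x/y/z indices into a single 32-bit word; a device parameter query is built roughly as below (sketch, mirroring the driver's FW_PARAM_DEV() helper):

	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);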
struct fw_params_cmd {
__be32 op_to_vfn;
@@ -1020,8 +1152,11 @@ struct fw_params_cmd {
} param[7];
};
-#define FW_PARAMS_CMD_PFN(x) ((x) << 8)
-#define FW_PARAMS_CMD_VFN(x) ((x) << 0)
+#define FW_PARAMS_CMD_PFN_S 8
+#define FW_PARAMS_CMD_PFN_V(x) ((x) << FW_PARAMS_CMD_PFN_S)
+
+#define FW_PARAMS_CMD_VFN_S 0
+#define FW_PARAMS_CMD_VFN_V(x) ((x) << FW_PARAMS_CMD_VFN_S)
struct fw_pfvf_cmd {
__be32 op_to_vfn;
@@ -1035,46 +1170,82 @@ struct fw_pfvf_cmd {
__be32 r4;
};
-#define FW_PFVF_CMD_PFN(x) ((x) << 8)
-#define FW_PFVF_CMD_VFN(x) ((x) << 0)
-
-#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20)
-#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff)
-
-#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
-#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_PFVF_CMD_TYPE (1 << 31)
-#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1)
-
-#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
-#define FW_PFVF_CMD_CMASK_MASK 0xf
-#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK)
-
-#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
-#define FW_PFVF_CMD_PMASK_MASK 0xf
-#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK)
-
-#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
-#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_PFVF_CMD_TC(x) ((x) << 24)
-#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff)
-
-#define FW_PFVF_CMD_NVI(x) ((x) << 16)
-#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff)
-
-#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0)
-#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff)
-
-#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24)
-#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff)
-
-#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16)
-#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff)
-
-#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0)
-#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff)
+#define FW_PFVF_CMD_PFN_S 8
+#define FW_PFVF_CMD_PFN_V(x) ((x) << FW_PFVF_CMD_PFN_S)
+
+#define FW_PFVF_CMD_VFN_S 0
+#define FW_PFVF_CMD_VFN_V(x) ((x) << FW_PFVF_CMD_VFN_S)
+
+#define FW_PFVF_CMD_NIQFLINT_S 20
+#define FW_PFVF_CMD_NIQFLINT_M 0xfff
+#define FW_PFVF_CMD_NIQFLINT_V(x) ((x) << FW_PFVF_CMD_NIQFLINT_S)
+#define FW_PFVF_CMD_NIQFLINT_G(x) \
+ (((x) >> FW_PFVF_CMD_NIQFLINT_S) & FW_PFVF_CMD_NIQFLINT_M)
+
+#define FW_PFVF_CMD_NIQ_S 0
+#define FW_PFVF_CMD_NIQ_M 0xfffff
+#define FW_PFVF_CMD_NIQ_V(x) ((x) << FW_PFVF_CMD_NIQ_S)
+#define FW_PFVF_CMD_NIQ_G(x) \
+ (((x) >> FW_PFVF_CMD_NIQ_S) & FW_PFVF_CMD_NIQ_M)
+
+#define FW_PFVF_CMD_TYPE_S 31
+#define FW_PFVF_CMD_TYPE_M 0x1
+#define FW_PFVF_CMD_TYPE_V(x) ((x) << FW_PFVF_CMD_TYPE_S)
+#define FW_PFVF_CMD_TYPE_G(x) \
+ (((x) >> FW_PFVF_CMD_TYPE_S) & FW_PFVF_CMD_TYPE_M)
+#define FW_PFVF_CMD_TYPE_F FW_PFVF_CMD_TYPE_V(1U)
+
+#define FW_PFVF_CMD_CMASK_S 24
+#define FW_PFVF_CMD_CMASK_M 0xf
+#define FW_PFVF_CMD_CMASK_V(x) ((x) << FW_PFVF_CMD_CMASK_S)
+#define FW_PFVF_CMD_CMASK_G(x) \
+ (((x) >> FW_PFVF_CMD_CMASK_S) & FW_PFVF_CMD_CMASK_M)
+
+#define FW_PFVF_CMD_PMASK_S 20
+#define FW_PFVF_CMD_PMASK_M 0xf
+#define FW_PFVF_CMD_PMASK_V(x) ((x) << FW_PFVF_CMD_PMASK_S)
+#define FW_PFVF_CMD_PMASK_G(x) \
+ (((x) >> FW_PFVF_CMD_PMASK_S) & FW_PFVF_CMD_PMASK_M)
+
+#define FW_PFVF_CMD_NEQ_S 0
+#define FW_PFVF_CMD_NEQ_M 0xfffff
+#define FW_PFVF_CMD_NEQ_V(x) ((x) << FW_PFVF_CMD_NEQ_S)
+#define FW_PFVF_CMD_NEQ_G(x) \
+ (((x) >> FW_PFVF_CMD_NEQ_S) & FW_PFVF_CMD_NEQ_M)
+
+#define FW_PFVF_CMD_TC_S 24
+#define FW_PFVF_CMD_TC_M 0xff
+#define FW_PFVF_CMD_TC_V(x) ((x) << FW_PFVF_CMD_TC_S)
+#define FW_PFVF_CMD_TC_G(x) (((x) >> FW_PFVF_CMD_TC_S) & FW_PFVF_CMD_TC_M)
+
+#define FW_PFVF_CMD_NVI_S 16
+#define FW_PFVF_CMD_NVI_M 0xff
+#define FW_PFVF_CMD_NVI_V(x) ((x) << FW_PFVF_CMD_NVI_S)
+#define FW_PFVF_CMD_NVI_G(x) (((x) >> FW_PFVF_CMD_NVI_S) & FW_PFVF_CMD_NVI_M)
+
+#define FW_PFVF_CMD_NEXACTF_S 0
+#define FW_PFVF_CMD_NEXACTF_M 0xffff
+#define FW_PFVF_CMD_NEXACTF_V(x) ((x) << FW_PFVF_CMD_NEXACTF_S)
+#define FW_PFVF_CMD_NEXACTF_G(x) \
+ (((x) >> FW_PFVF_CMD_NEXACTF_S) & FW_PFVF_CMD_NEXACTF_M)
+
+#define FW_PFVF_CMD_R_CAPS_S 24
+#define FW_PFVF_CMD_R_CAPS_M 0xff
+#define FW_PFVF_CMD_R_CAPS_V(x) ((x) << FW_PFVF_CMD_R_CAPS_S)
+#define FW_PFVF_CMD_R_CAPS_G(x) \
+ (((x) >> FW_PFVF_CMD_R_CAPS_S) & FW_PFVF_CMD_R_CAPS_M)
+
+#define FW_PFVF_CMD_WX_CAPS_S 16
+#define FW_PFVF_CMD_WX_CAPS_M 0xff
+#define FW_PFVF_CMD_WX_CAPS_V(x) ((x) << FW_PFVF_CMD_WX_CAPS_S)
+#define FW_PFVF_CMD_WX_CAPS_G(x) \
+ (((x) >> FW_PFVF_CMD_WX_CAPS_S) & FW_PFVF_CMD_WX_CAPS_M)
+
+#define FW_PFVF_CMD_NETHCTRL_S 0
+#define FW_PFVF_CMD_NETHCTRL_M 0xffff
+#define FW_PFVF_CMD_NETHCTRL_V(x) ((x) << FW_PFVF_CMD_NETHCTRL_S)
+#define FW_PFVF_CMD_NETHCTRL_G(x) \
+ (((x) >> FW_PFVF_CMD_NETHCTRL_S) & FW_PFVF_CMD_NETHCTRL_M)
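On the read side, the new _G() getters recover the packed resource counts from a completed reply; e.g. (sketch; rpl is assumed to be a struct fw_pfvf_cmd filled in by the firmware, with niqflint_niq taken as the member holding both fields):

	u32 word = be32_to_cpu(rpl.niqflint_niq);
	unsigned int niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
	unsigned int niq = FW_PFVF_CMD_NIQ_G(word);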
enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
@@ -1102,85 +1273,239 @@ struct fw_iq_cmd {
__be64 fl1addr;
};
-#define FW_IQ_CMD_PFN(x) ((x) << 8)
-#define FW_IQ_CMD_VFN(x) ((x) << 0)
-
-#define FW_IQ_CMD_ALLOC (1U << 31)
-#define FW_IQ_CMD_FREE (1U << 30)
-#define FW_IQ_CMD_MODIFY (1U << 29)
-#define FW_IQ_CMD_IQSTART(x) ((x) << 28)
-#define FW_IQ_CMD_IQSTOP(x) ((x) << 27)
-
-#define FW_IQ_CMD_TYPE(x) ((x) << 29)
-#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28)
-#define FW_IQ_CMD_VIID(x) ((x) << 16)
-#define FW_IQ_CMD_IQANDST(x) ((x) << 15)
-#define FW_IQ_CMD_IQANUS(x) ((x) << 14)
-#define FW_IQ_CMD_IQANUD(x) ((x) << 12)
-#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0)
-
-#define FW_IQ_CMD_IQDROPRSS (1U << 15)
-#define FW_IQ_CMD_IQGTSMODE (1U << 14)
-#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12)
-#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11)
-#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6)
-#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4)
-#define FW_IQ_CMD_IQO (1U << 3)
-#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2)
-#define FW_IQ_CMD_IQESIZE(x) ((x) << 0)
-
-#define FW_IQ_CMD_IQNS(x) ((x) << 31)
-#define FW_IQ_CMD_IQRO(x) ((x) << 30)
-#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28)
-#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27)
-#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26)
-#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20)
-#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15)
-#define FW_IQ_CMD_FL0DBP(x) ((x) << 14)
-#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13)
-#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12)
-#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11)
-#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10)
-#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9)
-#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8)
-#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7)
-#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
-#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
-#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
-#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2)
-#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1)
-#define FW_IQ_CMD_FL0CONGEN (1U << 0)
-
-#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
-#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10)
-#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7)
-#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4)
-#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3)
-#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0)
-
-#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20)
-#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15)
-#define FW_IQ_CMD_FL1DBP(x) ((x) << 14)
-#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13)
-#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12)
-#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11)
-#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10)
-#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9)
-#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8)
-#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7)
-#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6)
-#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4)
-#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3)
-#define FW_IQ_CMD_FL1PADEN (1U << 2)
-#define FW_IQ_CMD_FL1PACKEN (1U << 1)
-#define FW_IQ_CMD_FL1CONGEN (1U << 0)
-
-#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15)
-#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10)
-#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7)
-#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4)
-#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3)
-#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0)
+#define FW_IQ_CMD_PFN_S 8
+#define FW_IQ_CMD_PFN_V(x) ((x) << FW_IQ_CMD_PFN_S)
+
+#define FW_IQ_CMD_VFN_S 0
+#define FW_IQ_CMD_VFN_V(x) ((x) << FW_IQ_CMD_VFN_S)
+
+#define FW_IQ_CMD_ALLOC_S 31
+#define FW_IQ_CMD_ALLOC_V(x) ((x) << FW_IQ_CMD_ALLOC_S)
+#define FW_IQ_CMD_ALLOC_F FW_IQ_CMD_ALLOC_V(1U)
+
+#define FW_IQ_CMD_FREE_S 30
+#define FW_IQ_CMD_FREE_V(x) ((x) << FW_IQ_CMD_FREE_S)
+#define FW_IQ_CMD_FREE_F FW_IQ_CMD_FREE_V(1U)
+
+#define FW_IQ_CMD_MODIFY_S 29
+#define FW_IQ_CMD_MODIFY_V(x) ((x) << FW_IQ_CMD_MODIFY_S)
+#define FW_IQ_CMD_MODIFY_F FW_IQ_CMD_MODIFY_V(1U)
+
+#define FW_IQ_CMD_IQSTART_S 28
+#define FW_IQ_CMD_IQSTART_V(x) ((x) << FW_IQ_CMD_IQSTART_S)
+#define FW_IQ_CMD_IQSTART_F FW_IQ_CMD_IQSTART_V(1U)
+
+#define FW_IQ_CMD_IQSTOP_S 27
+#define FW_IQ_CMD_IQSTOP_V(x) ((x) << FW_IQ_CMD_IQSTOP_S)
+#define FW_IQ_CMD_IQSTOP_F FW_IQ_CMD_IQSTOP_V(1U)
+
+#define FW_IQ_CMD_TYPE_S 29
+#define FW_IQ_CMD_TYPE_V(x) ((x) << FW_IQ_CMD_TYPE_S)
+
+#define FW_IQ_CMD_IQASYNCH_S 28
+#define FW_IQ_CMD_IQASYNCH_V(x) ((x) << FW_IQ_CMD_IQASYNCH_S)
+
+#define FW_IQ_CMD_VIID_S 16
+#define FW_IQ_CMD_VIID_V(x) ((x) << FW_IQ_CMD_VIID_S)
+
+#define FW_IQ_CMD_IQANDST_S 15
+#define FW_IQ_CMD_IQANDST_V(x) ((x) << FW_IQ_CMD_IQANDST_S)
+
+#define FW_IQ_CMD_IQANUS_S 14
+#define FW_IQ_CMD_IQANUS_V(x) ((x) << FW_IQ_CMD_IQANUS_S)
+
+#define FW_IQ_CMD_IQANUD_S 12
+#define FW_IQ_CMD_IQANUD_V(x) ((x) << FW_IQ_CMD_IQANUD_S)
+
+#define FW_IQ_CMD_IQANDSTINDEX_S 0
+#define FW_IQ_CMD_IQANDSTINDEX_V(x) ((x) << FW_IQ_CMD_IQANDSTINDEX_S)
+
+#define FW_IQ_CMD_IQDROPRSS_S 15
+#define FW_IQ_CMD_IQDROPRSS_V(x) ((x) << FW_IQ_CMD_IQDROPRSS_S)
+#define FW_IQ_CMD_IQDROPRSS_F FW_IQ_CMD_IQDROPRSS_V(1U)
+
+#define FW_IQ_CMD_IQGTSMODE_S 14
+#define FW_IQ_CMD_IQGTSMODE_V(x) ((x) << FW_IQ_CMD_IQGTSMODE_S)
+#define FW_IQ_CMD_IQGTSMODE_F FW_IQ_CMD_IQGTSMODE_V(1U)
+
+#define FW_IQ_CMD_IQPCIECH_S 12
+#define FW_IQ_CMD_IQPCIECH_V(x) ((x) << FW_IQ_CMD_IQPCIECH_S)
+
+#define FW_IQ_CMD_IQDCAEN_S 11
+#define FW_IQ_CMD_IQDCAEN_V(x) ((x) << FW_IQ_CMD_IQDCAEN_S)
+
+#define FW_IQ_CMD_IQDCACPU_S 6
+#define FW_IQ_CMD_IQDCACPU_V(x) ((x) << FW_IQ_CMD_IQDCACPU_S)
+
+#define FW_IQ_CMD_IQINTCNTTHRESH_S 4
+#define FW_IQ_CMD_IQINTCNTTHRESH_V(x) ((x) << FW_IQ_CMD_IQINTCNTTHRESH_S)
+
+#define FW_IQ_CMD_IQO_S 3
+#define FW_IQ_CMD_IQO_V(x) ((x) << FW_IQ_CMD_IQO_S)
+#define FW_IQ_CMD_IQO_F FW_IQ_CMD_IQO_V(1U)
+
+#define FW_IQ_CMD_IQCPRIO_S 2
+#define FW_IQ_CMD_IQCPRIO_V(x) ((x) << FW_IQ_CMD_IQCPRIO_S)
+
+#define FW_IQ_CMD_IQESIZE_S 0
+#define FW_IQ_CMD_IQESIZE_V(x) ((x) << FW_IQ_CMD_IQESIZE_S)
+
+#define FW_IQ_CMD_IQNS_S 31
+#define FW_IQ_CMD_IQNS_V(x) ((x) << FW_IQ_CMD_IQNS_S)
+
+#define FW_IQ_CMD_IQRO_S 30
+#define FW_IQ_CMD_IQRO_V(x) ((x) << FW_IQ_CMD_IQRO_S)
+
+#define FW_IQ_CMD_IQFLINTIQHSEN_S 28
+#define FW_IQ_CMD_IQFLINTIQHSEN_V(x) ((x) << FW_IQ_CMD_IQFLINTIQHSEN_S)
+
+#define FW_IQ_CMD_IQFLINTCONGEN_S 27
+#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S)
+
+#define FW_IQ_CMD_IQFLINTISCSIC_S 26
+#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
+
+#define FW_IQ_CMD_FL0CNGCHMAP_S 20
+#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S)
+
+#define FW_IQ_CMD_FL0CACHELOCK_S 15
+#define FW_IQ_CMD_FL0CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL0CACHELOCK_S)
+
+#define FW_IQ_CMD_FL0DBP_S 14
+#define FW_IQ_CMD_FL0DBP_V(x) ((x) << FW_IQ_CMD_FL0DBP_S)
+
+#define FW_IQ_CMD_FL0DATANS_S 13
+#define FW_IQ_CMD_FL0DATANS_V(x) ((x) << FW_IQ_CMD_FL0DATANS_S)
+
+#define FW_IQ_CMD_FL0DATARO_S 12
+#define FW_IQ_CMD_FL0DATARO_V(x) ((x) << FW_IQ_CMD_FL0DATARO_S)
+#define FW_IQ_CMD_FL0DATARO_F FW_IQ_CMD_FL0DATARO_V(1U)
+
+#define FW_IQ_CMD_FL0CONGCIF_S 11
+#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S)
+
+#define FW_IQ_CMD_FL0ONCHIP_S 10
+#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S)
+
+#define FW_IQ_CMD_FL0STATUSPGNS_S 9
+#define FW_IQ_CMD_FL0STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGNS_S)
+
+#define FW_IQ_CMD_FL0STATUSPGRO_S 8
+#define FW_IQ_CMD_FL0STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGRO_S)
+
+#define FW_IQ_CMD_FL0FETCHNS_S 7
+#define FW_IQ_CMD_FL0FETCHNS_V(x) ((x) << FW_IQ_CMD_FL0FETCHNS_S)
+
+#define FW_IQ_CMD_FL0FETCHRO_S 6
+#define FW_IQ_CMD_FL0FETCHRO_V(x) ((x) << FW_IQ_CMD_FL0FETCHRO_S)
+#define FW_IQ_CMD_FL0FETCHRO_F FW_IQ_CMD_FL0FETCHRO_V(1U)
+
+#define FW_IQ_CMD_FL0HOSTFCMODE_S 4
+#define FW_IQ_CMD_FL0HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL0HOSTFCMODE_S)
+
+#define FW_IQ_CMD_FL0CPRIO_S 3
+#define FW_IQ_CMD_FL0CPRIO_V(x) ((x) << FW_IQ_CMD_FL0CPRIO_S)
+
+#define FW_IQ_CMD_FL0PADEN_S 2
+#define FW_IQ_CMD_FL0PADEN_V(x) ((x) << FW_IQ_CMD_FL0PADEN_S)
+#define FW_IQ_CMD_FL0PADEN_F FW_IQ_CMD_FL0PADEN_V(1U)
+
+#define FW_IQ_CMD_FL0PACKEN_S 1
+#define FW_IQ_CMD_FL0PACKEN_V(x) ((x) << FW_IQ_CMD_FL0PACKEN_S)
+#define FW_IQ_CMD_FL0PACKEN_F FW_IQ_CMD_FL0PACKEN_V(1U)
+
+#define FW_IQ_CMD_FL0CONGEN_S 0
+#define FW_IQ_CMD_FL0CONGEN_V(x) ((x) << FW_IQ_CMD_FL0CONGEN_S)
+#define FW_IQ_CMD_FL0CONGEN_F FW_IQ_CMD_FL0CONGEN_V(1U)
+
+#define FW_IQ_CMD_FL0DCAEN_S 15
+#define FW_IQ_CMD_FL0DCAEN_V(x) ((x) << FW_IQ_CMD_FL0DCAEN_S)
+
+#define FW_IQ_CMD_FL0DCACPU_S 10
+#define FW_IQ_CMD_FL0DCACPU_V(x) ((x) << FW_IQ_CMD_FL0DCACPU_S)
+
+#define FW_IQ_CMD_FL0FBMIN_S 7
+#define FW_IQ_CMD_FL0FBMIN_V(x) ((x) << FW_IQ_CMD_FL0FBMIN_S)
+
+#define FW_IQ_CMD_FL0FBMAX_S 4
+#define FW_IQ_CMD_FL0FBMAX_V(x) ((x) << FW_IQ_CMD_FL0FBMAX_S)
+
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_S 3
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESHO_S)
+#define FW_IQ_CMD_FL0CIDXFTHRESHO_F FW_IQ_CMD_FL0CIDXFTHRESHO_V(1U)
+
+#define FW_IQ_CMD_FL0CIDXFTHRESH_S 0
+#define FW_IQ_CMD_FL0CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESH_S)
+
+#define FW_IQ_CMD_FL1CNGCHMAP_S 20
+#define FW_IQ_CMD_FL1CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL1CNGCHMAP_S)
+
+#define FW_IQ_CMD_FL1CACHELOCK_S 15
+#define FW_IQ_CMD_FL1CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL1CACHELOCK_S)
+
+#define FW_IQ_CMD_FL1DBP_S 14
+#define FW_IQ_CMD_FL1DBP_V(x) ((x) << FW_IQ_CMD_FL1DBP_S)
+
+#define FW_IQ_CMD_FL1DATANS_S 13
+#define FW_IQ_CMD_FL1DATANS_V(x) ((x) << FW_IQ_CMD_FL1DATANS_S)
+
+#define FW_IQ_CMD_FL1DATARO_S 12
+#define FW_IQ_CMD_FL1DATARO_V(x) ((x) << FW_IQ_CMD_FL1DATARO_S)
+
+#define FW_IQ_CMD_FL1CONGCIF_S 11
+#define FW_IQ_CMD_FL1CONGCIF_V(x) ((x) << FW_IQ_CMD_FL1CONGCIF_S)
+
+#define FW_IQ_CMD_FL1ONCHIP_S 10
+#define FW_IQ_CMD_FL1ONCHIP_V(x) ((x) << FW_IQ_CMD_FL1ONCHIP_S)
+
+#define FW_IQ_CMD_FL1STATUSPGNS_S 9
+#define FW_IQ_CMD_FL1STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGNS_S)
+
+#define FW_IQ_CMD_FL1STATUSPGRO_S 8
+#define FW_IQ_CMD_FL1STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGRO_S)
+
+#define FW_IQ_CMD_FL1FETCHNS_S 7
+#define FW_IQ_CMD_FL1FETCHNS_V(x) ((x) << FW_IQ_CMD_FL1FETCHNS_S)
+
+#define FW_IQ_CMD_FL1FETCHRO_S 6
+#define FW_IQ_CMD_FL1FETCHRO_V(x) ((x) << FW_IQ_CMD_FL1FETCHRO_S)
+
+#define FW_IQ_CMD_FL1HOSTFCMODE_S 4
+#define FW_IQ_CMD_FL1HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL1HOSTFCMODE_S)
+
+#define FW_IQ_CMD_FL1CPRIO_S 3
+#define FW_IQ_CMD_FL1CPRIO_V(x) ((x) << FW_IQ_CMD_FL1CPRIO_S)
+
+#define FW_IQ_CMD_FL1PADEN_S 2
+#define FW_IQ_CMD_FL1PADEN_V(x) ((x) << FW_IQ_CMD_FL1PADEN_S)
+#define FW_IQ_CMD_FL1PADEN_F FW_IQ_CMD_FL1PADEN_V(1U)
+
+#define FW_IQ_CMD_FL1PACKEN_S 1
+#define FW_IQ_CMD_FL1PACKEN_V(x) ((x) << FW_IQ_CMD_FL1PACKEN_S)
+#define FW_IQ_CMD_FL1PACKEN_F FW_IQ_CMD_FL1PACKEN_V(1U)
+
+#define FW_IQ_CMD_FL1CONGEN_S 0
+#define FW_IQ_CMD_FL1CONGEN_V(x) ((x) << FW_IQ_CMD_FL1CONGEN_S)
+#define FW_IQ_CMD_FL1CONGEN_F FW_IQ_CMD_FL1CONGEN_V(1U)
+
+#define FW_IQ_CMD_FL1DCAEN_S 15
+#define FW_IQ_CMD_FL1DCAEN_V(x) ((x) << FW_IQ_CMD_FL1DCAEN_S)
+
+#define FW_IQ_CMD_FL1DCACPU_S 10
+#define FW_IQ_CMD_FL1DCACPU_V(x) ((x) << FW_IQ_CMD_FL1DCACPU_S)
+
+#define FW_IQ_CMD_FL1FBMIN_S 7
+#define FW_IQ_CMD_FL1FBMIN_V(x) ((x) << FW_IQ_CMD_FL1FBMIN_S)
+
+#define FW_IQ_CMD_FL1FBMAX_S 4
+#define FW_IQ_CMD_FL1FBMAX_V(x) ((x) << FW_IQ_CMD_FL1FBMAX_S)
+
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_S 3
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESHO_S)
+#define FW_IQ_CMD_FL1CIDXFTHRESHO_F FW_IQ_CMD_FL1CIDXFTHRESHO_V(1U)
+
+#define FW_IQ_CMD_FL1CIDXFTHRESH_S 0
+#define FW_IQ_CMD_FL1CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESH_S)
struct fw_eq_eth_cmd {
__be32 op_to_vfn;
@@ -1195,40 +1520,102 @@ struct fw_eq_eth_cmd {
__be64 r9;
};
-#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8)
-#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0)
-#define FW_EQ_ETH_CMD_ALLOC (1U << 31)
-#define FW_EQ_ETH_CMD_FREE (1U << 30)
-#define FW_EQ_ETH_CMD_MODIFY (1U << 29)
-#define FW_EQ_ETH_CMD_EQSTART (1U << 28)
-#define FW_EQ_ETH_CMD_EQSTOP (1U << 27)
-
-#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
-#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
-#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
-#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
-#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
-#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24)
-#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23)
-#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22)
-#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20)
-#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19)
-#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18)
-#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16)
-#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0)
-
-#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31)
-#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26)
-#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23)
-#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20)
-#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19)
-#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
-#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
-
-#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30)
-#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
+#define FW_EQ_ETH_CMD_PFN_S 8
+#define FW_EQ_ETH_CMD_PFN_V(x) ((x) << FW_EQ_ETH_CMD_PFN_S)
+
+#define FW_EQ_ETH_CMD_VFN_S 0
+#define FW_EQ_ETH_CMD_VFN_V(x) ((x) << FW_EQ_ETH_CMD_VFN_S)
+
+#define FW_EQ_ETH_CMD_ALLOC_S 31
+#define FW_EQ_ETH_CMD_ALLOC_V(x) ((x) << FW_EQ_ETH_CMD_ALLOC_S)
+#define FW_EQ_ETH_CMD_ALLOC_F FW_EQ_ETH_CMD_ALLOC_V(1U)
+
+#define FW_EQ_ETH_CMD_FREE_S 30
+#define FW_EQ_ETH_CMD_FREE_V(x) ((x) << FW_EQ_ETH_CMD_FREE_S)
+#define FW_EQ_ETH_CMD_FREE_F FW_EQ_ETH_CMD_FREE_V(1U)
+
+#define FW_EQ_ETH_CMD_MODIFY_S 29
+#define FW_EQ_ETH_CMD_MODIFY_V(x) ((x) << FW_EQ_ETH_CMD_MODIFY_S)
+#define FW_EQ_ETH_CMD_MODIFY_F FW_EQ_ETH_CMD_MODIFY_V(1U)
+
+#define FW_EQ_ETH_CMD_EQSTART_S 28
+#define FW_EQ_ETH_CMD_EQSTART_V(x) ((x) << FW_EQ_ETH_CMD_EQSTART_S)
+#define FW_EQ_ETH_CMD_EQSTART_F FW_EQ_ETH_CMD_EQSTART_V(1U)
+
+#define FW_EQ_ETH_CMD_EQSTOP_S 27
+#define FW_EQ_ETH_CMD_EQSTOP_V(x) ((x) << FW_EQ_ETH_CMD_EQSTOP_S)
+#define FW_EQ_ETH_CMD_EQSTOP_F FW_EQ_ETH_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_ETH_CMD_EQID_S 0
+#define FW_EQ_ETH_CMD_EQID_M 0xfffff
+#define FW_EQ_ETH_CMD_EQID_V(x) ((x) << FW_EQ_ETH_CMD_EQID_S)
+#define FW_EQ_ETH_CMD_EQID_G(x) \
+ (((x) >> FW_EQ_ETH_CMD_EQID_S) & FW_EQ_ETH_CMD_EQID_M)
+
+#define FW_EQ_ETH_CMD_PHYSEQID_S 0
+#define FW_EQ_ETH_CMD_PHYSEQID_M 0xfffff
+#define FW_EQ_ETH_CMD_PHYSEQID_V(x) ((x) << FW_EQ_ETH_CMD_PHYSEQID_S)
+#define FW_EQ_ETH_CMD_PHYSEQID_G(x) \
+ (((x) >> FW_EQ_ETH_CMD_PHYSEQID_S) & FW_EQ_ETH_CMD_PHYSEQID_M)
+
+#define FW_EQ_ETH_CMD_FETCHSZM_S 26
+#define FW_EQ_ETH_CMD_FETCHSZM_V(x) ((x) << FW_EQ_ETH_CMD_FETCHSZM_S)
+#define FW_EQ_ETH_CMD_FETCHSZM_F FW_EQ_ETH_CMD_FETCHSZM_V(1U)
+
+#define FW_EQ_ETH_CMD_STATUSPGNS_S 25
+#define FW_EQ_ETH_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGNS_S)
+
+#define FW_EQ_ETH_CMD_STATUSPGRO_S 24
+#define FW_EQ_ETH_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGRO_S)
+
+#define FW_EQ_ETH_CMD_FETCHNS_S 23
+#define FW_EQ_ETH_CMD_FETCHNS_V(x) ((x) << FW_EQ_ETH_CMD_FETCHNS_S)
+
+#define FW_EQ_ETH_CMD_FETCHRO_S 22
+#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S)
+
+#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20
+#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_ETH_CMD_CPRIO_S 19
+#define FW_EQ_ETH_CMD_CPRIO_V(x) ((x) << FW_EQ_ETH_CMD_CPRIO_S)
+
+#define FW_EQ_ETH_CMD_ONCHIP_S 18
+#define FW_EQ_ETH_CMD_ONCHIP_V(x) ((x) << FW_EQ_ETH_CMD_ONCHIP_S)
+
+#define FW_EQ_ETH_CMD_PCIECHN_S 16
+#define FW_EQ_ETH_CMD_PCIECHN_V(x) ((x) << FW_EQ_ETH_CMD_PCIECHN_S)
+
+#define FW_EQ_ETH_CMD_IQID_S 0
+#define FW_EQ_ETH_CMD_IQID_V(x) ((x) << FW_EQ_ETH_CMD_IQID_S)
+
+#define FW_EQ_ETH_CMD_DCAEN_S 31
+#define FW_EQ_ETH_CMD_DCAEN_V(x) ((x) << FW_EQ_ETH_CMD_DCAEN_S)
+
+#define FW_EQ_ETH_CMD_DCACPU_S 26
+#define FW_EQ_ETH_CMD_DCACPU_V(x) ((x) << FW_EQ_ETH_CMD_DCACPU_S)
+
+#define FW_EQ_ETH_CMD_FBMIN_S 23
+#define FW_EQ_ETH_CMD_FBMIN_V(x) ((x) << FW_EQ_ETH_CMD_FBMIN_S)
+
+#define FW_EQ_ETH_CMD_FBMAX_S 20
+#define FW_EQ_ETH_CMD_FBMAX_V(x) ((x) << FW_EQ_ETH_CMD_FBMAX_S)
+
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO_S 19
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_ETH_CMD_CIDXFTHRESH_S 16
+#define FW_EQ_ETH_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_ETH_CMD_EQSIZE_S 0
+#define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S)
+
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S)
+#define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U)
+
+#define FW_EQ_ETH_CMD_VIID_S 16
+#define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S)
struct fw_eq_ctrl_cmd {
__be32 op_to_vfn;
@@ -1240,38 +1627,102 @@ struct fw_eq_ctrl_cmd {
__be64 eqaddr;
};
-#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8)
-#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0)
-
-#define FW_EQ_CTRL_CMD_ALLOC (1U << 31)
-#define FW_EQ_CTRL_CMD_FREE (1U << 30)
-#define FW_EQ_CTRL_CMD_MODIFY (1U << 29)
-#define FW_EQ_CTRL_CMD_EQSTART (1U << 28)
-#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27)
-
-#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20)
-#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0)
-#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
-#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26)
-#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25)
-#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24)
-#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23)
-#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22)
-#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20)
-#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19)
-#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18)
-#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16)
-#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0)
-
-#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31)
-#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26)
-#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23)
-#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20)
-#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19)
-#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16)
-#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0)
+#define FW_EQ_CTRL_CMD_PFN_S 8
+#define FW_EQ_CTRL_CMD_PFN_V(x) ((x) << FW_EQ_CTRL_CMD_PFN_S)
+
+#define FW_EQ_CTRL_CMD_VFN_S 0
+#define FW_EQ_CTRL_CMD_VFN_V(x) ((x) << FW_EQ_CTRL_CMD_VFN_S)
+
+#define FW_EQ_CTRL_CMD_ALLOC_S 31
+#define FW_EQ_CTRL_CMD_ALLOC_V(x) ((x) << FW_EQ_CTRL_CMD_ALLOC_S)
+#define FW_EQ_CTRL_CMD_ALLOC_F FW_EQ_CTRL_CMD_ALLOC_V(1U)
+
+#define FW_EQ_CTRL_CMD_FREE_S 30
+#define FW_EQ_CTRL_CMD_FREE_V(x) ((x) << FW_EQ_CTRL_CMD_FREE_S)
+#define FW_EQ_CTRL_CMD_FREE_F FW_EQ_CTRL_CMD_FREE_V(1U)
+
+#define FW_EQ_CTRL_CMD_MODIFY_S 29
+#define FW_EQ_CTRL_CMD_MODIFY_V(x) ((x) << FW_EQ_CTRL_CMD_MODIFY_S)
+#define FW_EQ_CTRL_CMD_MODIFY_F FW_EQ_CTRL_CMD_MODIFY_V(1U)
+
+#define FW_EQ_CTRL_CMD_EQSTART_S 28
+#define FW_EQ_CTRL_CMD_EQSTART_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTART_S)
+#define FW_EQ_CTRL_CMD_EQSTART_F FW_EQ_CTRL_CMD_EQSTART_V(1U)
+
+#define FW_EQ_CTRL_CMD_EQSTOP_S 27
+#define FW_EQ_CTRL_CMD_EQSTOP_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTOP_S)
+#define FW_EQ_CTRL_CMD_EQSTOP_F FW_EQ_CTRL_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_CTRL_CMD_CMPLIQID_S 20
+#define FW_EQ_CTRL_CMD_CMPLIQID_V(x) ((x) << FW_EQ_CTRL_CMD_CMPLIQID_S)
+
+#define FW_EQ_CTRL_CMD_EQID_S 0
+#define FW_EQ_CTRL_CMD_EQID_M 0xfffff
+#define FW_EQ_CTRL_CMD_EQID_V(x) ((x) << FW_EQ_CTRL_CMD_EQID_S)
+#define FW_EQ_CTRL_CMD_EQID_G(x) \
+ (((x) >> FW_EQ_CTRL_CMD_EQID_S) & FW_EQ_CTRL_CMD_EQID_M)
+
+#define FW_EQ_CTRL_CMD_PHYSEQID_S 0
+#define FW_EQ_CTRL_CMD_PHYSEQID_M 0xfffff
+#define FW_EQ_CTRL_CMD_PHYSEQID_G(x) \
+ (((x) >> FW_EQ_CTRL_CMD_PHYSEQID_S) & FW_EQ_CTRL_CMD_PHYSEQID_M)
+
+#define FW_EQ_CTRL_CMD_FETCHSZM_S 26
+#define FW_EQ_CTRL_CMD_FETCHSZM_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHSZM_S)
+#define FW_EQ_CTRL_CMD_FETCHSZM_F FW_EQ_CTRL_CMD_FETCHSZM_V(1U)
+
+#define FW_EQ_CTRL_CMD_STATUSPGNS_S 25
+#define FW_EQ_CTRL_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGNS_S)
+#define FW_EQ_CTRL_CMD_STATUSPGNS_F FW_EQ_CTRL_CMD_STATUSPGNS_V(1U)
+
+#define FW_EQ_CTRL_CMD_STATUSPGRO_S 24
+#define FW_EQ_CTRL_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGRO_S)
+#define FW_EQ_CTRL_CMD_STATUSPGRO_F FW_EQ_CTRL_CMD_STATUSPGRO_V(1U)
+
+#define FW_EQ_CTRL_CMD_FETCHNS_S 23
+#define FW_EQ_CTRL_CMD_FETCHNS_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHNS_S)
+#define FW_EQ_CTRL_CMD_FETCHNS_F FW_EQ_CTRL_CMD_FETCHNS_V(1U)
+
+#define FW_EQ_CTRL_CMD_FETCHRO_S 22
+#define FW_EQ_CTRL_CMD_FETCHRO_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHRO_S)
+#define FW_EQ_CTRL_CMD_FETCHRO_F FW_EQ_CTRL_CMD_FETCHRO_V(1U)
+
+#define FW_EQ_CTRL_CMD_HOSTFCMODE_S 20
+#define FW_EQ_CTRL_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_CTRL_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_CTRL_CMD_CPRIO_S 19
+#define FW_EQ_CTRL_CMD_CPRIO_V(x) ((x) << FW_EQ_CTRL_CMD_CPRIO_S)
+
+#define FW_EQ_CTRL_CMD_ONCHIP_S 18
+#define FW_EQ_CTRL_CMD_ONCHIP_V(x) ((x) << FW_EQ_CTRL_CMD_ONCHIP_S)
+
+#define FW_EQ_CTRL_CMD_PCIECHN_S 16
+#define FW_EQ_CTRL_CMD_PCIECHN_V(x) ((x) << FW_EQ_CTRL_CMD_PCIECHN_S)
+
+#define FW_EQ_CTRL_CMD_IQID_S 0
+#define FW_EQ_CTRL_CMD_IQID_V(x) ((x) << FW_EQ_CTRL_CMD_IQID_S)
+
+#define FW_EQ_CTRL_CMD_DCAEN_S 31
+#define FW_EQ_CTRL_CMD_DCAEN_V(x) ((x) << FW_EQ_CTRL_CMD_DCAEN_S)
+
+#define FW_EQ_CTRL_CMD_DCACPU_S 26
+#define FW_EQ_CTRL_CMD_DCACPU_V(x) ((x) << FW_EQ_CTRL_CMD_DCACPU_S)
+
+#define FW_EQ_CTRL_CMD_FBMIN_S 23
+#define FW_EQ_CTRL_CMD_FBMIN_V(x) ((x) << FW_EQ_CTRL_CMD_FBMIN_S)
+
+#define FW_EQ_CTRL_CMD_FBMAX_S 20
+#define FW_EQ_CTRL_CMD_FBMAX_V(x) ((x) << FW_EQ_CTRL_CMD_FBMAX_S)
+
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_S 19
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_V(x) \
+ ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH_S 16
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_CTRL_CMD_EQSIZE_S 0
+#define FW_EQ_CTRL_CMD_EQSIZE_V(x) ((x) << FW_EQ_CTRL_CMD_EQSIZE_S)
struct fw_eq_ofld_cmd {
__be32 op_to_vfn;
@@ -1283,45 +1734,112 @@ struct fw_eq_ofld_cmd {
__be64 eqaddr;
};
-#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8)
-#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0)
-
-#define FW_EQ_OFLD_CMD_ALLOC (1U << 31)
-#define FW_EQ_OFLD_CMD_FREE (1U << 30)
-#define FW_EQ_OFLD_CMD_MODIFY (1U << 29)
-#define FW_EQ_OFLD_CMD_EQSTART (1U << 28)
-#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27)
-
-#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0)
-#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
-#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
-
-#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26)
-#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25)
-#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24)
-#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23)
-#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22)
-#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20)
-#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19)
-#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18)
-#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16)
-#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0)
-
-#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31)
-#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26)
-#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23)
-#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20)
-#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19)
-#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16)
-#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0)
+#define FW_EQ_OFLD_CMD_PFN_S 8
+#define FW_EQ_OFLD_CMD_PFN_V(x) ((x) << FW_EQ_OFLD_CMD_PFN_S)
+
+#define FW_EQ_OFLD_CMD_VFN_S 0
+#define FW_EQ_OFLD_CMD_VFN_V(x) ((x) << FW_EQ_OFLD_CMD_VFN_S)
+
+#define FW_EQ_OFLD_CMD_ALLOC_S 31
+#define FW_EQ_OFLD_CMD_ALLOC_V(x) ((x) << FW_EQ_OFLD_CMD_ALLOC_S)
+#define FW_EQ_OFLD_CMD_ALLOC_F FW_EQ_OFLD_CMD_ALLOC_V(1U)
+
+#define FW_EQ_OFLD_CMD_FREE_S 30
+#define FW_EQ_OFLD_CMD_FREE_V(x) ((x) << FW_EQ_OFLD_CMD_FREE_S)
+#define FW_EQ_OFLD_CMD_FREE_F FW_EQ_OFLD_CMD_FREE_V(1U)
+
+#define FW_EQ_OFLD_CMD_MODIFY_S 29
+#define FW_EQ_OFLD_CMD_MODIFY_V(x) ((x) << FW_EQ_OFLD_CMD_MODIFY_S)
+#define FW_EQ_OFLD_CMD_MODIFY_F FW_EQ_OFLD_CMD_MODIFY_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQSTART_S 28
+#define FW_EQ_OFLD_CMD_EQSTART_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTART_S)
+#define FW_EQ_OFLD_CMD_EQSTART_F FW_EQ_OFLD_CMD_EQSTART_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQSTOP_S 27
+#define FW_EQ_OFLD_CMD_EQSTOP_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTOP_S)
+#define FW_EQ_OFLD_CMD_EQSTOP_F FW_EQ_OFLD_CMD_EQSTOP_V(1U)
+
+#define FW_EQ_OFLD_CMD_EQID_S 0
+#define FW_EQ_OFLD_CMD_EQID_M 0xfffff
+#define FW_EQ_OFLD_CMD_EQID_V(x) ((x) << FW_EQ_OFLD_CMD_EQID_S)
+#define FW_EQ_OFLD_CMD_EQID_G(x) \
+ (((x) >> FW_EQ_OFLD_CMD_EQID_S) & FW_EQ_OFLD_CMD_EQID_M)
+
+#define FW_EQ_OFLD_CMD_PHYSEQID_S 0
+#define FW_EQ_OFLD_CMD_PHYSEQID_M 0xfffff
+#define FW_EQ_OFLD_CMD_PHYSEQID_G(x) \
+ (((x) >> FW_EQ_OFLD_CMD_PHYSEQID_S) & FW_EQ_OFLD_CMD_PHYSEQID_M)
+
+#define FW_EQ_OFLD_CMD_FETCHSZM_S 26
+#define FW_EQ_OFLD_CMD_FETCHSZM_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHSZM_S)
+
+#define FW_EQ_OFLD_CMD_STATUSPGNS_S 25
+#define FW_EQ_OFLD_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGNS_S)
+
+#define FW_EQ_OFLD_CMD_STATUSPGRO_S 24
+#define FW_EQ_OFLD_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGRO_S)
+
+#define FW_EQ_OFLD_CMD_FETCHNS_S 23
+#define FW_EQ_OFLD_CMD_FETCHNS_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHNS_S)
+
+#define FW_EQ_OFLD_CMD_FETCHRO_S 22
+#define FW_EQ_OFLD_CMD_FETCHRO_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHRO_S)
+#define FW_EQ_OFLD_CMD_FETCHRO_F FW_EQ_OFLD_CMD_FETCHRO_V(1U)
+
+#define FW_EQ_OFLD_CMD_HOSTFCMODE_S 20
+#define FW_EQ_OFLD_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_OFLD_CMD_HOSTFCMODE_S)
+
+#define FW_EQ_OFLD_CMD_CPRIO_S 19
+#define FW_EQ_OFLD_CMD_CPRIO_V(x) ((x) << FW_EQ_OFLD_CMD_CPRIO_S)
+
+#define FW_EQ_OFLD_CMD_ONCHIP_S 18
+#define FW_EQ_OFLD_CMD_ONCHIP_V(x) ((x) << FW_EQ_OFLD_CMD_ONCHIP_S)
+
+#define FW_EQ_OFLD_CMD_PCIECHN_S 16
+#define FW_EQ_OFLD_CMD_PCIECHN_V(x) ((x) << FW_EQ_OFLD_CMD_PCIECHN_S)
+
+#define FW_EQ_OFLD_CMD_IQID_S 0
+#define FW_EQ_OFLD_CMD_IQID_V(x) ((x) << FW_EQ_OFLD_CMD_IQID_S)
+
+#define FW_EQ_OFLD_CMD_DCAEN_S 31
+#define FW_EQ_OFLD_CMD_DCAEN_V(x) ((x) << FW_EQ_OFLD_CMD_DCAEN_S)
+
+#define FW_EQ_OFLD_CMD_DCACPU_S 26
+#define FW_EQ_OFLD_CMD_DCACPU_V(x) ((x) << FW_EQ_OFLD_CMD_DCACPU_S)
+
+#define FW_EQ_OFLD_CMD_FBMIN_S 23
+#define FW_EQ_OFLD_CMD_FBMIN_V(x) ((x) << FW_EQ_OFLD_CMD_FBMIN_S)
+
+#define FW_EQ_OFLD_CMD_FBMAX_S 20
+#define FW_EQ_OFLD_CMD_FBMAX_V(x) ((x) << FW_EQ_OFLD_CMD_FBMAX_S)
+
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_S 19
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(x) \
+ ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESHO_S)
+
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH_S 16
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESH_S)
+
+#define FW_EQ_OFLD_CMD_EQSIZE_S 0
+#define FW_EQ_OFLD_CMD_EQSIZE_V(x) ((x) << FW_EQ_OFLD_CMD_EQSIZE_S)
/*
* Macros for VIID parsing:
* VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
*/
-#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7)
-#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
-#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
+
+#define FW_VIID_PFN_S 8
+#define FW_VIID_PFN_M 0x7
+#define FW_VIID_PFN_G(x) (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M)
+
+#define FW_VIID_VIVLD_S 7
+#define FW_VIID_VIVLD_M 0x1
+#define FW_VIID_VIVLD_G(x) (((x) >> FW_VIID_VIVLD_S) & FW_VIID_VIVLD_M)
+
+#define FW_VIID_VIN_S 0
+#define FW_VIID_VIN_M 0x7F
+#define FW_VIID_VIN_G(x) (((x) >> FW_VIID_VIN_S) & FW_VIID_VIN_M)
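A quick decode of the layout documented in the comment above (viid here is a hypothetical 16-bit VI identifier):

	unsigned int pfn   = FW_VIID_PFN_G(viid);	/* owning PF, bits 10:8 */
	unsigned int vivld = FW_VIID_VIVLD_G(viid);	/* VI valid, bit 7 */
	unsigned int vin   = FW_VIID_VIN_G(viid);	/* VI number, bits 6:0 */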
struct fw_vi_cmd {
__be32 op_to_vfn;
@@ -1341,15 +1859,35 @@ struct fw_vi_cmd {
__be64 r10;
};
-#define FW_VI_CMD_PFN(x) ((x) << 8)
-#define FW_VI_CMD_VFN(x) ((x) << 0)
-#define FW_VI_CMD_ALLOC (1U << 31)
-#define FW_VI_CMD_FREE (1U << 30)
-#define FW_VI_CMD_VIID(x) ((x) << 0)
-#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)
-#define FW_VI_CMD_PORTID(x) ((x) << 4)
-#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf)
-#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
+#define FW_VI_CMD_PFN_S 8
+#define FW_VI_CMD_PFN_V(x) ((x) << FW_VI_CMD_PFN_S)
+
+#define FW_VI_CMD_VFN_S 0
+#define FW_VI_CMD_VFN_V(x) ((x) << FW_VI_CMD_VFN_S)
+
+#define FW_VI_CMD_ALLOC_S 31
+#define FW_VI_CMD_ALLOC_V(x) ((x) << FW_VI_CMD_ALLOC_S)
+#define FW_VI_CMD_ALLOC_F FW_VI_CMD_ALLOC_V(1U)
+
+#define FW_VI_CMD_FREE_S 30
+#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S)
+#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U)
+
+#define FW_VI_CMD_VIID_S 0
+#define FW_VI_CMD_VIID_M 0xfff
+#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S)
+#define FW_VI_CMD_VIID_G(x) (((x) >> FW_VI_CMD_VIID_S) & FW_VI_CMD_VIID_M)
+
+#define FW_VI_CMD_PORTID_S 4
+#define FW_VI_CMD_PORTID_M 0xf
+#define FW_VI_CMD_PORTID_V(x) ((x) << FW_VI_CMD_PORTID_S)
+#define FW_VI_CMD_PORTID_G(x) \
+ (((x) >> FW_VI_CMD_PORTID_S) & FW_VI_CMD_PORTID_M)
+
+#define FW_VI_CMD_RSSSIZE_S 0
+#define FW_VI_CMD_RSSSIZE_M 0x7ff
+#define FW_VI_CMD_RSSSIZE_G(x) \
+ (((x) >> FW_VI_CMD_RSSSIZE_S) & FW_VI_CMD_RSSSIZE_M)
/* Special VI_MAC command index ids */
#define FW_VI_MAC_ADD_MAC 0x3FF
@@ -1385,16 +1923,37 @@ struct fw_vi_mac_cmd {
} u;
};
-#define FW_VI_MAC_CMD_VIID(x) ((x) << 0)
-#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31)
-#define FW_VI_MAC_CMD_HASHVECEN (1U << 23)
-#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22)
-#define FW_VI_MAC_CMD_VALID (1U << 15)
-#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12)
-#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10)
-#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3)
-#define FW_VI_MAC_CMD_IDX(x) ((x) << 0)
-#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff)
+#define FW_VI_MAC_CMD_VIID_S 0
+#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S)
+
+#define FW_VI_MAC_CMD_FREEMACS_S 31
+#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S)
+
+#define FW_VI_MAC_CMD_HASHVECEN_S 23
+#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S)
+#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U)
+
+#define FW_VI_MAC_CMD_HASHUNIEN_S 22
+#define FW_VI_MAC_CMD_HASHUNIEN_V(x) ((x) << FW_VI_MAC_CMD_HASHUNIEN_S)
+
+#define FW_VI_MAC_CMD_VALID_S 15
+#define FW_VI_MAC_CMD_VALID_V(x) ((x) << FW_VI_MAC_CMD_VALID_S)
+#define FW_VI_MAC_CMD_VALID_F FW_VI_MAC_CMD_VALID_V(1U)
+
+#define FW_VI_MAC_CMD_PRIO_S 12
+#define FW_VI_MAC_CMD_PRIO_V(x) ((x) << FW_VI_MAC_CMD_PRIO_S)
+
+#define FW_VI_MAC_CMD_SMAC_RESULT_S 10
+#define FW_VI_MAC_CMD_SMAC_RESULT_M 0x3
+#define FW_VI_MAC_CMD_SMAC_RESULT_V(x) ((x) << FW_VI_MAC_CMD_SMAC_RESULT_S)
+#define FW_VI_MAC_CMD_SMAC_RESULT_G(x) \
+ (((x) >> FW_VI_MAC_CMD_SMAC_RESULT_S) & FW_VI_MAC_CMD_SMAC_RESULT_M)
+
+#define FW_VI_MAC_CMD_IDX_S 0
+#define FW_VI_MAC_CMD_IDX_M 0x3ff
+#define FW_VI_MAC_CMD_IDX_V(x) ((x) << FW_VI_MAC_CMD_IDX_S)
+#define FW_VI_MAC_CMD_IDX_G(x) \
+ (((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
#define FW_RXMODE_MTU_NO_CHG 65535
@@ -1405,17 +1964,30 @@ struct fw_vi_rxmode_cmd {
__be32 r4_lo;
};
-#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
-#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff
-#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
-#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
-#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
-#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3
-#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
-#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
-#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
-#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3
-#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8)
+#define FW_VI_RXMODE_CMD_VIID_S 0
+#define FW_VI_RXMODE_CMD_VIID_V(x) ((x) << FW_VI_RXMODE_CMD_VIID_S)
+
+#define FW_VI_RXMODE_CMD_MTU_S 16
+#define FW_VI_RXMODE_CMD_MTU_M 0xffff
+#define FW_VI_RXMODE_CMD_MTU_V(x) ((x) << FW_VI_RXMODE_CMD_MTU_S)
+
+#define FW_VI_RXMODE_CMD_PROMISCEN_S 14
+#define FW_VI_RXMODE_CMD_PROMISCEN_M 0x3
+#define FW_VI_RXMODE_CMD_PROMISCEN_V(x) ((x) << FW_VI_RXMODE_CMD_PROMISCEN_S)
+
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_S 12
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_M 0x3
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_V(x) \
+ ((x) << FW_VI_RXMODE_CMD_ALLMULTIEN_S)
+
+#define FW_VI_RXMODE_CMD_BROADCASTEN_S 10
+#define FW_VI_RXMODE_CMD_BROADCASTEN_M 0x3
+#define FW_VI_RXMODE_CMD_BROADCASTEN_V(x) \
+ ((x) << FW_VI_RXMODE_CMD_BROADCASTEN_S)
+
+#define FW_VI_RXMODE_CMD_VLANEXEN_S 8
+#define FW_VI_RXMODE_CMD_VLANEXEN_M 0x3
+#define FW_VI_RXMODE_CMD_VLANEXEN_V(x) ((x) << FW_VI_RXMODE_CMD_VLANEXEN_S)
struct fw_vi_enable_cmd {
__be32 op_to_viid;
@@ -1425,11 +1997,21 @@ struct fw_vi_enable_cmd {
__be32 r4;
};
-#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
-#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
-#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
-#define FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << 28)
-#define FW_VI_ENABLE_CMD_LED (1U << 29)
+#define FW_VI_ENABLE_CMD_VIID_S 0
+#define FW_VI_ENABLE_CMD_VIID_V(x) ((x) << FW_VI_ENABLE_CMD_VIID_S)
+
+#define FW_VI_ENABLE_CMD_IEN_S 31
+#define FW_VI_ENABLE_CMD_IEN_V(x) ((x) << FW_VI_ENABLE_CMD_IEN_S)
+
+#define FW_VI_ENABLE_CMD_EEN_S 30
+#define FW_VI_ENABLE_CMD_EEN_V(x) ((x) << FW_VI_ENABLE_CMD_EEN_S)
+
+#define FW_VI_ENABLE_CMD_LED_S 29
+#define FW_VI_ENABLE_CMD_LED_V(x) ((x) << FW_VI_ENABLE_CMD_LED_S)
+#define FW_VI_ENABLE_CMD_LED_F FW_VI_ENABLE_CMD_LED_V(1U)
+
+#define FW_VI_ENABLE_CMD_DCB_INFO_S 28
+#define FW_VI_ENABLE_CMD_DCB_INFO_V(x) ((x) << FW_VI_ENABLE_CMD_DCB_INFO_S)
/* VI VF stats offset definitions */
#define VI_VF_NUM_STATS 16
@@ -1529,9 +2111,14 @@ struct fw_vi_stats_cmd {
} u;
};
-#define FW_VI_STATS_CMD_VIID(x) ((x) << 0)
-#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12)
-#define FW_VI_STATS_CMD_IX(x) ((x) << 0)
+#define FW_VI_STATS_CMD_VIID_S 0
+#define FW_VI_STATS_CMD_VIID_V(x) ((x) << FW_VI_STATS_CMD_VIID_S)
+
+#define FW_VI_STATS_CMD_NSTATS_S 12
+#define FW_VI_STATS_CMD_NSTATS_V(x) ((x) << FW_VI_STATS_CMD_NSTATS_S)
+
+#define FW_VI_STATS_CMD_IX_S 0
+#define FW_VI_STATS_CMD_IX_V(x) ((x) << FW_VI_STATS_CMD_IX_S)
struct fw_acl_mac_cmd {
__be32 op_to_vfn;
@@ -1548,9 +2135,14 @@ struct fw_acl_mac_cmd {
u8 macaddr3[6];
};
-#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8)
-#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0)
-#define FW_ACL_MAC_CMD_EN(x) ((x) << 31)
+#define FW_ACL_MAC_CMD_PFN_S 8
+#define FW_ACL_MAC_CMD_PFN_V(x) ((x) << FW_ACL_MAC_CMD_PFN_S)
+
+#define FW_ACL_MAC_CMD_VFN_S 0
+#define FW_ACL_MAC_CMD_VFN_V(x) ((x) << FW_ACL_MAC_CMD_VFN_S)
+
+#define FW_ACL_MAC_CMD_EN_S 31
+#define FW_ACL_MAC_CMD_EN_V(x) ((x) << FW_ACL_MAC_CMD_EN_S)
struct fw_acl_vlan_cmd {
__be32 op_to_vfn;
@@ -1561,11 +2153,20 @@ struct fw_acl_vlan_cmd {
__be16 vlanid[16];
};
-#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8)
-#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0)
-#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31)
-#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7)
-#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6)
+#define FW_ACL_VLAN_CMD_PFN_S 8
+#define FW_ACL_VLAN_CMD_PFN_V(x) ((x) << FW_ACL_VLAN_CMD_PFN_S)
+
+#define FW_ACL_VLAN_CMD_VFN_S 0
+#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S)
+
+#define FW_ACL_VLAN_CMD_EN_S 31
+#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+
+#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7
+#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
+
+#define FW_ACL_VLAN_CMD_FM_S 6
+#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
@@ -1587,13 +2188,14 @@ enum fw_port_cap {
};
enum fw_port_mdi {
- FW_PORT_MDI_UNCHANGED,
- FW_PORT_MDI_AUTO,
- FW_PORT_MDI_F_STRAIGHT,
- FW_PORT_MDI_F_CROSSOVER
+ FW_PORT_CAP_MDI_UNCHANGED,
+ FW_PORT_CAP_MDI_AUTO,
+ FW_PORT_CAP_MDI_F_STRAIGHT,
+ FW_PORT_CAP_MDI_F_CROSSOVER
};
-#define FW_PORT_MDI(x) ((x) << 9)
+#define FW_PORT_CAP_MDI_S 9
+#define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S)
enum fw_port_action {
FW_PORT_ACTION_L1_CFG = 0x0001,
@@ -1753,52 +2355,105 @@ struct fw_port_cmd {
} u;
};
-#define FW_PORT_CMD_READ (1U << 22)
-
-#define FW_PORT_CMD_PORTID(x) ((x) << 0)
-#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
-
-#define FW_PORT_CMD_ACTION(x) ((x) << 16)
-#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff)
-
-#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
-#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
-#define FW_PORT_CMD_OVLAN2(x) ((x) << 6)
-#define FW_PORT_CMD_OVLAN1(x) ((x) << 5)
-#define FW_PORT_CMD_OVLAN0(x) ((x) << 4)
-#define FW_PORT_CMD_IVLAN0(x) ((x) << 3)
-
-#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
-
-#define FW_PORT_CMD_LSTATUS (1U << 31)
-#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1)
-#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
-#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
-#define FW_PORT_CMD_TXPAUSE (1U << 23)
-#define FW_PORT_CMD_RXPAUSE (1U << 22)
-#define FW_PORT_CMD_MDIOCAP (1U << 21)
-#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f)
-#define FW_PORT_CMD_LPTXPAUSE (1U << 15)
-#define FW_PORT_CMD_LPRXPAUSE (1U << 14)
-#define FW_PORT_CMD_PTYPE_MASK 0x1f
-#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK)
-#define FW_PORT_CMD_MODTYPE_MASK 0x1f
-#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
-
-#define FW_PORT_CMD_DCBXDIS (1U << 7)
-#define FW_PORT_CMD_APPLY (1U << 7)
-#define FW_PORT_CMD_ALL_SYNCD (1U << 7)
-#define FW_PORT_CMD_DCB_VERSION_GET(x) (((x) >> 8) & 0xf)
-
-#define FW_PORT_CMD_PPPEN(x) ((x) << 31)
-#define FW_PORT_CMD_TPSRC(x) ((x) << 28)
-#define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
-
-#define FW_PORT_CMD_CH0(x) ((x) << 20)
-#define FW_PORT_CMD_CH1(x) ((x) << 16)
-#define FW_PORT_CMD_CH2(x) ((x) << 12)
-#define FW_PORT_CMD_CH3(x) ((x) << 8)
-#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
+#define FW_PORT_CMD_READ_S 22
+#define FW_PORT_CMD_READ_V(x) ((x) << FW_PORT_CMD_READ_S)
+#define FW_PORT_CMD_READ_F FW_PORT_CMD_READ_V(1U)
+
+#define FW_PORT_CMD_PORTID_S 0
+#define FW_PORT_CMD_PORTID_M 0xf
+#define FW_PORT_CMD_PORTID_V(x) ((x) << FW_PORT_CMD_PORTID_S)
+#define FW_PORT_CMD_PORTID_G(x) \
+ (((x) >> FW_PORT_CMD_PORTID_S) & FW_PORT_CMD_PORTID_M)
+
+#define FW_PORT_CMD_ACTION_S 16
+#define FW_PORT_CMD_ACTION_M 0xffff
+#define FW_PORT_CMD_ACTION_V(x) ((x) << FW_PORT_CMD_ACTION_S)
+#define FW_PORT_CMD_ACTION_G(x) \
+ (((x) >> FW_PORT_CMD_ACTION_S) & FW_PORT_CMD_ACTION_M)
+
+#define FW_PORT_CMD_OVLAN3_S 7
+#define FW_PORT_CMD_OVLAN3_V(x) ((x) << FW_PORT_CMD_OVLAN3_S)
+
+#define FW_PORT_CMD_OVLAN2_S 6
+#define FW_PORT_CMD_OVLAN2_V(x) ((x) << FW_PORT_CMD_OVLAN2_S)
+
+#define FW_PORT_CMD_OVLAN1_S 5
+#define FW_PORT_CMD_OVLAN1_V(x) ((x) << FW_PORT_CMD_OVLAN1_S)
+
+#define FW_PORT_CMD_OVLAN0_S 4
+#define FW_PORT_CMD_OVLAN0_V(x) ((x) << FW_PORT_CMD_OVLAN0_S)
+
+#define FW_PORT_CMD_IVLAN0_S 3
+#define FW_PORT_CMD_IVLAN0_V(x) ((x) << FW_PORT_CMD_IVLAN0_S)
+
+#define FW_PORT_CMD_TXIPG_S 3
+#define FW_PORT_CMD_TXIPG_V(x) ((x) << FW_PORT_CMD_TXIPG_S)
+
+#define FW_PORT_CMD_LSTATUS_S 31
+#define FW_PORT_CMD_LSTATUS_M 0x1
+#define FW_PORT_CMD_LSTATUS_V(x) ((x) << FW_PORT_CMD_LSTATUS_S)
+#define FW_PORT_CMD_LSTATUS_G(x) \
+ (((x) >> FW_PORT_CMD_LSTATUS_S) & FW_PORT_CMD_LSTATUS_M)
+#define FW_PORT_CMD_LSTATUS_F FW_PORT_CMD_LSTATUS_V(1U)
+
+#define FW_PORT_CMD_LSPEED_S 24
+#define FW_PORT_CMD_LSPEED_M 0x3f
+#define FW_PORT_CMD_LSPEED_V(x) ((x) << FW_PORT_CMD_LSPEED_S)
+#define FW_PORT_CMD_LSPEED_G(x) \
+ (((x) >> FW_PORT_CMD_LSPEED_S) & FW_PORT_CMD_LSPEED_M)
+
+#define FW_PORT_CMD_TXPAUSE_S 23
+#define FW_PORT_CMD_TXPAUSE_V(x) ((x) << FW_PORT_CMD_TXPAUSE_S)
+#define FW_PORT_CMD_TXPAUSE_F FW_PORT_CMD_TXPAUSE_V(1U)
+
+#define FW_PORT_CMD_RXPAUSE_S 22
+#define FW_PORT_CMD_RXPAUSE_V(x) ((x) << FW_PORT_CMD_RXPAUSE_S)
+#define FW_PORT_CMD_RXPAUSE_F FW_PORT_CMD_RXPAUSE_V(1U)
+
+#define FW_PORT_CMD_MDIOCAP_S 21
+#define FW_PORT_CMD_MDIOCAP_V(x) ((x) << FW_PORT_CMD_MDIOCAP_S)
+#define FW_PORT_CMD_MDIOCAP_F FW_PORT_CMD_MDIOCAP_V(1U)
+
+#define FW_PORT_CMD_MDIOADDR_S 16
+#define FW_PORT_CMD_MDIOADDR_M 0x1f
+#define FW_PORT_CMD_MDIOADDR_G(x) \
+ (((x) >> FW_PORT_CMD_MDIOADDR_S) & FW_PORT_CMD_MDIOADDR_M)
+
+#define FW_PORT_CMD_LPTXPAUSE_S 15
+#define FW_PORT_CMD_LPTXPAUSE_V(x) ((x) << FW_PORT_CMD_LPTXPAUSE_S)
+#define FW_PORT_CMD_LPTXPAUSE_F FW_PORT_CMD_LPTXPAUSE_V(1U)
+
+#define FW_PORT_CMD_LPRXPAUSE_S 14
+#define FW_PORT_CMD_LPRXPAUSE_V(x) ((x) << FW_PORT_CMD_LPRXPAUSE_S)
+#define FW_PORT_CMD_LPRXPAUSE_F FW_PORT_CMD_LPRXPAUSE_V(1U)
+
+#define FW_PORT_CMD_PTYPE_S 8
+#define FW_PORT_CMD_PTYPE_M 0x1f
+#define FW_PORT_CMD_PTYPE_G(x) \
+ (((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
+
+#define FW_PORT_CMD_MODTYPE_S 0
+#define FW_PORT_CMD_MODTYPE_M 0x1f
+#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S)
+#define FW_PORT_CMD_MODTYPE_G(x) \
+ (((x) >> FW_PORT_CMD_MODTYPE_S) & FW_PORT_CMD_MODTYPE_M)
+
+#define FW_PORT_CMD_DCBXDIS_S 7
+#define FW_PORT_CMD_DCBXDIS_V(x) ((x) << FW_PORT_CMD_DCBXDIS_S)
+#define FW_PORT_CMD_DCBXDIS_F FW_PORT_CMD_DCBXDIS_V(1U)
+
+#define FW_PORT_CMD_APPLY_S 7
+#define FW_PORT_CMD_APPLY_V(x) ((x) << FW_PORT_CMD_APPLY_S)
+#define FW_PORT_CMD_APPLY_F FW_PORT_CMD_APPLY_V(1U)
+
+#define FW_PORT_CMD_ALL_SYNCD_S 7
+#define FW_PORT_CMD_ALL_SYNCD_V(x) ((x) << FW_PORT_CMD_ALL_SYNCD_S)
+#define FW_PORT_CMD_ALL_SYNCD_F FW_PORT_CMD_ALL_SYNCD_V(1U)
+
+#define FW_PORT_CMD_DCB_VERSION_S 12
+#define FW_PORT_CMD_DCB_VERSION_M 0x7
+#define FW_PORT_CMD_DCB_VERSION_G(x) \
+ (((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M)
enum fw_port_type {
FW_PORT_TYPE_FIBER_XFI,
@@ -1817,7 +2472,7 @@ enum fw_port_type {
FW_PORT_TYPE_QSFP,
FW_PORT_TYPE_BP40_BA,
- FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
+ FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
enum fw_port_module_type {
@@ -1828,11 +2483,11 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
FW_PORT_MOD_TYPE_LRM,
- FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3,
- FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2,
- FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1,
+ FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_M - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_M - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_M - 1,
- FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
+ FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_M
};
enum fw_port_mod_sub_type {
@@ -1988,11 +2643,6 @@ struct fw_port_stats_cmd {
} u;
};
-#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4)
-#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0)
-#define FW_PORT_STATS_CMD_TX(x) ((x) << 7)
-#define FW_PORT_STATS_CMD_IX(x) ((x) << 0)
-
/* port loopback stats */
#define FW_NUM_LB_STATS 16
enum fw_port_lb_stats_index {
@@ -2048,22 +2698,13 @@ struct fw_port_lb_stats_cmd {
} u;
};
-#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0)
-#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4)
-#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0)
-#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0)
-
struct fw_rss_ind_tbl_cmd {
__be32 op_to_viid;
-#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0)
__be32 retval_len16;
__be16 niqid;
__be16 startidx;
__be32 r3;
__be32 iq0_to_iq2;
-#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20)
-#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10)
-#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0)
__be32 iq3_to_iq5;
__be32 iq6_to_iq8;
__be32 iq9_to_iq11;
@@ -2077,6 +2718,18 @@ struct fw_rss_ind_tbl_cmd {
__be32 r15_lo;
};
+#define FW_RSS_IND_TBL_CMD_VIID_S 0
+#define FW_RSS_IND_TBL_CMD_VIID_V(x) ((x) << FW_RSS_IND_TBL_CMD_VIID_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ0_S 20
+#define FW_RSS_IND_TBL_CMD_IQ0_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ0_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ1_S 10
+#define FW_RSS_IND_TBL_CMD_IQ1_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ1_S)
+
+#define FW_RSS_IND_TBL_CMD_IQ2_S 0
+#define FW_RSS_IND_TBL_CMD_IQ2_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ2_S)
+
struct fw_rss_glb_config_cmd {
__be32 op_to_write;
__be32 retval_len16;
@@ -2090,27 +2743,75 @@ struct fw_rss_glb_config_cmd {
struct fw_rss_glb_config_basicvirtual {
__be32 mode_pkd;
__be32 synmapen_to_hashtoeplitz;
-#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8)
-#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7)
-#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6)
-#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5)
-#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4)
-#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3)
-#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2)
-#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1)
-#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0)
__be64 r8;
__be64 r9;
} basicvirtual;
} u;
};
-#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
-#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf)
+#define FW_RSS_GLB_CONFIG_CMD_MODE_S 28
+#define FW_RSS_GLB_CONFIG_CMD_MODE_M 0xf
+#define FW_RSS_GLB_CONFIG_CMD_MODE_V(x) ((x) << FW_RSS_GLB_CONFIG_CMD_MODE_S)
+#define FW_RSS_GLB_CONFIG_CMD_MODE_G(x) \
+ (((x) >> FW_RSS_GLB_CONFIG_CMD_MODE_S) & FW_RSS_GLB_CONFIG_CMD_MODE_M)
#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S 8
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F \
+ FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S 7
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F \
+ FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S 6
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F \
+ FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S 5
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F \
+ FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S 4
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F \
+ FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S 3
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F \
+ FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S 2
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S)
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F \
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S 1
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S)
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F \
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(1U)
+
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S 0
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(x) \
+ ((x) << FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S)
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F \
+ FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(1U)
+
struct fw_rss_vi_config_cmd {
__be32 op_to_viid;
#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
@@ -2124,19 +2825,51 @@ struct fw_rss_vi_config_cmd {
struct fw_rss_vi_config_basicvirtual {
__be32 r6;
__be32 defaultq_to_udpen;
-#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
-#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff)
-#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
-#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
-#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
-#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
-#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0)
__be64 r9;
__be64 r10;
} basicvirtual;
} u;
};
+#define FW_RSS_VI_CONFIG_CMD_VIID_S 0
+#define FW_RSS_VI_CONFIG_CMD_VIID_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_VIID_S)
+
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S 16
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M 0x3ff
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S)
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(x) \
+ (((x) >> FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) & \
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M)
+
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S 4
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F \
+ FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S 3
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F \
+ FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S 2
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F \
+ FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S 1
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(x) \
+ ((x) << FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F \
+ FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(1U)
+
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_S 0
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S)
+#define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U)
+
struct fw_clip_cmd {
__be32 op_to_write;
__be32 alloc_to_len16;
@@ -2145,19 +2878,13 @@ struct fw_clip_cmd {
__be32 r4[2];
};
-#define S_FW_CLIP_CMD_ALLOC 31
-#define M_FW_CLIP_CMD_ALLOC 0x1
-#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
-#define G_FW_CLIP_CMD_ALLOC(x) \
- (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC)
-#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+#define FW_CLIP_CMD_ALLOC_S 31
+#define FW_CLIP_CMD_ALLOC_V(x) ((x) << FW_CLIP_CMD_ALLOC_S)
+#define FW_CLIP_CMD_ALLOC_F FW_CLIP_CMD_ALLOC_V(1U)
-#define S_FW_CLIP_CMD_FREE 30
-#define M_FW_CLIP_CMD_FREE 0x1
-#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
-#define G_FW_CLIP_CMD_FREE(x) \
- (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE)
-#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+#define FW_CLIP_CMD_FREE_S 30
+#define FW_CLIP_CMD_FREE_V(x) ((x) << FW_CLIP_CMD_FREE_S)
+#define FW_CLIP_CMD_FREE_F FW_CLIP_CMD_FREE_V(1U)
enum fw_error_type {
FW_ERROR_TYPE_EXCEPTION = 0x0,
@@ -2196,7 +2923,6 @@ struct fw_error_cmd {
struct fw_debug_cmd {
__be32 op_type;
-#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff)
__be32 len16_pkd;
union fw_debug {
struct fw_debug_assert {
@@ -2219,19 +2945,35 @@ struct fw_debug_cmd {
} u;
};
-#define FW_PCIE_FW_ERR (1U << 31)
-#define FW_PCIE_FW_INIT (1U << 30)
-#define FW_PCIE_FW_HALT (1U << 29)
-#define FW_PCIE_FW_MASTER_VLD (1U << 15)
-#define FW_PCIE_FW_MASTER_MASK 0x7
-#define FW_PCIE_FW_MASTER_SHIFT 12
-#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
-#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
- FW_PCIE_FW_MASTER_MASK)
-#define FW_PCIE_FW_EVAL_MASK 0x7
-#define FW_PCIE_FW_EVAL_SHIFT 24
-#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
- FW_PCIE_FW_EVAL_MASK)
+#define FW_DEBUG_CMD_TYPE_S 0
+#define FW_DEBUG_CMD_TYPE_M 0xff
+#define FW_DEBUG_CMD_TYPE_G(x) \
+ (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M)
+
+#define PCIE_FW_ERR_S 31
+#define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S)
+#define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U)
+
+#define PCIE_FW_INIT_S 30
+#define PCIE_FW_INIT_V(x) ((x) << PCIE_FW_INIT_S)
+#define PCIE_FW_INIT_F PCIE_FW_INIT_V(1U)
+
+#define PCIE_FW_HALT_S 29
+#define PCIE_FW_HALT_V(x) ((x) << PCIE_FW_HALT_S)
+#define PCIE_FW_HALT_F PCIE_FW_HALT_V(1U)
+
+#define PCIE_FW_EVAL_S 24
+#define PCIE_FW_EVAL_M 0x7
+#define PCIE_FW_EVAL_G(x) (((x) >> PCIE_FW_EVAL_S) & PCIE_FW_EVAL_M)
+
+#define PCIE_FW_MASTER_VLD_S 15
+#define PCIE_FW_MASTER_VLD_V(x) ((x) << PCIE_FW_MASTER_VLD_S)
+#define PCIE_FW_MASTER_VLD_F PCIE_FW_MASTER_VLD_V(1U)
+
+#define PCIE_FW_MASTER_S 12
+#define PCIE_FW_MASTER_M 0x7
+#define PCIE_FW_MASTER_V(x) ((x) << PCIE_FW_MASTER_S)
+#define PCIE_FW_MASTER_G(x) (((x) >> PCIE_FW_MASTER_S) & PCIE_FW_MASTER_M)
struct fw_hdr {
u8 ver;
@@ -2259,10 +3001,25 @@ enum fw_hdr_chip {
FW_HDR_CHIP_T5
};
-#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
-#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
-#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
-#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
+#define FW_HDR_FW_VER_MAJOR_S 24
+#define FW_HDR_FW_VER_MAJOR_M 0xff
+#define FW_HDR_FW_VER_MAJOR_G(x) \
+ (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M)
+
+#define FW_HDR_FW_VER_MINOR_S 16
+#define FW_HDR_FW_VER_MINOR_M 0xff
+#define FW_HDR_FW_VER_MINOR_G(x) \
+ (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M)
+
+#define FW_HDR_FW_VER_MICRO_S 8
+#define FW_HDR_FW_VER_MICRO_M 0xff
+#define FW_HDR_FW_VER_MICRO_G(x) \
+ (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M)
+
+#define FW_HDR_FW_VER_BUILD_S 0
+#define FW_HDR_FW_VER_BUILD_M 0xff
+#define FW_HDR_FW_VER_BUILD_G(x) \
+ (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M)
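These getters carve a packed 32-bit firmware revision into four byte-wide components, which is exactly how the drvinfo hunk further below formats its "%u.%u.%u.%u" string. A quick standalone check of the layout, with the macros re-derived here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define FW_VER_MAJOR_G(x)	(((x) >> 24) & 0xff)
#define FW_VER_MINOR_G(x)	(((x) >> 16) & 0xff)
#define FW_VER_MICRO_G(x)	(((x) >> 8) & 0xff)
#define FW_VER_BUILD_G(x)	(((x) >> 0) & 0xff)

int main(void)
{
	uint32_t fwrev = 0x01020304;	/* major 1, minor 2, micro 3, build 4 */

	printf("%u.%u.%u.%u\n", FW_VER_MAJOR_G(fwrev), FW_VER_MINOR_G(fwrev),
	       FW_VER_MICRO_G(fwrev), FW_VER_BUILD_G(fwrev)); /* 1.2.3.4 */
	return 0;
}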
enum fw_hdr_intfver {
FW_HDR_INTFVER_NIC = 0x00,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 3d06e77d7121..d00a751f0588 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -138,6 +138,8 @@ struct sge_fl {
struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */
__be64 *desc; /* address of HW RX descriptor ring */
dma_addr_t addr; /* PCI bus address of hardware ring */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
};
/*
@@ -178,6 +180,8 @@ struct sge_rspq {
u16 abs_id; /* SGE abs QID for the response Q */
__be64 *desc; /* address of hardware response ring */
dma_addr_t phys_addr; /* PCI bus address of ring */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
unsigned int iqe_len; /* entry size */
unsigned int size; /* capacity of response Q */
struct adapter *adapter; /* our adapter */
@@ -240,6 +244,8 @@ struct sge_txq {
struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */
struct sge_qstat *stat; /* queue status entry */
dma_addr_t phys_addr; /* PCI bus address of hardware ring */
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
};
/*
@@ -345,6 +351,7 @@ struct sge {
struct adapter {
/* PCI resources */
void __iomem *regs;
+ void __iomem *bar2;
struct pci_dev *pdev;
struct device *pdev_dev;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0b42bddaf284..aa74ec34a467 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1030,10 +1030,10 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
pktcnt_idx = closest_thres(&adapter->sge, cnt);
if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
- v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
- FW_PARAMS_PARAM_X(
+ v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
- FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
+ FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
if (err)
return err;
@@ -1230,14 +1230,14 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
- FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
- FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
- FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
- FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
- FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
- FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
- FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
- FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}
/*
@@ -2095,7 +2095,6 @@ static int adap_init0(struct adapter *adapter)
unsigned int ethqsets;
int err;
u32 param, val = 0;
- unsigned int chipid;
/*
* Wait for the device to become ready before proceeding ...
@@ -2123,17 +2122,6 @@ static int adap_init0(struct adapter *adapter)
return err;
}
- adapter->params.chip = 0;
- switch (adapter->pdev->device >> 12) {
- case CHELSIO_T4:
- adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
- break;
- case CHELSIO_T5:
- chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
- adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
- break;
- }
-
/*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
@@ -2184,8 +2172,8 @@ static int adap_init0(struct adapter *adapter)
* firmware won't understand this and we'll just get
* unencapsulated messages ...
*/
- param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+ param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
val = 1;
(void) t4vf_set_params(adapter, 1, &param, &val);
@@ -2594,6 +2582,27 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
goto err_free_adapter;
}
+ /* Wait for the device to become ready before proceeding ...
+ */
+ err = t4vf_prep_adapter(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "device didn't become ready:"
+ " err=%d\n", err);
+ goto err_unmap_bar0;
+ }
+
+ /* For T5 and later we want to use the new BAR-based User Doorbells,
+ * so we need to map BAR2 here ...
+ */
+ if (!is_t4(adapter->params.chip)) {
+ adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!adapter->bar2) {
+ dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
+ err = -ENOMEM;
+ goto err_unmap_bar0;
+ }
+ }
/*
* Initialize adapter level features.
*/
@@ -2786,6 +2795,10 @@ err_free_dev:
}
err_unmap_bar:
+ if (!is_t4(adapter->params.chip))
+ iounmap(adapter->bar2);
+
+err_unmap_bar0:
iounmap(adapter->regs);
err_free_adapter:
@@ -2856,6 +2869,8 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
iounmap(adapter->regs);
+ if (!is_t4(adapter->params.chip))
+ iounmap(adapter->bar2);
kfree(adapter);
}
@@ -2908,67 +2923,18 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-/*
- * PCI Device registration data structures.
- */
-#define CH_DEVICE(devid) \
- { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
-
-static const struct pci_device_id cxgb4vf_pci_tbl[] = {
- CH_DEVICE(0xb000), /* PE10K FPGA */
- CH_DEVICE(0x4801), /* T420-cr */
- CH_DEVICE(0x4802), /* T422-cr */
- CH_DEVICE(0x4803), /* T440-cr */
- CH_DEVICE(0x4804), /* T420-bch */
- CH_DEVICE(0x4805), /* T440-bch */
- CH_DEVICE(0x4806), /* T460-ch */
- CH_DEVICE(0x4807), /* T420-so */
- CH_DEVICE(0x4808), /* T420-cx */
- CH_DEVICE(0x4809), /* T420-bt */
- CH_DEVICE(0x480a), /* T404-bt */
- CH_DEVICE(0x480d), /* T480-cr */
- CH_DEVICE(0x480e), /* T440-lp-cr */
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4881),
- CH_DEVICE(0x4882),
- CH_DEVICE(0x4883),
- CH_DEVICE(0x4884),
- CH_DEVICE(0x4885),
- CH_DEVICE(0x4886),
- CH_DEVICE(0x4887),
- CH_DEVICE(0x4888),
- CH_DEVICE(0x5801), /* T520-cr */
- CH_DEVICE(0x5802), /* T522-cr */
- CH_DEVICE(0x5803), /* T540-cr */
- CH_DEVICE(0x5804), /* T520-bch */
- CH_DEVICE(0x5805), /* T540-bch */
- CH_DEVICE(0x5806), /* T540-ch */
- CH_DEVICE(0x5807), /* T520-so */
- CH_DEVICE(0x5808), /* T520-cx */
- CH_DEVICE(0x5809), /* T520-bt */
- CH_DEVICE(0x580a), /* T504-bt */
- CH_DEVICE(0x580b), /* T520-sr */
- CH_DEVICE(0x580c), /* T504-bt */
- CH_DEVICE(0x580d), /* T580-cr */
- CH_DEVICE(0x580e), /* T540-lp-cr */
- CH_DEVICE(0x580f), /* Amsterdam */
- CH_DEVICE(0x5810), /* T580-lp-cr */
- CH_DEVICE(0x5811), /* T520-lp-cr */
- CH_DEVICE(0x5812), /* T560-cr */
- CH_DEVICE(0x5813), /* T580-cr */
- CH_DEVICE(0x5814), /* T580-so-cr */
- CH_DEVICE(0x5815), /* T502-bt */
- CH_DEVICE(0x5880),
- CH_DEVICE(0x5881),
- CH_DEVICE(0x5882),
- CH_DEVICE(0x5883),
- CH_DEVICE(0x5884),
- CH_DEVICE(0x5885),
- CH_DEVICE(0x5886),
- CH_DEVICE(0x5887),
- CH_DEVICE(0x5888),
- { 0, }
-};
+/* Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static struct pci_device_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x8
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { PCI_VDEVICE(CHELSIO, (devid)), 0 }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
+
+#include "../cxgb4/t4_pci_id_tbl.h"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index fdd078d7d82c..f7fd1317d996 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -118,7 +118,7 @@ enum {
* we can specify for immediate data in the firmware Ethernet TX
* Work Request.
*/
- MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
+ MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,
/*
* Max size of a WR sent through a control TX queue.
@@ -525,19 +525,40 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
u32 val;
- /*
- * The SGE keeps track of its Producer and Consumer Indices in terms
+ /* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
* of multiples of Free List Entries per Egress Queue Units ...
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
- val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
- if (!is_t4(adapter->params.chip))
- val |= DBTYPE(1);
+ if (is_t4(adapter->params.chip))
+ val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+ else
+ val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) |
+ DBTYPE(1);
+ val |= DBPRIO(1);
+
+ /* Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
wmb();
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- DBPRIO(1) |
- QID(fl->cntxt_id) | val);
+
+ /* If we don't have access to the new User Doorbell (T5+), use
+ * the old doorbell mechanism; otherwise use the new BAR2
+ * mechanism.
+ */
+ if (unlikely(fl->bar2_addr == NULL)) {
+ t4_write_reg(adapter,
+ T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ QID(fl->cntxt_id) | val);
+ } else {
+ writel(val | QID(fl->bar2_qid),
+ fl->bar2_addr + SGE_UDB_KDOORBELL);
+
+ /* This Write memory Barrier will force the write to
+ * the User Doorbell area to be flushed.
+ */
+ wmb();
+ }
fl->pend_cred %= FL_PER_EQ_UNIT;
}
}
@@ -598,6 +619,8 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
*/
BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
+ gfp |= __GFP_NOWARN;
+
/*
* If we support large pages, prefer large buffers and fail over to
* small pages if we can't allocate large pages to satisfy the refill.
@@ -608,8 +631,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
goto alloc_small_pages;
while (n) {
- page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- s->fl_pg_order);
+ page = __dev_alloc_pages(gfp, s->fl_pg_order);
if (unlikely(!page)) {
/*
* We've failed in our attempt to allocate a "large
@@ -653,7 +675,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
alloc_small_pages:
while (n--) {
- page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL);
+ page = __dev_alloc_page(gfp);
if (unlikely(!page)) {
fl->alloc_failed++;
break;
@@ -902,7 +924,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
sgl->addr0 = cpu_to_be64(addr[1]);
}
- sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE(nfrags));
if (likely(--nfrags == 0))
return;
@@ -948,14 +970,74 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
int n)
{
- /*
- * Warn if we write doorbells with the wrong priority and write
- * descriptors before telling HW.
+ /* Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
*/
- WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1));
wmb();
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- QID(tq->cntxt_id) | PIDX(n));
+
+ /* If we don't have access to the new User Doorbell (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(tq->bar2_addr == NULL)) {
+ u32 val = PIDX(n);
+
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ QID(tq->cntxt_id) | val);
+ } else {
+ u32 val = PIDX_T5(n);
+
+ /* T4 and later chips share the same PIDX field offset within
+ * the doorbell, but T5 and later shrank the field in order to
+ * gain a bit for Doorbell Priority. The field was absurdly
+ * large in the first place (14 bits) so we just use the T5
+ * and later limits and warn if a Queue ID is too large.
+ */
+ WARN_ON(val & DBPRIO(1));
+
+ /* If we're only writing a single Egress Unit and the BAR2
+ * Queue ID is 0, we can use the Write Combining Doorbell
+ * Gather Buffer; otherwise we use the simple doorbell.
+ */
+ if (n == 1 && tq->bar2_qid == 0) {
+ unsigned int index = (tq->pidx
+ ? (tq->pidx - 1)
+ : (tq->size - 1));
+ __be64 *src = (__be64 *)&tq->desc[index];
+ __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
+ SGE_UDB_WCDOORBELL);
+ unsigned int count = EQ_UNIT / sizeof(__be64);
+
+ /* Copy the TX Descriptor in a tight loop in order to
+ * try to get it to the adapter in a single Write
+ * Combined transfer on the PCI-E Bus. If the Write
+ * Combine fails (say because of an interrupt, etc.)
+ * the hardware will simply take the last write as a
+ * simple doorbell write with a PIDX Increment of 1
+ * and will fetch the TX Descriptor from memory via
+ * DMA.
+ */
+ while (count) {
+ writeq(*src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+ } else
+ writel(val | QID(tq->bar2_qid),
+ tq->bar2_addr + SGE_UDB_KDOORBELL);
+
+ /* This Write Memory Barrier will force the write to the User
+ * Doorbell area to be flushed. This is needed to prevent
+ * writes on different CPUs for the same queue from hitting
+ * the adapter out of order. This is required when some Work
+ * Requests take the Write Combine Gather Buffer path (user
+ * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
+ * take the traditional path where we simply increment the
+ * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
+ * hardware DMA read the actual Work Request.
+ */
+ wmb();
+ }
}
/**
@@ -1145,7 +1227,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
goto out_free;
}
- wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
/*
* After we're done injecting the Work Request for this
@@ -1157,7 +1239,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/*
@@ -1187,9 +1269,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
wr->op_immdlen =
- cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
- FW_WR_IMMDLEN(sizeof(*lso) +
- sizeof(*cpl)));
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(sizeof(*lso) +
+ sizeof(*cpl)));
/*
* Fill in the LSO CPL message.
*/
@@ -1224,8 +1306,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
wr->op_immdlen =
- cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
- FW_WR_IMMDLEN(len));
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(len));
/*
* Set up TX Packet CPL pointer, control word and perform
@@ -1781,6 +1863,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
unsigned int intr_params;
struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
int work_done = process_responses(rspq, budget);
+ u32 val;
if (likely(work_done < budget)) {
napi_complete(napi);
@@ -1792,11 +1875,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
- t4_write_reg(rspq->adapter,
- T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(work_done) |
- INGRESSQID((u32)rspq->cntxt_id) |
- SEINTARM(intr_params));
+ val = CIDXINC(work_done) | SEINTARM(intr_params);
+ if (is_t4(rspq->adapter->params.chip)) {
+ t4_write_reg(rspq->adapter,
+ T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ val | INGRESSQID((u32)rspq->cntxt_id));
+ } else {
+ writel(val | INGRESSQID(rspq->bar2_qid),
+ rspq->bar2_addr + SGE_UDB_GTS);
+ wmb();
+ }
return work_done;
}
@@ -1821,6 +1909,7 @@ static unsigned int process_intrq(struct adapter *adapter)
struct sge *s = &adapter->sge;
struct sge_rspq *intrq = &s->intrq;
unsigned int work_done;
+ u32 val;
spin_lock(&adapter->sge.intrq_lock);
for (work_done = 0; ; work_done++) {
@@ -1886,10 +1975,15 @@ static unsigned int process_intrq(struct adapter *adapter)
rspq_next(intrq);
}
- t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
- CIDXINC(work_done) |
- INGRESSQID(intrq->cntxt_id) |
- SEINTARM(intrq->intr_params));
+ val = CIDXINC(work_done) | SEINTARM(intrq->intr_params);
+ if (is_t4(adapter->params.chip))
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ val | INGRESSQID(intrq->cntxt_id));
+ else {
+ writel(val | INGRESSQID(intrq->bar2_qid),
+ intrq->bar2_addr + SGE_UDB_GTS);
+ wmb();
+ }
spin_unlock(&adapter->sge.intrq_lock);
@@ -2035,6 +2129,35 @@ static void sge_tx_timer_cb(unsigned long data)
}
/**
+ * bar2_address - return the BAR2 address for an SGE Queue's Registers
+ * @adapter: the adapter
+ * @qid: the SGE Queue ID
+ * @qtype: the SGE Queue Type (Egress or Ingress)
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 address for the SGE Queue Registers associated with
+ * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
+ * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ * Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ unsigned int *pbar2_qid)
+{
+ u64 bar2_qoffset;
+ int ret;
+
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype,
+ &bar2_qoffset, pbar2_qid);
+ if (ret)
+ return NULL;
+
+ return adapter->bar2 + bar2_qoffset;
+}
+
+/**
* t4vf_sge_alloc_rxq - allocate an SGE RX Queue
* @adapter: the adapter
* @rspq: pointer to the new rxq's Response Queue to be filled in
@@ -2087,26 +2210,26 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
* into OS-independent common code ...
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
- FW_IQ_CMD_IQSTART(1) |
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
+ FW_IQ_CMD_IQSTART_F |
FW_LEN16(cmd));
cmd.type_to_iqandstindex =
- cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
- FW_IQ_CMD_IQASYNCH(iqasynch) |
- FW_IQ_CMD_VIID(pi->viid) |
- FW_IQ_CMD_IQANDST(iqandst) |
- FW_IQ_CMD_IQANUS(1) |
- FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
- FW_IQ_CMD_IQANDSTINDEX(intr_dest));
+ cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH_V(iqasynch) |
+ FW_IQ_CMD_VIID_V(pi->viid) |
+ FW_IQ_CMD_IQANDST_V(iqandst) |
+ FW_IQ_CMD_IQANUS_V(1) |
+ FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
+ FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
cmd.iqdroprss_to_iqesize =
- cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
- FW_IQ_CMD_IQGTSMODE |
- FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
- FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
+ cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
+ FW_IQ_CMD_IQGTSMODE_F |
+ FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
cmd.iqsize = cpu_to_be16(rspq->size);
cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
@@ -2140,13 +2263,13 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
*/
cmd.iqns_to_fl0congen =
cpu_to_be32(
- FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
- FW_IQ_CMD_FL0PACKEN(1) |
- FW_IQ_CMD_FL0PADEN(1));
+ FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
+ FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0PADEN_F);
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
- FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
- FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
+ FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+ FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
cmd.fl0size = cpu_to_be16(flsz);
cmd.fl0addr = cpu_to_be64(fl->addr);
}
@@ -2165,6 +2288,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
rspq->gen = 1;
rspq->next_intr_params = rspq->intr_params;
rspq->cntxt_id = be16_to_cpu(rpl.iqid);
+ rspq->bar2_addr = bar2_address(adapter,
+ rspq->cntxt_id,
+ T4_BAR2_QTYPE_INGRESS,
+ &rspq->bar2_qid);
rspq->abs_id = be16_to_cpu(rpl.physiqid);
rspq->size--; /* subtract status entry */
rspq->adapter = adapter;
@@ -2183,6 +2310,15 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
fl->alloc_failed = 0;
fl->large_alloc_failed = 0;
fl->starving = 0;
+
+ /* Note, we must initialize the BAR2 Free List User Doorbell
+ * information before refilling the Free List!
+ */
+ fl->bar2_addr = bar2_address(adapter,
+ fl->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &fl->bar2_qid);
+
refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
}
@@ -2250,24 +2386,25 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
* into the common code ...
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
- FW_EQ_ETH_CMD_EQSTART |
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
+ FW_EQ_ETH_CMD_EQSTART_F |
FW_LEN16(cmd));
- cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE |
- FW_EQ_ETH_CMD_VIID(pi->viid));
+ cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ FW_EQ_ETH_CMD_VIID_V(pi->viid));
cmd.fetchszm_to_iqid =
- cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
- FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
- FW_EQ_ETH_CMD_IQID(iqid));
+ cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
+ FW_EQ_ETH_CMD_IQID_V(iqid));
cmd.dcaen_to_eqsize =
- cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
- FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
- FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
- FW_EQ_ETH_CMD_EQSIZE(nentries));
+ cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+ FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(
+ SGE_CIDXFLUSHTHRESH_32) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
/*
@@ -2293,9 +2430,13 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
txq->q.cidx = 0;
txq->q.pidx = 0;
txq->q.stat = (void *)&txq->q.desc[txq->q.size];
- txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
+ txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
+ txq->q.bar2_addr = bar2_address(adapter,
+ txq->q.cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &txq->q.bar2_qid);
txq->q.abs_id =
- FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
+ FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
txq->txq = devq;
txq->tso = 0;
txq->tx_cso = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 4b6a6d14d86d..8d3237f5e364 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -67,7 +67,7 @@ enum chip_type {
/*
* The "len16" field of a Firmware Command Structure ...
*/
-#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
/*
* Per-VF statistics.
@@ -135,9 +135,12 @@ struct dev_params {
struct sge_params {
u32 sge_control; /* padding, boundaries, lengths, etc. */
u32 sge_control2; /* T5: more of the same */
- u32 sge_host_page_size; /* RDMA page sizes */
- u32 sge_queues_per_page; /* RDMA queues/page */
- u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
+ u32 sge_host_page_size; /* PF0-7 page sizes */
+ u32 sge_egress_queues_per_page; /* PF0-7 egress queues/page */
+ u32 sge_ingress_queues_per_page;/* PF0-7 ingress queues/page */
+ u32 sge_vf_hps; /* host page size for our vf */
+ u32 sge_vf_eq_qpp; /* egress queues/page for our VF */
+ u32 sge_vf_iq_qpp; /* ingress queues/page for our VF */
u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
u32 sge_congestion_control; /* congestion thresholds, etc. */
@@ -267,6 +270,8 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
+#define CHELSIO_PCI_ID_VER(dev_id) ((dev_id) >> 12)
+
static inline int is_t4(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
@@ -278,6 +283,13 @@ int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
int t4vf_get_dev_params(struct adapter *);
@@ -309,5 +321,6 @@ int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
int t4vf_eth_eq_free(struct adapter *, unsigned int);
int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
+int t4vf_prep_adapter(struct adapter *);
#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1e896b923234..02e8833b7797 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -204,20 +204,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
/* return value in low-order little-endian word */
v = t4_read_reg(adapter, mbox_data);
- if (FW_CMD_RETVAL_GET(v))
+ if (FW_CMD_RETVAL_G(v))
dump_mbox(adapter, "FW Error", mbox_data);
if (rpl) {
/* request bit in high-order BE word */
WARN_ON((be32_to_cpu(*(const u32 *)cmd)
- & FW_CMD_REQUEST) == 0);
+ & FW_CMD_REQUEST_F) == 0);
get_mbox_rpl(adapter, rpl, size, mbox_data);
WARN_ON((be32_to_cpu(*(u32 *)rpl)
- & FW_CMD_REQUEST) != 0);
+ & FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
MBOWNER(MBOX_OWNER_NONE));
- return -FW_CMD_RETVAL_GET(v);
+ return -FW_CMD_RETVAL_G(v);
}
}
@@ -287,17 +287,17 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
* like MAC address, etc.
*/
memset(&vi_cmd, 0, sizeof(vi_cmd));
- vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
- vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid));
+ vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
if (v)
return v;
- BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd));
- pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd));
+ BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
+ pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
/*
@@ -308,12 +308,12 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
return 0;
memset(&port_cmd, 0, sizeof(port_cmd));
- port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
- FW_PORT_CMD_PORTID(pi->port_id));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(pi->port_id));
port_cmd.action_to_len16 =
- cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(port_cmd));
v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
if (v)
@@ -349,8 +349,8 @@ int t4vf_fw_reset(struct adapter *adapter)
struct fw_reset_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
- FW_CMD_WRITE);
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
+ FW_CMD_WRITE_F);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
@@ -377,12 +377,12 @@ static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
return -EINVAL;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
param[nparams].mnem), 16);
- cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
p->mnem = htonl(*params++);
@@ -415,12 +415,12 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
return -EINVAL;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F);
len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
param[nparams]), 16);
- cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
p->mnem = cpu_to_be32(*params++);
p->val = cpu_to_be32(*vals++);
@@ -430,6 +430,95 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
}
/**
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * @adapter: the adapter
+ * @qid: the Queue ID
+ * @qtype: the Ingress or Egress type for @qid
+ * @pbar2_qoffset: BAR2 Queue Offset
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 SGE Queue Registers information associated with the
+ * indicated Absolute Queue ID. These are passed back in return value
+ * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queues
+ * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ * This may return an error which indicates that BAR2 SGE Queue
+ * registers aren't available. If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" registers may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /* T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /* Get our SGE Page Size parameters.
+ */
+ page_shift = adapter->params.sge.sge_vf_hps + 10;
+ page_size = 1 << page_shift;
+
+ /* Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
+ ? adapter->params.sge.sge_vf_eq_qpp
+ : adapter->params.sge.sge_vf_iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /* Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /* If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+ * One important consequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
+
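Working the arithmetic above through a concrete case helps: assume a 4KB SGE page (page_shift = 12), four queues per page (qpp_shift = 2) and the driver's 128-byte per-queue doorbell window (SGE_UDB_SIZE); these parameter values are illustrative, since the real ones come from firmware. Queue ID 10 then lands in BAR2 page 2 and its doorbell offset stays within the page, so the hardware can infer the Queue ID:

#include <stdio.h>

#define SGE_UDB_SIZE 128	/* per-queue doorbell window, as assumed above */

int main(void)
{
	unsigned int qid = 10, page_shift = 12, qpp_shift = 2;
	unsigned int page_size = 1u << page_shift;
	unsigned int qpp_mask = (1u << qpp_shift) - 1;

	unsigned long long qoffset =
		(unsigned long long)(qid >> qpp_shift) << page_shift;
	unsigned int bar2_qid = qid & qpp_mask;		/* 10 & 3 = 2 */
	unsigned int qid_offset = bar2_qid * SGE_UDB_SIZE;	/* 256 */

	if (qid_offset < page_size) {	/* hardware can infer the QID */
		qoffset += qid_offset;
		bar2_qid = 0;
	}
	/* prints: qoffset=0x2100 bar2_qid=0 */
	printf("qoffset=0x%llx bar2_qid=%u\n", qoffset, bar2_qid);
	return 0;
}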
+/**
* t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
* @adapter: the adapter
*
@@ -443,20 +532,20 @@ int t4vf_get_sge_params(struct adapter *adapter)
u32 params[7], vals[7];
int v;
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
- params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
- params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
- params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
- params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
- params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
- params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL));
+ params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE));
+ params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0));
+ params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1));
+ params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5));
v = t4vf_query_params(adapter, 7, params, vals);
if (v)
return v;
@@ -479,8 +568,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
* right value.
*/
if (!is_t4(adapter->params.chip)) {
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
v = t4vf_query_params(adapter, 1, params, vals);
if (v != FW_SUCCESS) {
dev_err(adapter->pdev_dev,
@@ -491,16 +580,65 @@ int t4vf_get_sge_params(struct adapter *adapter)
sge_params->sge_control2 = vals[0];
}
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
- params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD));
+ params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL));
v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
sge_params->sge_ingress_rx_threshold = vals[0];
sge_params->sge_congestion_control = vals[1];
+ /* For T5 and later we want to use the new BAR2 Doorbells.
+ * Unfortunately, older firmware didn't allow this register to be
+ * read.
+ */
+ if (!is_t4(adapter->params.chip)) {
+ u32 whoami;
+ unsigned int pf, s_hps, s_qpp;
+
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(
+ SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
+ params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ_V(
+ SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_warn(adapter->pdev_dev,
+ "Unable to get VF SGE Queues/Page; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_params->sge_egress_queues_per_page = vals[0];
+ sge_params->sge_ingress_queues_per_page = vals[1];
+
+ /* We need the Queues/Page for our VF. This is based on the
+ * PF from which we're instantiated and is indexed in the
+ * register we just read. Do it once here so other code in
+ * the driver can just use it.
+ */
+ whoami = t4_read_reg(adapter,
+ T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
+ pf = SOURCEPF_GET(whoami);
+
+ s_hps = (HOSTPAGESIZEPF0_S +
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
+ sge_params->sge_vf_hps =
+ ((sge_params->sge_host_page_size >> s_hps)
+ & HOSTPAGESIZEPF0_M);
+
+ s_qpp = (QUEUESPERPAGEPF0_S +
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
+ sge_params->sge_vf_eq_qpp =
+ ((sge_params->sge_egress_queues_per_page >> s_qpp)
+ & QUEUESPERPAGEPF0_MASK);
+ sge_params->sge_vf_iq_qpp =
+ ((sge_params->sge_ingress_queues_per_page >> s_qpp)
+ & QUEUESPERPAGEPF0_MASK);
+ }
+
return 0;
}
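The stride arithmetic just above (s_hps, s_qpp) works because each PF owns an equally spaced bit-field within one register, so the VF scales its PF index by the distance between the PF0 and PF1 field starts. A hedged sketch of that indexing follows; the 4-bit stride and mask here are assumptions chosen for illustration, not the hardware's actual field widths:

#include <stdio.h>

#define PF0_S	0	/* hypothetical: PF0's field starts at bit 0 */
#define PF1_S	4	/* hypothetical: the next PF's field starts 4 bits up */
#define PF0_M	0xf	/* hypothetical: each field is 4 bits wide */

int main(void)
{
	unsigned int reg = 0x00543210;	/* one 4-bit value per PF */
	unsigned int pf = 3;
	unsigned int shift = PF0_S + (PF1_S - PF0_S) * pf;

	/* prints: PF3 field = 0x3 */
	printf("PF%u field = 0x%x\n", pf, (reg >> shift) & PF0_M);
	return 0;
}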
@@ -517,8 +655,8 @@ int t4vf_get_vpd_params(struct adapter *adapter)
u32 params[7], vals[7];
int v;
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
v = t4vf_query_params(adapter, 1, params, vals);
if (v)
return v;
@@ -540,10 +678,10 @@ int t4vf_get_dev_params(struct adapter *adapter)
u32 params[7], vals[7];
int v;
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
- params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
@@ -571,9 +709,9 @@ int t4vf_get_rss_glb_config(struct adapter *adapter)
* our RSS configuration.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
@@ -585,7 +723,7 @@ int t4vf_get_rss_glb_config(struct adapter *adapter)
* filtering at this point to weed out modes which don't support
* VF Drivers ...
*/
- rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
+ rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
be32_to_cpu(rpl.u.manual.mode_pkd));
switch (rss->mode) {
case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
@@ -593,26 +731,26 @@ int t4vf_get_rss_glb_config(struct adapter *adapter)
rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
rss->u.basicvirtual.synmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
rss->u.basicvirtual.syn4tupenipv6 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
rss->u.basicvirtual.syn2tupenipv6 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
rss->u.basicvirtual.syn4tupenipv4 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
rss->u.basicvirtual.syn2tupenipv4 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
rss->u.basicvirtual.ofdmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
rss->u.basicvirtual.tnlmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
rss->u.basicvirtual.tnlalllookup =
- ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
rss->u.basicvirtual.hashtoeplitz =
- ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+ ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
/* we need at least Tunnel Map Enable to be set */
if (!rss->u.basicvirtual.tnlmapen)
@@ -647,9 +785,9 @@ int t4vf_get_vfres(struct adapter *adapter)
* with error on command failure.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
@@ -659,22 +797,22 @@ int t4vf_get_vfres(struct adapter *adapter)
* Extract VF resource limits and return success.
*/
word = be32_to_cpu(rpl.niqflint_niq);
- vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
- vfres->niq = FW_PFVF_CMD_NIQ_GET(word);
+ vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
+ vfres->niq = FW_PFVF_CMD_NIQ_G(word);
word = be32_to_cpu(rpl.type_to_neq);
- vfres->neq = FW_PFVF_CMD_NEQ_GET(word);
- vfres->pmask = FW_PFVF_CMD_PMASK_GET(word);
+ vfres->neq = FW_PFVF_CMD_NEQ_G(word);
+ vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
word = be32_to_cpu(rpl.tc_to_nexactf);
- vfres->tc = FW_PFVF_CMD_TC_GET(word);
- vfres->nvi = FW_PFVF_CMD_NVI_GET(word);
- vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word);
+ vfres->tc = FW_PFVF_CMD_TC_G(word);
+ vfres->nvi = FW_PFVF_CMD_NVI_G(word);
+ vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
word = be32_to_cpu(rpl.r_caps_to_nethctrl);
- vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word);
- vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word);
- vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word);
+ vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
+ vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
+ vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
return 0;
}
@@ -695,9 +833,9 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
int v;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
FW_RSS_VI_CONFIG_CMD_VIID(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
@@ -709,17 +847,17 @@ int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
config->basicvirtual.ip6fourtupen =
- ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
config->basicvirtual.ip6twotupen =
- ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
config->basicvirtual.ip4fourtupen =
- ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
config->basicvirtual.ip4twotupen =
- ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
config->basicvirtual.udpen =
- ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0);
+ ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
config->basicvirtual.defaultq =
- FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word);
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
break;
}
@@ -745,9 +883,9 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
struct fw_rss_vi_config_cmd cmd, rpl;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
FW_RSS_VI_CONFIG_CMD_VIID(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
switch (adapter->params.rss.mode) {
@@ -755,16 +893,16 @@ int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
u32 word = 0;
if (config->basicvirtual.ip6fourtupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
if (config->basicvirtual.ip6twotupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
if (config->basicvirtual.ip4fourtupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
if (config->basicvirtual.ip4twotupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
if (config->basicvirtual.udpen)
- word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
- word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
+ word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
+ word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
config->basicvirtual.defaultq);
cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
break;
@@ -803,10 +941,10 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
* Initialize firmware command template to write the RSS table.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_RSS_IND_TBL_CMD_VIID_V(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/*
@@ -857,9 +995,9 @@ int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
if (rsp >= rsp_end)
rsp = rspq;
}
- *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
- FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
- FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+ *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
+ FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
+ FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
}
/*
@@ -892,18 +1030,18 @@ int t4vf_alloc_vi(struct adapter *adapter, int port_id)
* VIID.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F);
cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
- FW_VI_CMD_ALLOC);
- cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
+ FW_VI_CMD_ALLOC_F);
+ cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (v)
return v;
- return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
+ return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
}
/**
@@ -922,12 +1060,12 @@ int t4vf_free_vi(struct adapter *adapter, int viid)
* Execute a VI command to free the Virtual Interface.
*/
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC);
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F);
cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
- FW_VI_CMD_FREE);
- cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
+ FW_VI_CMD_FREE_F);
+ cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
@@ -946,12 +1084,12 @@ int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
struct fw_vi_enable_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC |
- FW_VI_ENABLE_CMD_VIID(viid));
- cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
- FW_VI_ENABLE_CMD_EEN(tx_en) |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+ FW_VI_ENABLE_CMD_EEN_V(tx_en) |
FW_LEN16(cmd));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
@@ -970,11 +1108,11 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
struct fw_vi_enable_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC |
- FW_VI_ENABLE_CMD_VIID(viid));
- cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
FW_LEN16(cmd));
cmd.blinkdur = cpu_to_be16(nblinks);
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
@@ -1001,28 +1139,28 @@ int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
/* convert to FW values */
if (mtu < 0)
- mtu = FW_VI_RXMODE_CMD_MTU_MASK;
+ mtu = FW_VI_RXMODE_CMD_MTU_M;
if (promisc < 0)
- promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+ promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
if (all_multi < 0)
- all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+ all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
if (bcast < 0)
- bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
if (vlanex < 0)
- vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_VI_RXMODE_CMD_VIID(viid));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_VI_RXMODE_CMD_VIID_V(viid));
cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
cmd.mtu_to_vlanexen =
- cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
- FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
@@ -1072,19 +1210,19 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
int i;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- (free ? FW_CMD_EXEC : 0) |
- FW_VI_MAC_CMD_VIID(viid));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ (free ? FW_CMD_EXEC_F : 0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
cmd.freemacs_to_len16 =
- cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
- FW_CMD_LEN16(len16));
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+ FW_CMD_LEN16_V(len16));
for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
p->valid_to_idx = cpu_to_be16(
- FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
}
@@ -1095,7 +1233,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
break;
for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_GET(
+ u16 index = FW_VI_MAC_CMD_IDX_G(
be16_to_cpu(p->valid_to_idx));
if (idx)
@@ -1161,19 +1299,19 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_VI_MAC_CMD_VIID(viid));
- cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
- p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(idx));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
if (ret == 0) {
p = &rpl.u.exact[0];
- ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
+ ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_naddr)
ret = -ENOMEM;
}
@@ -1198,13 +1336,13 @@ int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
u.exact[0]), 16);
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_VI_ENABLE_CMD_VIID(viid));
- cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN |
- FW_VI_MAC_CMD_HASHUNIEN(ucast) |
- FW_CMD_LEN16(len16));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+ FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+ FW_CMD_LEN16_V(len16));
cmd.u.hash.hashvec = cpu_to_be64(vec);
return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
@@ -1240,14 +1378,14 @@ int t4vf_get_port_stats(struct adapter *adapter, int pidx,
int ret;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) |
- FW_VI_STATS_CMD_VIID(pi->viid) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
- cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
+ FW_VI_STATS_CMD_VIID_V(pi->viid) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F);
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
cmd.u.ctl.nstats_ix =
- cpu_to_be16(FW_VI_STATS_CMD_IX(ix) |
- FW_VI_STATS_CMD_NSTATS(nstats));
+ cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
+ FW_VI_STATS_CMD_NSTATS_V(nstats));
ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
if (ret)
return ret;
@@ -1299,13 +1437,13 @@ int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
struct fw_iq_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE |
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
FW_LEN16(cmd));
cmd.type_to_iqandstindex =
- cpu_to_be32(FW_IQ_CMD_TYPE(iqtype));
+ cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
cmd.iqid = cpu_to_be16(iqid);
cmd.fl0id = cpu_to_be16(fl0id);
@@ -1325,12 +1463,12 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
struct fw_eq_eth_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE |
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
FW_LEN16(cmd));
- cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid));
+ cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
@@ -1344,7 +1482,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
- u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
+ u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));
switch (opcode) {
case FW_PORT_CMD: {
@@ -1359,7 +1497,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
/*
* Extract various fields from port status change message.
*/
- action = FW_PORT_CMD_ACTION_GET(
+ action = FW_PORT_CMD_ACTION_G(
be32_to_cpu(port_cmd->action_to_len16));
if (action != FW_PORT_ACTION_GET_PORT_INFO) {
dev_err(adapter->pdev_dev,
@@ -1368,24 +1506,24 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
break;
}
- port_id = FW_PORT_CMD_PORTID_GET(
+ port_id = FW_PORT_CMD_PORTID_G(
be32_to_cpu(port_cmd->op_to_portid));
word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
- link_ok = (word & FW_PORT_CMD_LSTATUS) != 0;
+ link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0;
speed = 0;
fc = 0;
- if (word & FW_PORT_CMD_RXPAUSE)
+ if (word & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
- if (word & FW_PORT_CMD_TXPAUSE)
+ if (word & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
- if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
- else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
- else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
- else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
/*
@@ -1420,3 +1558,38 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
}
return 0;
}
+
+/**
+ *	t4vf_prep_adapter - prepare SW and HW for operation
+ *	@adapter: the adapter
+ *
+ *	Wait for the device to become ready, install default adapter
+ *	parameters usable for early debugging, and determine which chip
+ *	(T4 or T5) the VF is attached to.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+ int err;
+ unsigned int chipid;
+
+ /* Wait for the device to become ready before proceeding ...
+ */
+ err = t4vf_wait_dev_ready(adapter);
+ if (err)
+ return err;
+
+ /* Default port and clock for debugging in case we can't reach
+ * firmware.
+ */
+ adapter->params.nports = 1;
+ adapter->params.vfres.pmask = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ adapter->params.chip = 0;
+ switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
+ case CHELSIO_T4:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ break;
+
+ case CHELSIO_T5:
+ chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+ break;
+ }
+
+ return 0;
+}
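The bulk of the t4vf_hw.c churn above is mechanical: bare macro names are
replaced by the cxgb4 _S/_M/_V/_G/_F suffix convention. A minimal sketch of
how such a macro family is laid out, with FW_CMD_OP as the example (field
widths illustrative):

	#define FW_CMD_OP_S		24
	#define FW_CMD_OP_M		0xff
	#define FW_CMD_OP_V(x)		((x) << FW_CMD_OP_S)	/* insert field */
	#define FW_CMD_OP_G(x)		(((x) >> FW_CMD_OP_S) & FW_CMD_OP_M) /* extract */
	#define FW_CMD_REQUEST_S	23
	#define FW_CMD_REQUEST_V(x)	((x) << FW_CMD_REQUEST_S)
	#define FW_CMD_REQUEST_F	FW_CMD_REQUEST_V(1U)	/* single-bit flag */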
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index e285f384b096..07719676c305 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -216,14 +216,10 @@ struct net_device * __init mac89x0_probe(int unit)
ioaddr = (unsigned long)
nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE);
{
- unsigned long flags;
int card_present;
- local_irq_save(flags);
- card_present = (hwreg_present((void*) ioaddr+4) &&
- hwreg_present((void*) ioaddr + DATA_PORT));
- local_irq_restore(flags);
-
+ card_present = (hwreg_present((void *)ioaddr + 4) &&
+ hwreg_present((void *)ioaddr + DATA_PORT));
if (!card_present)
goto out;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 5ba5ad071bb6..25c4d88853d8 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -187,6 +187,7 @@ struct enic {
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
u32 rx_copybreak;
+ u8 rss_key[ENIC_RSS_LEN];
};
static inline struct device *enic_get_dev(struct enic *enic)
@@ -246,5 +247,6 @@ int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
+int __enic_set_rsskey(struct enic *enic);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 85173d620758..eba1eb846d34 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -23,6 +23,7 @@
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
+#include "vnic_rss.h"
struct enic_stat {
char name[ETH_GSTRING_LEN];
@@ -416,6 +417,40 @@ static int enic_set_tunable(struct net_device *dev,
return ret;
}
+static u32 enic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return ENIC_RSS_LEN;
+}
+
+static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (hkey)
+ memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
+ indir)
+ return -EINVAL;
+
+ if (hkey)
+ memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
+
+ return __enic_set_rsskey(enic);
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
@@ -430,6 +465,9 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxnfc = enic_get_rxnfc,
.get_tunable = enic_get_tunable,
.set_tunable = enic_set_tunable,
+ .get_rxfh_key_size = enic_get_rxfh_key_size,
+ .get_rxfh = enic_get_rxfh,
+ .set_rxfh = enic_set_rxfh,
};
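These three hooks back the generic ethtool RSS interface: "ethtool -x <dev>"
reads the 40-byte key through enic_get_rxfh(), and "ethtool -X <dev> hkey ..."
lands in enic_set_rxfh(); as the code above shows, changing the indirection
table or selecting a hash function other than Toeplitz (ETH_RSS_HASH_TOP) is
rejected with -EINVAL.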
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 73cf1653a4a3..868d0f605d60 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -283,12 +283,10 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
return IRQ_HANDLED;
}
- if (ENIC_TEST_INTR(pba, io_intr)) {
- if (napi_schedule_prep(&enic->napi[0]))
- __napi_schedule(&enic->napi[0]);
- } else {
+ if (ENIC_TEST_INTR(pba, io_intr))
+ napi_schedule_irqoff(&enic->napi[0]);
+ else
vnic_intr_unmask(&enic->intr[io_intr]);
- }
return IRQ_HANDLED;
}
@@ -313,7 +311,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
* writes).
*/
- napi_schedule(&enic->napi[0]);
+ napi_schedule_irqoff(&enic->napi[0]);
return IRQ_HANDLED;
}
@@ -322,7 +320,7 @@ static irqreturn_t enic_isr_msix(int irq, void *data)
{
struct napi_struct *napi = data;
- napi_schedule(napi);
+ napi_schedule_irqoff(napi);
return IRQ_HANDLED;
}
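Why napi_schedule_irqoff() is safe in these paths (our reading, not stated in
the patch): all three are hardirq handlers, so local interrupts are already
disabled and the IRQ save/restore inside plain napi_schedule() is overhead:

	/* Roughly: __napi_schedule() brackets its work with local_irq_save()/
	 * local_irq_restore(); the _irqoff variant skips that because the
	 * caller guarantees it runs with interrupts disabled.
	 */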
@@ -531,8 +529,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
{
struct enic *enic = netdev_priv(netdev);
struct vnic_wq *wq;
- unsigned long flags;
unsigned int txq_map;
+ struct netdev_queue *txq;
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
@@ -541,6 +539,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
wq = &enic->wq[txq_map];
+ txq = netdev_get_tx_queue(netdev, txq_map);
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
* which is very likely. In the off chance it's going to take
@@ -554,23 +553,25 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
+ spin_lock(&enic->wq_lock[txq_map]);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
- netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+ netif_tx_stop_queue(txq);
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
+ spin_unlock(&enic->wq_lock[txq_map]);
return NETDEV_TX_BUSY;
}
enic_queue_wq_skb(enic, wq, skb);
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
- netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+ netif_tx_stop_queue(txq);
+ if (!skb->xmit_more || netif_xmit_stopped(txq))
+ vnic_wq_doorbell(wq);
- spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
+ spin_unlock(&enic->wq_lock[txq_map]);
return NETDEV_TX_OK;
}
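The net effect of the xmit_more handling above: descriptors are posted under
the queue lock, but the MMIO doorbell is rung only when the stack signals no
further packet is imminent or the queue has just been stopped:

	enic_queue_wq_skb(enic, wq, skb);	/* post descriptors only */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);		/* one register write per burst */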
@@ -1312,9 +1313,10 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
if (!wq_work_done) {
napi_complete(napi);
vnic_intr_unmask(&enic->intr[intr]);
+ return 0;
}
- return 0;
+ return budget;
}
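This follows the NAPI contract: a poll method that has finished its work calls
napi_complete() and returns less than budget, re-arming the interrupt, while
returning budget keeps the instance scheduled for further polling. The old
unconditional "return 0" claimed completion even when TX work had been done
without napi_complete() being called.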
static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
@@ -1886,25 +1888,23 @@ static int enic_dev_hang_reset(struct enic *enic)
return err;
}
-static int enic_set_rsskey(struct enic *enic)
+int __enic_set_rsskey(struct enic *enic)
{
+ union vnic_rss_key *rss_key_buf_va;
dma_addr_t rss_key_buf_pa;
- union vnic_rss_key *rss_key_buf_va = NULL;
- union vnic_rss_key rss_key = {
- .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
- .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
- .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
- .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
- };
- int err;
+ int i, kidx, bidx, err;
- rss_key_buf_va = pci_alloc_consistent(enic->pdev,
- sizeof(union vnic_rss_key), &rss_key_buf_pa);
+ rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
+ sizeof(union vnic_rss_key),
+ &rss_key_buf_pa);
if (!rss_key_buf_va)
return -ENOMEM;
- memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
-
+ for (i = 0; i < ENIC_RSS_LEN; i++) {
+ kidx = i / ENIC_RSS_BYTES_PER_KEY;
+ bidx = i % ENIC_RSS_BYTES_PER_KEY;
+ rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
+ }
spin_lock_bh(&enic->devcmd_lock);
err = enic_set_rss_key(enic,
rss_key_buf_pa,
@@ -1917,6 +1917,13 @@ static int enic_set_rsskey(struct enic *enic)
return err;
}
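The scatter loop above maps the flat key into the padded hardware layout:
byte i of the 40-byte key lands at key[i / 10].b[i % 10] (with
ENIC_RSS_BYTES_PER_KEY == 10, defined later in this patch). For example:

	/* i == 25: kidx == 2, bidx == 5 -> rss_key_buf_va->key[2].b[5].
	 * The 6 pad bytes of each 16-byte subkey are never written; they
	 * stay zero because the buffer comes from pci_zalloc_consistent().
	 */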
+static int enic_set_rsskey(struct enic *enic)
+{
+ netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);
+
+ return __enic_set_rsskey(enic);
+}
+
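netdev_rss_key_fill() copies from a single random key the core generates once
per boot, so every device derives its default RSS key from the same entropy
instead of each driver hardcoding a key, as the removed initializer above did.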
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
dma_addr_t rss_cpu_buf_pa;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h
index fa421baf45b8..881fa18542b3 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rss.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h
@@ -20,11 +20,16 @@
#define _VNIC_RSS_H_
/* RSS key array */
+
+#define ENIC_RSS_BYTES_PER_KEY 10
+#define ENIC_RSS_KEYS 4
+#define ENIC_RSS_LEN (ENIC_RSS_BYTES_PER_KEY * ENIC_RSS_KEYS)
+
union vnic_rss_key {
struct {
- u8 b[10];
+ u8 b[ENIC_RSS_BYTES_PER_KEY];
u8 b_pad[6];
- } key[4];
+ } key[ENIC_RSS_KEYS];
u64 raw[8];
};
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 2c6c70804a39..816f1ad6072f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -104,6 +104,17 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
return wq->to_use->desc;
}
+static inline void vnic_wq_doorbell(struct vnic_wq *wq)
+{
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
+}
+
static inline void vnic_wq_post(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
unsigned int len, int sop, int eop,
@@ -122,15 +133,6 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
buf->wr_id = wrid;
buf = buf->next;
- if (eop) {
- /* Adding write memory barrier prevents compiler and/or CPU
- * reordering, thus avoiding descriptor posting before
- * descriptor is initialized. Otherwise, hardware can read
- * stale descriptor fields.
- */
- wmb();
- iowrite32(buf->index, &wq->ctrl->posted_index);
- }
wq->to_use = buf;
wq->ring.desc_avail -= desc_skip_cnt;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index cf8b6ff21613..badff181e719 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -995,7 +995,6 @@ static void de4x5_dbg_mii(struct net_device *dev, int k);
static void de4x5_dbg_media(struct net_device *dev);
static void de4x5_dbg_srom(struct de4x5_srom *p);
static void de4x5_dbg_rx(struct sk_buff *skb, int len);
-static int de4x5_strncmp(char *a, char *b, int n);
static int dc21041_infoleaf(struct net_device *dev);
static int dc21140_infoleaf(struct net_device *dev);
static int dc21142_infoleaf(struct net_device *dev);
@@ -4102,8 +4101,7 @@ get_hw_addr(struct net_device *dev)
}
/*
-** Test for enet addresses in the first 32 bytes. The built-in strncmp
-** didn't seem to work here...?
+** Test for enet addresses in the first 32 bytes.
*/
static int
de4x5_bad_srom(struct de4x5_private *lp)
@@ -4111,8 +4109,8 @@ de4x5_bad_srom(struct de4x5_private *lp)
int i, status = 0;
for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
- if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
- !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
+ if (!memcmp(&lp->srom, &enet_det[i], 3) &&
+ !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
if (i == 0) {
status = SMC;
} else if (i == 1) {
@@ -4125,18 +4123,6 @@ de4x5_bad_srom(struct de4x5_private *lp)
return status;
}
-static int
-de4x5_strncmp(char *a, char *b, int n)
-{
- int ret=0;
-
- for (;n && !ret; n--) {
- ret = *a++ - *b++;
- }
-
- return ret;
-}
-
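The removed helper compares exactly n bytes with no NUL handling, which is
memcmp() semantics; the libc strncmp() it was written around stops once both
strings reach a NUL, misbehaving on binary SROM bytes. An illustrative
contrast (not from the patch):

	/* strncmp("\0ab", "\0cd", 3) == 0  -- comparison stops at the NUL  */
	/* memcmp("\0ab", "\0cd", 3)  != 0  -- all three bytes are compared */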
static void
srom_repair(struct net_device *dev, int card)
{
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c8205606c775..50a00777228e 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -2265,7 +2265,7 @@ static int __init dmfe_init_module(void)
static void __exit dmfe_cleanup_module(void)
{
- DMFE_DBUG(0, "dmfe_clean_module() ", debug);
+ DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
pci_unregister_driver(&dmfe_driver);
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 4061f9b22812..1c5916b13778 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1837,7 +1837,7 @@ static int __init uli526x_init_module(void)
static void __exit uli526x_cleanup_module(void)
{
- ULI526X_DBUG(0, "uli526x_clean_module() ", debug);
+ ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug);
pci_unregister_driver(&uli526x_driver);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index e42a791c1835..73a500ccbf69 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1171,7 +1171,8 @@ static u32 be_get_rxfh_key_size(struct net_device *netdev)
return RSS_HASH_KEY_LEN;
}
-static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i;
@@ -1185,16 +1186,23 @@ static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
if (hkey)
memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
return 0;
}
static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *hkey)
+ const u8 *hkey, const u8 hfunc)
{
int rc = 0, i, j;
struct be_adapter *adapter = netdev_priv(netdev);
u8 rsstable[RSS_INDIR_TABLE_LEN];
+ /* We do not allow change in unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
if (indir) {
struct be_rx_obj *rxo;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 597c463e384d..9461ad8d837b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -887,7 +887,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
}
if (vlan_tag) {
- skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ vlan_tag);
if (unlikely(!skb))
return skb;
skb->vlan_tci = 0;
@@ -896,7 +897,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
/* Insert the outer VLAN, if any */
if (adapter->qnq_vid) {
vlan_tag = adapter->qnq_vid;
- skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ vlan_tag);
if (unlikely(!skb))
return skb;
if (skip_hw_vlan)
@@ -1015,9 +1017,8 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
* to pad short packets (<= 32 bytes) to a 36-byte length.
*/
if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
- if (skb_padto(skb, 36))
+ if (skb_put_padto(skb, 36))
return NULL;
- skb->len = 36;
}
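skb_put_padto() zero-pads the frame to 36 bytes and advances skb->len to
match, folding the removed manual "skb->len = 36" assignment into the helper.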
if (BEx_chip(adapter) || lancer_chip(adapter)) {
@@ -2853,10 +2854,10 @@ static int be_close(struct net_device *netdev)
static int be_rx_qs_create(struct be_adapter *adapter)
{
+ struct rss_info *rss = &adapter->rss_info;
+ u8 rss_key[RSS_HASH_KEY_LEN];
struct be_rx_obj *rxo;
int rc, i, j;
- u8 rss_hkey[RSS_HASH_KEY_LEN];
- struct rss_info *rss = &adapter->rss_info;
for_all_rx_queues(adapter, rxo, i) {
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2901,15 +2902,15 @@ static int be_rx_qs_create(struct be_adapter *adapter)
rss->rss_flags = RSS_ENABLE_NONE;
}
- get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+ netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
- 128, rss_hkey);
+ 128, rss_key);
if (rc) {
rss->rss_flags = RSS_ENABLE_NONE;
return rc;
}
- memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+ memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
/* First time posting */
for_all_rx_queues(adapter, rxo, i)
@@ -4365,7 +4366,8 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
hsw_mode == PORT_FWD_TYPE_VEPA ?
- BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
+ 0, 0);
}
#ifdef CONFIG_BE2NET_VXLAN
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 9af296a1ca99..469691ad4a1e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -38,9 +38,9 @@
#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
#define FEC_OPD 0x0ec /* Opcode + Pause duration */
-#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */
-#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */
-#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */
#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */
#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */
#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */
@@ -53,16 +53,18 @@
#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */
#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_BUFF_SIZE_1 0x168 /* Maximum receive buff ring1 size */
#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */
#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_BUFF_SIZE_2 0x174 /* Maximum receive buff ring2 size */
#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */
-#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_0 0x188 /* Maximum receive buff size */
#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
-#define FEC_RACC 0x1C4 /* Receive Accelerator function */
+#define FEC_RACC 0x1c4 /* Receive Accelerator function */
#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
@@ -82,57 +84,57 @@
#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
-#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
-#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
#define RMON_T_COL 0x224 /* RMON TX collision count */
#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
-#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
-#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
#define RMON_T_OCTETS 0x244 /* RMON TX octets */
#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
-#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */
+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
-#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */
+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
-#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */
+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
-#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */
+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
-#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */
-#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
-#define RMON_R_RESVD_O 0x2A4 /* Reserved */
-#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */
-#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */
-#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */
-#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */
-#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */
-#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */
-#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */
-#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */
-#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */
-#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */
-#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */
-#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */
-#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */
-#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */
-#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */
+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
#else
@@ -165,21 +167,23 @@
#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */
#define FEC_X_DES_START_1 FEC_X_DES_START_0
#define FEC_X_DES_START_2 FEC_X_DES_START_0
-#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_0 0x3d8 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_1 FEC_R_BUFF_SIZE_0
+#define FEC_R_BUFF_SIZE_2 FEC_R_BUFF_SIZE_0
#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
/* Not existed in real chip
* Just for pass build.
*/
-#define FEC_RCMR_1 0xFFF
-#define FEC_RCMR_2 0xFFF
-#define FEC_DMA_CFG_1 0xFFF
-#define FEC_DMA_CFG_2 0xFFF
-#define FEC_TXIC0 0xFFF
-#define FEC_TXIC1 0xFFF
-#define FEC_TXIC2 0xFFF
-#define FEC_RXIC0 0xFFF
-#define FEC_RXIC1 0xFFF
-#define FEC_RXIC2 0xFFF
+#define FEC_RCMR_1 0xfff
+#define FEC_RCMR_2 0xfff
+#define FEC_DMA_CFG_1 0xfff
+#define FEC_DMA_CFG_2 0xfff
+#define FEC_TXIC0 0xfff
+#define FEC_TXIC1 0xfff
+#define FEC_TXIC2 0xfff
+#define FEC_RXIC0 0xfff
+#define FEC_RXIC1 0xfff
+#define FEC_RXIC2 0xfff
#endif /* CONFIG_M5272 */
@@ -213,60 +217,60 @@ struct bufdesc_ex {
* The following definitions courtesy of commproc.h, which where
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
*/
-#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
-#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
-#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
-#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
-#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
-#define BD_SC_BR ((ushort)0x0020) /* Break received */
-#define BD_SC_FR ((ushort)0x0010) /* Framing error */
-#define BD_SC_PR ((ushort)0x0008) /* Parity error */
-#define BD_SC_OV ((ushort)0x0002) /* Overrun */
-#define BD_SC_CD ((ushort)0x0001) /* ?? */
+#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
+#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
+#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
+#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
+#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
+#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
+#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
+#define BD_SC_BR ((ushort)0x0020) /* Break received */
+#define BD_SC_FR ((ushort)0x0010) /* Framing error */
+#define BD_SC_PR ((ushort)0x0008) /* Parity error */
+#define BD_SC_OV ((ushort)0x0002) /* Overrun */
+#define BD_SC_CD ((ushort)0x0001) /* ?? */
/* Buffer descriptor control/status used by Ethernet receive.
-*/
-#define BD_ENET_RX_EMPTY ((ushort)0x8000)
-#define BD_ENET_RX_WRAP ((ushort)0x2000)
-#define BD_ENET_RX_INTR ((ushort)0x1000)
-#define BD_ENET_RX_LAST ((ushort)0x0800)
-#define BD_ENET_RX_FIRST ((ushort)0x0400)
-#define BD_ENET_RX_MISS ((ushort)0x0100)
-#define BD_ENET_RX_LG ((ushort)0x0020)
-#define BD_ENET_RX_NO ((ushort)0x0010)
-#define BD_ENET_RX_SH ((ushort)0x0008)
-#define BD_ENET_RX_CR ((ushort)0x0004)
-#define BD_ENET_RX_OV ((ushort)0x0002)
-#define BD_ENET_RX_CL ((ushort)0x0001)
-#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
+ */
+#define BD_ENET_RX_EMPTY ((ushort)0x8000)
+#define BD_ENET_RX_WRAP ((ushort)0x2000)
+#define BD_ENET_RX_INTR ((ushort)0x1000)
+#define BD_ENET_RX_LAST ((ushort)0x0800)
+#define BD_ENET_RX_FIRST ((ushort)0x0400)
+#define BD_ENET_RX_MISS ((ushort)0x0100)
+#define BD_ENET_RX_LG ((ushort)0x0020)
+#define BD_ENET_RX_NO ((ushort)0x0010)
+#define BD_ENET_RX_SH ((ushort)0x0008)
+#define BD_ENET_RX_CR ((ushort)0x0004)
+#define BD_ENET_RX_OV ((ushort)0x0002)
+#define BD_ENET_RX_CL ((ushort)0x0001)
+#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
/* Enhanced buffer descriptor control/status used by Ethernet receive */
-#define BD_ENET_RX_VLAN 0x00000004
+#define BD_ENET_RX_VLAN 0x00000004
/* Buffer descriptor control/status used by Ethernet transmit.
-*/
-#define BD_ENET_TX_READY ((ushort)0x8000)
-#define BD_ENET_TX_PAD ((ushort)0x4000)
-#define BD_ENET_TX_WRAP ((ushort)0x2000)
-#define BD_ENET_TX_INTR ((ushort)0x1000)
-#define BD_ENET_TX_LAST ((ushort)0x0800)
-#define BD_ENET_TX_TC ((ushort)0x0400)
-#define BD_ENET_TX_DEF ((ushort)0x0200)
-#define BD_ENET_TX_HB ((ushort)0x0100)
-#define BD_ENET_TX_LC ((ushort)0x0080)
-#define BD_ENET_TX_RL ((ushort)0x0040)
-#define BD_ENET_TX_RCMASK ((ushort)0x003c)
-#define BD_ENET_TX_UN ((ushort)0x0002)
-#define BD_ENET_TX_CSL ((ushort)0x0001)
-#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
-
-/*enhanced buffer descriptor control/status used by Ethernet transmit*/
-#define BD_ENET_TX_INT 0x40000000
-#define BD_ENET_TX_TS 0x20000000
-#define BD_ENET_TX_PINS 0x10000000
-#define BD_ENET_TX_IINS 0x08000000
+ */
+#define BD_ENET_TX_READY ((ushort)0x8000)
+#define BD_ENET_TX_PAD ((ushort)0x4000)
+#define BD_ENET_TX_WRAP ((ushort)0x2000)
+#define BD_ENET_TX_INTR ((ushort)0x1000)
+#define BD_ENET_TX_LAST ((ushort)0x0800)
+#define BD_ENET_TX_TC ((ushort)0x0400)
+#define BD_ENET_TX_DEF ((ushort)0x0200)
+#define BD_ENET_TX_HB ((ushort)0x0100)
+#define BD_ENET_TX_LC ((ushort)0x0080)
+#define BD_ENET_TX_RL ((ushort)0x0040)
+#define BD_ENET_TX_RCMASK ((ushort)0x003c)
+#define BD_ENET_TX_UN ((ushort)0x0002)
+#define BD_ENET_TX_CSL ((ushort)0x0001)
+#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
+
+/* enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT 0x40000000
+#define BD_ENET_TX_TS 0x20000000
+#define BD_ENET_TX_PINS 0x10000000
+#define BD_ENET_TX_IINS 0x08000000
/* This device has up to three irqs on some platforms */
@@ -279,36 +283,40 @@ struct bufdesc_ex {
#define FEC_ENET_MAX_TX_QS 3
#define FEC_ENET_MAX_RX_QS 3
-#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \
- ((X == 2) ? \
+#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \
+ (((X) == 2) ? \
FEC_R_DES_START_2 : FEC_R_DES_START_0))
-#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \
- ((X == 2) ? \
+#define FEC_X_DES_START(X) (((X) == 1) ? FEC_X_DES_START_1 : \
+ (((X) == 2) ? \
FEC_X_DES_START_2 : FEC_X_DES_START_0))
-#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \
- ((X == 2) ? \
+#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
+ (((X) == 2) ? \
+ FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
+#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
+ (((X) == 2) ? \
FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
-#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \
- ((X == 2) ? \
+#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
+ (((X) == 2) ? \
FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
-#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
#define DMA_CLASS_EN (1 << 16)
-#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
-#define IDLE_SLOPE_MASK 0xFFFF
+#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK 0xffff
#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
-#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+#define IDLE_SLOPE(X) (((X) == 1) ? \
+ (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
(IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
-#define RCMR_MATCHEN (0x1 << 16)
-#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2))
+#define RCMR_MATCHEN (0x1 << 16)
+#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2))
#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
-#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
-#define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20)
+#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
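Why the added parentheses matter: without them, operator precedence can split
a compound macro argument. An illustrative expansion:

	/* old: #define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20)
	 *      FEC_TX_BD_FTYPE(a | b) -> ((a | b & 0xF) << 20)   // & binds before |
	 * new: #define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
	 *      FEC_TX_BD_FTYPE(a | b) -> (((a | b) & 0xf) << 20)
	 */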
/* The number of Tx and Rx buffers. These are allocated from the page
* pool. The code may assume these are power of two, so it is best
@@ -326,8 +334,8 @@ struct bufdesc_ex {
#define TX_RING_SIZE 512 /* Must be power of two */
#define TX_RING_MOD_MASK 511 /* for this to work */
-#define BD_ENET_RX_INT 0x00800000
-#define BD_ENET_RX_PTP ((ushort)0x0400)
+#define BD_ENET_RX_INT 0x00800000
+#define BD_ENET_RX_PTP ((ushort)0x0400)
#define BD_ENET_RX_ICE 0x00000020
#define BD_ENET_RX_PCR 0x00000010
#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
@@ -359,13 +367,13 @@ struct bufdesc_ex {
/* ENET interrupt coalescing macro define */
#define FEC_ITR_CLK_SEL (0x1 << 30)
#define FEC_ITR_EN (0x1 << 31)
-#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20)
-#define FEC_ITR_ICTT(X) ((X) & 0xFFFF)
+#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20)
+#define FEC_ITR_ICTT(X) ((X) & 0xffff)
#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */
#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */
-#define FEC_VLAN_TAG_LEN 0x04
-#define FEC_ETHTYPE_LEN 0x02
+#define FEC_VLAN_TAG_LEN 0x04
+#define FEC_ETHTYPE_LEN 0x02
/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC (1 << 0)
@@ -390,7 +398,7 @@ struct bufdesc_ex {
* frames not being transmitted until there is a 0-to-1 transition on
* ENET_TDAR[TDAR].
*/
-#define FEC_QUIRK_ERR006358 (1 << 7)
+#define FEC_QUIRK_ERR006358 (1 << 7)
/* ENET IP hw AVB
*
* i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support.
@@ -501,8 +509,9 @@ struct fec_enet_private {
int speed;
struct completion mdio_done;
int irq[FEC_IRQ_NUM];
- int bufdesc_ex;
+ bool bufdesc_ex;
int pause_flag;
+ u32 quirks;
struct napi_struct napi;
int csum_flags;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3dca494797bd..fee2afe47eb3 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -287,15 +287,13 @@ static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
return entries > 0 ? entries : entries + txq->tx_ring_size;
}
-static void *swap_buffer(void *bufaddr, int len)
+static void swap_buffer(void *bufaddr, int len)
{
int i;
unsigned int *buf = bufaddr;
- for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
- *buf = cpu_to_be32(*buf);
-
- return bufaddr;
+ for (i = 0; i < len; i += 4, buf++)
+ swab32s(buf);
}
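Beyond dropping the unused return value, the swab32s() rewrite also changes
behaviour on big-endian kernels (our reading): cpu_to_be32() is the identity
there, so the old loop never swapped, while swab32s() byte-reverses
unconditionally and in place:

	u32 x = 0x11223344;
	swab32s(&x);		/* x == 0x44332211 on any endianness */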
static void swap_buffer2(void *dst_buf, void *src_buf, int len)
@@ -361,8 +359,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct bufdesc *bdp = txq->cur_tx;
struct bufdesc_ex *ebdp;
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -398,7 +394,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
}
if (fep->bufdesc_ex) {
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -410,11 +406,11 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
if (((unsigned long) bufaddr) & fep->tx_align ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], bufaddr, frag_len);
bufaddr = txq->tx_bounce[index];
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, frag_len);
}
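This and the following fec_main.c hunks replace repeated
platform_get_device_id() lookups in hot paths with the new fep->quirks field;
presumably it is filled once at probe time (that hunk is outside this
excerpt), along the lines of:

	/* hypothetical probe-time caching; not shown in this excerpt */
	fep->quirks = platform_get_device_id(pdev)->driver_data;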
@@ -450,8 +446,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
int nr_frags = skb_shinfo(skb)->nr_frags;
struct bufdesc *bdp, *last_bdp;
void *bufaddr;
@@ -490,11 +484,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
queue = skb_get_queue_mapping(skb);
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
if (((unsigned long) bufaddr) & fep->tx_align ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], skb->data, buflen);
bufaddr = txq->tx_bounce[index];
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, buflen);
}
@@ -529,7 +523,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
fep->hwts_tx_en))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -573,8 +567,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
int size, bool last_tcp, bool is_last)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
unsigned short queue = skb_get_queue_mapping(skb);
unsigned short status;
@@ -587,11 +579,11 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
if (((unsigned long) data) & fep->tx_align ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], data, size);
data = txq->tx_bounce[index];
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, size);
}
@@ -607,7 +599,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
bdp->cbd_bufaddr = addr;
if (fep->bufdesc_ex) {
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -635,8 +627,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
unsigned short queue = skb_get_queue_mapping(skb);
@@ -652,11 +642,11 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
if (((unsigned long)bufaddr) & fep->tx_align ||
- id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], skb->data, hdr_len);
bufaddr = txq->tx_bounce[index];
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, hdr_len);
dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
@@ -673,7 +663,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
bdp->cbd_datlen = hdr_len;
if (fep->bufdesc_ex) {
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -698,8 +688,6 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct tso_t tso;
unsigned int index = 0;
int ret;
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
dev_kfree_skb_any(skb);
@@ -761,7 +749,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
txq->cur_tx = bdp;
/* Trigger transmission start */
- if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) ||
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
!readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
!readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
!readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
@@ -879,6 +867,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -924,8 +913,6 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
u32 val;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
@@ -935,7 +922,7 @@ fec_restart(struct net_device *ndev)
* For the i.MX6SX SoC, the ENET block uses an AXI bus, so we
* disable the MAC instead of resetting it.
*/
- if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
@@ -946,7 +933,7 @@ fec_restart(struct net_device *ndev)
* The enet-mac reset also clears the MAC address registers,
* so we need to reconfigure them.
*/
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
@@ -955,9 +942,6 @@ fec_restart(struct net_device *ndev)
/* Clear any outstanding interrupt. */
writel(0xffc00000, fep->hwp + FEC_IEVENT);
- /* Set maximum receive buffer size. */
- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
-
fec_enet_bd_init(ndev);
fec_enet_enable_ring(ndev);
@@ -992,7 +976,7 @@ fec_restart(struct net_device *ndev)
* The phy interface and speed need to get configured
* differently on enet-mac.
*/
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* Enable flow control and length check */
rcntl |= 0x40000000 | 0x00000020;
@@ -1015,7 +999,7 @@ fec_restart(struct net_device *ndev)
}
} else {
#ifdef FEC_MIIGSK_ENR
- if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+ if (fep->quirks & FEC_QUIRK_USE_GASKET) {
u32 cfgr;
/* disable the gasket and wait */
writel(0, fep->hwp + FEC_MIIGSK_ENR);
@@ -1068,7 +1052,7 @@ fec_restart(struct net_device *ndev)
writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= (1 << 8);
/* enable ENET store and forward mode */
@@ -1102,8 +1086,6 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
/* We cannot expect a graceful transmit stop without link !!! */
@@ -1118,7 +1100,7 @@ fec_stop(struct net_device *ndev)
* For the i.MX6SX SoC, the ENET block uses an AXI bus, so we
* disable the MAC instead of resetting it.
*/
- if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
writel(1, fep->hwp + FEC_ECNTRL);
@@ -1128,7 +1110,7 @@ fec_stop(struct net_device *ndev)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled for the MII interrupt to keep working */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
writel(2, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
@@ -1350,8 +1332,6 @@ static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
@@ -1365,7 +1345,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
u16 vlan_tag;
int index = 0;
bool is_copybreak;
- bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
+ bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1880,8 +1860,6 @@ failed_clk_ipg:
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct phy_device *phy_dev = NULL;
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
@@ -1894,6 +1872,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_dev = of_phy_connect(ndev, fep->phy_node,
&fec_enet_adjust_link, 0,
fep->phy_interface);
+ if (!phy_dev)
+ return -ENODEV;
} else {
/* check for attached phy */
for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
@@ -1927,7 +1907,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
+ if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
@@ -1955,8 +1935,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
static struct mii_bus *fec0_mii_bus;
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct device_node *node;
int err = -ENXIO, i;
@@ -1976,7 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+ if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
if (mii_cnt && fec0_mii_bus) {
fep->mii_bus = fec0_mii_bus;
@@ -1997,7 +1975,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* document.
*/
fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ if (fep->quirks & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -2039,7 +2017,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
mii_cnt++;
/* save fec0 mii_bus */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ if (fep->quirks & FEC_QUIRK_ENET_MAC)
fec0_mii_bus = fep->mii_bus;
return 0;
@@ -2308,11 +2286,9 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
int rx_itr, tx_itr;
- if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return;
/* Must be greater than zero to avoid unpredictable behavior */
@@ -2347,10 +2323,8 @@ static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
- if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return -EOPNOTSUPP;
ec->rx_coalesce_usecs = fep->rx_time_itr;
@@ -2366,12 +2340,9 @@ static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
-
unsigned int cycle;
- if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
@@ -2951,8 +2922,6 @@ static const struct net_device_ops fec_netdev_ops = {
static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *cbd_base;
@@ -3031,11 +3000,11 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
- if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
+ if (fep->quirks & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+ if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
/* enable hw accelerator */
@@ -3044,7 +3013,7 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
- if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
fep->tx_align = 0;
fep->rx_align = 0x3f;
}
@@ -3144,10 +3113,6 @@ fec_probe(struct platform_device *pdev)
int num_tx_qs;
int num_rx_qs;
- of_id = of_match_device(fec_dt_ids, &pdev->dev);
- if (of_id)
- pdev->id_entry = of_id->data;
-
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
/* Init network device */
@@ -3161,13 +3126,17 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
+ of_id = of_match_device(fec_dt_ids, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ fep->quirks = pdev->id_entry->driver_data;
+
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
- if (pdev->id_entry &&
- (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
+ if (fep->quirks & FEC_QUIRK_HAS_GBIT)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif
@@ -3184,8 +3153,6 @@ fec_probe(struct platform_device *pdev)
fep->pdev = pdev;
fep->dev_id = dev_id++;
- fep->bufdesc_ex = 0;
-
platform_set_drvdata(pdev, ndev);
phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -3238,12 +3205,11 @@ fec_probe(struct platform_device *pdev)
if (IS_ERR(fep->clk_ref))
fep->clk_ref = NULL;
+ fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
- fep->bufdesc_ex =
- pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
if (IS_ERR(fep->clk_ptp)) {
fep->clk_ptp = NULL;
- fep->bufdesc_ex = 0;
+ fep->bufdesc_ex = false;
}
ret = fec_enet_clk_enable(ndev, true);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4fdf0aa16978..86dccb26fecc 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -173,10 +173,12 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
static int gfar_init_bds(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
struct rxbd8 *rxbdp;
+ u32 *rfbptr;
int i, j;
for (i = 0; i < priv->num_tx_queues; i++) {
@@ -201,6 +203,7 @@ static int gfar_init_bds(struct net_device *ndev)
txbdp->status |= TXBD_WRAP;
}
+ rfbptr = &regs->rfbptr0;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->cur_rx = rx_queue->rx_bd_base;
@@ -227,6 +230,8 @@ static int gfar_init_bds(struct net_device *ndev)
rxbdp++;
}
+ rx_queue->rfbptr = rfbptr;
+ rfbptr += 2;
}
return 0;
@@ -336,6 +341,20 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
}
}
+static void gfar_init_rqprm(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i;
+
+ baddr = &regs->rqprm0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
+ (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
+ baddr++;
+ }
+}
+
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
@@ -396,6 +415,13 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ /* Clear the LFC bit */
+ gfar_write(&regs->rctrl, rctrl);
+ /* Init flow control threshold values */
+ gfar_init_rqprm(priv);
+ gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
+ rctrl |= RCTRL_LFC;
+
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
}
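
The LFC receive flow-control bring-up above is deliberately two-staged: rctrl is first written with the LFC bit clear, the per-queue thresholds and the pause-time value are programmed, and only then is RCTRL_LFC set and rctrl written again. Each RQPRM word packs the free-buffer threshold into the top byte above the ring size; a small standalone model of that encoding, using the defines this patch adds to gianfar.h:

	#include <stdint.h>
	#include <stdio.h>

	#define FBTHR_SHIFT		24	/* from the gianfar.h hunk below */
	#define DEFAULT_RX_LFC_THR	16

	int main(void)
	{
		uint32_t rx_ring_size = 256;	/* example ring size */
		uint32_t rqprm = rx_ring_size |
				 (DEFAULT_RX_LFC_THR << FBTHR_SHIFT);

		printf("rqprm = 0x%08x\n", rqprm);	/* prints 0x10000100 */
		return 0;
	}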
@@ -1687,6 +1713,9 @@ static int init_phy(struct net_device *dev)
priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
priv->phydev->advertising = priv->phydev->supported;
+ /* Add support for flow control, but don't advertise it by default */
+ priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+
return 0;
}
@@ -2856,6 +2885,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
/* Setup the new bdp */
gfar_new_rxbdp(rx_queue, bdp, newskb);
+ /* Update Last Free RxBD pointer for LFC */
+ if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
+ gfar_write(rx_queue->rfbptr, (u32)bdp);
+
/* Update to the next pointer */
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
@@ -3370,7 +3403,11 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+ lcl_adv = 0;
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
@@ -3386,6 +3423,9 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct phy_device *phydev = priv->phydev;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int i;
+ struct rxbd8 *bdp;
if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
return;
@@ -3394,6 +3434,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
+ u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
@@ -3438,6 +3479,26 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
tempval1 |= gfar_get_flowctrl_cfg(priv);
+ /* Turn last free buffer recording on */
+ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ bdp = rx_queue->cur_rx;
+ /* skip to previous bd */
+ bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
+ rx_queue->rx_bd_base,
+ rx_queue->rx_ring_size);
+
+ if (rx_queue->rfbptr)
+ gfar_write(rx_queue->rfbptr, (u32)bdp);
+ }
+
+ priv->tx_actual_en = 1;
+ }
+
+ if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
+ priv->tx_actual_en = 0;
+
gfar_write(&regs->maccfg1, tempval1);
gfar_write(&regs->maccfg2, tempval);
gfar_write(&regs->ecntrl, ecntrl);
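
When the resolved pause configuration turns TX flow control on, each ring's last-free-RxBD pointer (RFBPTR) has to be primed before hardware can decide when to emit pause frames; the loop above seeds it with the descriptor one slot behind cur_rx, i.e. the last buffer software has handed to hardware. A standalone model of that backwards wrap, which is what skip_bd(bdp, rx_ring_size - 1, base, rx_ring_size) computes:

	#include <stdio.h>

	/* One step backwards around a ring of ring_size descriptors. */
	static unsigned int prev_bd(unsigned int cur, unsigned int ring_size)
	{
		return (cur + ring_size - 1) % ring_size;
	}

	int main(void)
	{
		printf("%u %u\n", prev_bd(0, 256), prev_bd(5, 256)); /* 255 4 */
		return 0;
	}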
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 2805cfbf1765..b581b8823a2a 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -99,6 +99,10 @@ extern const char gfar_driver_version[];
#define GFAR_MAX_FIFO_STARVE 511
#define GFAR_MAX_FIFO_STARVE_OFF 511
+#define FBTHR_SHIFT 24
+#define DEFAULT_RX_LFC_THR 16
+#define DEFAULT_LFC_PTVVAL 4
+
#define DEFAULT_RX_BUFFER_SIZE 1536
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
@@ -145,9 +149,7 @@ extern const char gfar_driver_version[];
| SUPPORTED_Autoneg \
| SUPPORTED_MII)
-#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
- | SUPPORTED_Pause \
- | SUPPORTED_Asym_Pause)
+#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full
/* TBI register addresses */
#define MII_TBICON 0x11
@@ -275,6 +277,7 @@ extern const char gfar_driver_version[];
#define RCTRL_TS_ENABLE 0x01000000
#define RCTRL_PAL_MASK 0x001f0000
+#define RCTRL_LFC 0x00004000
#define RCTRL_VLEX 0x00002000
#define RCTRL_FILREN 0x00001000
#define RCTRL_GHTX 0x00000400
@@ -851,7 +854,32 @@ struct gfar {
u8 res23c[248];
u32 attr; /* 0x.bf8 - Attributes Register */
u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
- u8 res24[688];
+ u32 rqprm0; /* 0x.c00 - Receive queue parameters register 0 */
+ u32 rqprm1; /* 0x.c04 - Receive queue parameters register 1 */
+ u32 rqprm2; /* 0x.c08 - Receive queue parameters register 2 */
+ u32 rqprm3; /* 0x.c0c - Receive queue parameters register 3 */
+ u32 rqprm4; /* 0x.c10 - Receive queue parameters register 4 */
+ u32 rqprm5; /* 0x.c14 - Receive queue parameters register 5 */
+ u32 rqprm6; /* 0x.c18 - Receive queue parameters register 6 */
+ u32 rqprm7; /* 0x.c1c - Receive queue parameters register 7 */
+ u8 res24[36];
+ u32 rfbptr0; /* 0x.c44 - Last free RxBD pointer for ring 0 */
+ u8 res24a[4];
+ u32 rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */
+ u8 res24b[4];
+ u32 rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */
+ u8 res24c[4];
+ u32 rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */
+ u8 res24d[4];
+ u32 rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */
+ u8 res24e[4];
+ u32 rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */
+ u8 res24f[4];
+ u32 rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */
+ u8 res24g[4];
+ u32 rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */
+ u8 res24h[4];
+ u8 res24x[556];
u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */
u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */
u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */
@@ -1011,6 +1039,7 @@ struct gfar_priv_rx_q {
/* RX Coalescing values */
unsigned char rxcoalescing;
unsigned long rxic;
+ u32 *rfbptr;
};
enum gfar_irqinfo_id {
@@ -1101,6 +1130,7 @@ struct gfar_private {
unsigned int num_tx_queues;
unsigned int num_rx_queues;
unsigned int num_grps;
+ int tx_actual_en;
/* Network Statistics */
struct gfar_extra_stats extra_stats;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 76d70708f864..3e1a9c1a67a9 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -579,8 +579,13 @@ static int gfar_spauseparam(struct net_device *dev,
u32 tempval;
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
- if (priv->tx_pause_en)
+
+ priv->tx_actual_en = 0;
+ if (priv->tx_pause_en) {
+ priv->tx_actual_en = 1;
tempval |= MACCFG1_TX_FLOW;
+ }
+
if (priv->rx_pause_en)
tempval |= MACCFG1_RX_FLOW;
gfar_write(&regs->maccfg1, tempval);
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 76a6e0c77d69..ae6e30d39f0f 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -490,7 +490,8 @@ static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
eid = hp100_read_id(ioaddr);
if (eid == NULL) { /* bad checksum? */
- printk(KERN_WARNING "hp100_probe: bad ID checksum at base port 0x%x\n", ioaddr);
+ printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n",
+ __func__, ioaddr);
goto out2;
}
@@ -498,7 +499,9 @@ static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
for (i = uc = 0; i < 7; i++)
uc += hp100_inb(LAN_ADDR + i);
if (uc != 0xff) {
- printk(KERN_WARNING "hp100_probe: bad lan address checksum at port 0x%x)\n", ioaddr);
+ printk(KERN_WARNING
+ "%s: bad lan address checksum at port 0x%x)\n",
+ __func__, ioaddr);
err = -EIO;
goto out2;
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 87bd953cc2ee..3f3fba9e4650 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2323,16 +2323,11 @@ static int emac_check_deps(struct emac_instance *dev,
static void emac_put_deps(struct emac_instance *dev)
{
- if (dev->mal_dev)
- of_dev_put(dev->mal_dev);
- if (dev->zmii_dev)
- of_dev_put(dev->zmii_dev);
- if (dev->rgmii_dev)
- of_dev_put(dev->rgmii_dev);
- if (dev->mdio_dev)
- of_dev_put(dev->mdio_dev);
- if (dev->tah_dev)
- of_dev_put(dev->tah_dev);
+ of_dev_put(dev->mal_dev);
+ of_dev_put(dev->zmii_dev);
+ of_dev_put(dev->rgmii_dev);
+ of_dev_put(dev->mdio_dev);
+ of_dev_put(dev->tah_dev);
}
static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
@@ -2371,8 +2366,7 @@ static int emac_wait_deps(struct emac_instance *dev)
bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
for (i = 0; i < EMAC_DEP_COUNT; i++) {
- if (deps[i].node)
- of_node_put(deps[i].node);
+ of_node_put(deps[i].node);
if (err && deps[i].ofdev)
of_dev_put(deps[i].ofdev);
}
@@ -2383,8 +2377,7 @@ static int emac_wait_deps(struct emac_instance *dev)
dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
}
- if (deps[EMAC_DEP_PREV_IDX].ofdev)
- of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
+ of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
return err;
}
@@ -3113,8 +3106,7 @@ static void __exit emac_exit(void)
/* Destroy EMAC boot list */
for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
- if (emac_boot_list[i])
- of_node_put(emac_boot_list[i]);
+ of_node_put(emac_boot_list[i]);
}
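
All three emac cleanups rely on the same property: of_dev_put() and of_node_put() are no-ops when passed NULL, so the callers' if guards were redundant. A standalone model of a NULL-tolerant release helper of that shape:

	#include <stdio.h>
	#include <stdlib.h>

	struct ref {
		int count;
	};

	static void ref_put(struct ref *r)
	{
		if (!r)			/* tolerate NULL so callers need no guard */
			return;
		if (--r->count == 0)
			free(r);
	}

	int main(void)
	{
		struct ref *r = malloc(sizeof(*r));

		if (!r)
			return 1;
		r->count = 1;
		ref_put(NULL);		/* safe no-op, like of_dev_put(NULL) */
		ref_put(r);		/* drops the last reference and frees */
		puts("ok");
		return 0;
	}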
module_init(emac_init);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 24f3986cfae2..83140cbb5f01 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3136,12 +3136,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* packets may get corrupted during padding by HW.
* To WA this issue, pad all small packets manually.
*/
- if (skb->len < ETH_ZLEN) {
- if (skb_pad(skb, ETH_ZLEN - skb->len))
- return NETDEV_TX_OK;
- skb->len = ETH_ZLEN;
- skb_set_tail_pointer(skb, ETH_ZLEN);
- }
+ if (eth_skb_pad(skb))
+ return NETDEV_TX_OK;
mss = skb_shinfo(skb)->gso_size;
/* The controller does a simple calculation to
@@ -4104,7 +4100,7 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
unsigned int bufsz)
{
- struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+ struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
if (unlikely(!skb))
adapter->alloc_rx_buff_failed++;
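
eth_skb_pad() wraps the open-coded ETH_ZLEN padding the e1000 hunk used to carry: it extends the frame to the 60-byte Ethernet minimum and, if the pad allocation fails, frees the skb and returns nonzero. The same hunk also moves RX skb allocation to napi_alloc_skb(), which allocates from the NAPI context's cache. A sketch of the padding calling convention, with a hypothetical xmit fragment:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	/* eth_skb_pad() frees the skb on failure, so returning
	 * NETDEV_TX_OK without touching it again is correct. */
	static netdev_tx_t model_xmit(struct sk_buff *skb,
				      struct net_device *dev)
	{
		if (eth_skb_pad(skb))
			return NETDEV_TX_OK;	/* skb already freed */

		/* ... map and queue the now >= ETH_ZLEN frame ... */
		return NETDEV_TX_OK;
	}

The skb_put_padto(skb, 17) call in the e1000e hunk further down follows the same convention, with a caller-chosen minimum length instead of ETH_ZLEN.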
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 247335d2c7ec..5c82c8065501 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
*/
if (length < copybreak) {
struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
+ napi_alloc_skb(&adapter->napi, length);
if (new_skb) {
skb_copy_to_linear_data_offset(new_skb,
-NET_IP_ALIGN,
@@ -3449,15 +3449,12 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
+ u32 rss_key[10];
int i;
- static const u32 rsskey[10] = {
- 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
- 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
- };
- /* Fill out hash function seed */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i < 10; i++)
- ew32(RSSRK(i), rsskey[i]);
+ ew32(RSSRK(i), rss_key[i]);
/* Direct all traffic to queue 0 */
for (i = 0; i < 32; i++)
@@ -5557,12 +5554,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* The minimum packet size with TCTL.PSP set is 17 bytes so
* pad skb in order to meet this minimum size requirement
*/
- if (unlikely(skb->len < 17)) {
- if (skb_pad(skb, 17 - skb->len))
- return NETDEV_TX_OK;
- skb->len = 17;
- skb_set_tail_pointer(skb, 17);
- }
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
mss = skb_shinfo(skb)->gso_size;
if (mss) {
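
netdev_rss_key_fill() replaces the per-driver constant seeds removed here and in the fm10k hunks below: every caller gets a copy of one system-wide key that is randomly generated on first use, so RSS hash placement is no longer predictable across machines. A sketch of the usage, with a hypothetical register-programming helper mirroring e1000e_setup_rss_hash():

	#include <linux/io.h>
	#include <linux/netdevice.h>

	/* Sized for ten 32-bit RSSRK registers, as in the hunk above. */
	static void model_setup_rss(u32 __iomem *regs)
	{
		u32 rss_key[10];
		int i;

		netdev_rss_key_fill(rss_key, sizeof(rss_key));
		for (i = 0; i < 10; i++)
			writel(rss_key[i], &regs[i]);
	}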
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2d04464e6aa3..651f53bc7376 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -916,11 +916,15 @@ static u32 fm10k_get_rssrk_size(struct net_device *netdev)
return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}
-static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key)
+static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
int i, err;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
err = fm10k_get_reta(netdev, indir);
if (err || !key)
return err;
@@ -932,12 +936,16 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key)
}
static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw;
int i, err;
/* We do not allow changes to unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
err = fm10k_set_reta(netdev, indir);
if (err || !key)
return err;
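
The fm10k ethtool hunks adapt to the extended get/set_rxfh API: get reports the hash function in use (Toeplitz, ETH_RSS_HASH_TOP) through the new hfunc out-parameter, and set rejects any attempt to select a different one. The pattern, reduced to its two checks in hypothetical helpers:

	#include <linux/errno.h>
	#include <linux/ethtool.h>

	static void model_report_hfunc(u8 *hfunc)
	{
		if (hfunc)			/* out-parameter may be NULL */
			*hfunc = ETH_RSS_HASH_TOP;
	}

	static int model_validate_hfunc(u8 hfunc)
	{
		/* Only Toeplitz (or "no change") is supported. */
		if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
		    hfunc != ETH_RSS_HASH_TOP)
			return -EOPNOTSUPP;
		return 0;
	}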
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e645af412e76..ee1ecb146df7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -83,7 +83,7 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
return true;
/* alloc new page for storage */
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ page = dev_alloc_page();
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
return false;
@@ -308,8 +308,8 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
#endif
/* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- FM10K_RX_HDR_LEN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+ FM10K_RX_HDR_LEN);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_failed++;
return NULL;
@@ -578,14 +578,9 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
if (skb_is_nonlinear(skb))
fm10k_pull_tail(rx_ring, rx_desc, skb);
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
-
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
return false;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index a0cb74ab3dc6..4f5892cc32d7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1551,15 +1551,11 @@ void fm10k_down(struct fm10k_intfc *interface)
static int fm10k_sw_init(struct fm10k_intfc *interface,
const struct pci_device_id *ent)
{
- static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25,
- 0x3d256741, 0xb08fa343,
- 0xcb2bcad0, 0xb4307bae,
- 0xa32dcb77, 0x0cf23080,
- 0x3bb7426a, 0xfa01acbe };
const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
struct fm10k_hw *hw = &interface->hw;
struct pci_dev *pdev = interface->pdev;
struct net_device *netdev = interface->netdev;
+ u32 rss_key[FM10K_RSSRK_SIZE];
unsigned int rss;
int err;
@@ -1673,8 +1669,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
/* initialize vxlan_port list */
INIT_LIST_HEAD(&interface->vxlan_port);
- /* initialize RSS key */
- memcpy(interface->rssrk, seed, sizeof(seed));
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ memcpy(interface->rssrk, rss_key, sizeof(rss_key));
/* Start off interface as being down */
set_bit(__FM10K_DOWN, &interface->state);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index f1e33f896439..fc50f6461b13 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -87,7 +87,7 @@
#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
-#define I40E_AQ_LEN 32
+#define I40E_AQ_LEN 128
#define I40E_AQ_WORK_LIMIT 16
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_MSG_ENABLE 4
@@ -146,6 +146,7 @@ enum i40e_state_t {
__I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED,
__I40E_RESET_FAILED,
+ __I40E_PORT_TX_SUSPENDED,
};
enum i40e_interrupt_policy {
@@ -269,7 +270,8 @@ struct i40e_pf {
u16 msg_enable;
char misc_int_name[IFNAMSIZ + 9];
u16 adminq_work_limit; /* num of admin receive queue desc to process */
- int service_timer_period;
+ unsigned long service_timer_period;
+ unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 72f5d25a222f..77f6254a89ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -51,7 +51,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
- if (hw->mac.type == I40E_MAC_VF) {
+ if (i40e_is_vf(hw)) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
hw->aq.asq.len = I40E_VF_ATQLEN1;
@@ -617,7 +617,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_busy = false;
+ hw->aq.nvm_release_on_done = false;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
@@ -754,12 +755,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
- if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
- status = I40E_ERR_NVM;
- goto asq_send_command_exit;
- }
-
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -853,7 +848,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
- u32 delay_len = 10;
do {
/* AQ designers suggest use of head for better
@@ -861,9 +855,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- /* ugh! delay while spin_lock */
- udelay(delay_len);
- total_delay += delay_len;
+ usleep_range(1000, 2000);
+ total_delay++;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
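
The AQ completion wait now sleeps instead of busy-waiting: the send path is serialized by asq_mutex rather than a spinlock, so usleep_range() is legal, each pass costs roughly a millisecond, and I40E_ASQ_CMD_TIMEOUT is re-expressed in milliseconds (see the header hunk below). A sketch of the loop shape, with a hypothetical completion callback:

	#include <linux/delay.h>
	#include <linux/types.h>

	static bool model_wait_done(bool (*done)(void *), void *arg,
				    u32 timeout_ms)
	{
		u32 total_delay = 0;

		do {
			if (done(arg))
				return true;
			/* ~1 ms sleep; fine under a mutex, never a spinlock */
			usleep_range(1000, 2000);
			total_delay++;
		} while (total_delay < timeout_ms);

		return false;
	}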
@@ -903,9 +896,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
- if (!status && i40e_is_nvm_update_op(desc))
- hw->aq.nvm_busy = true;
-
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
@@ -958,9 +948,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQRX: Queue is empty.\n");
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
@@ -982,10 +969,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
- e->msg_size = min(datalen, e->msg_size);
- if (e->msg_buf != NULL && (e->msg_size != 0))
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
- e->msg_size);
+ e->msg_len);
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
@@ -1021,7 +1008,6 @@ clean_arq_element_out:
mutex_unlock(&hw->aq.arq_mutex);
if (i40e_is_nvm_update_op(&e->desc)) {
- hw->aq.nvm_busy = false;
if (hw->aq.nvm_release_on_done) {
i40e_release_nvm(hw);
hw->aq.nvm_release_on_done = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index ba38a89c79d6..564d0b0192f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -28,6 +28,7 @@
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
+#include "i40e_status.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
@@ -76,7 +77,8 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
struct i40e_aq_desc desc;
- u16 msg_size;
+ u16 msg_len;
+ u16 buf_len;
u8 *msg_buf;
};
@@ -93,7 +95,6 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_busy;
bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
@@ -108,7 +109,7 @@ struct i40e_adminq_info {
* i40e_aq_rc_to_posix - convert errors to user-land codes
* aq_rc: AdminQ error code to convert
**/
-static inline int i40e_aq_rc_to_posix(u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc)
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+ return -ERANGE;
return aq_to_posix[aq_rc];
}
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
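
i40e_aq_rc_to_posix() is hardened in two ways above: a queue timeout means the AQ return code was never written by firmware, so the caller gets -EAGAIN without consulting it, and any code beyond the translation table maps to -ERANGE instead of indexing out of bounds. A standalone model with a shortened table:

	#include <errno.h>
	#include <stdio.h>

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))
	#define ERR_AQ_TIMEOUT	1  /* stands in for I40E_ERR_ADMIN_QUEUE_TIMEOUT */

	static int rc_to_posix(unsigned int aq_ret, unsigned int aq_rc)
	{
		static const int aq_to_posix[] = { 0, -EPERM, -ENOENT, -ESRCH };

		if (aq_ret == ERR_AQ_TIMEOUT)	/* aq_rc never written */
			return -EAGAIN;
		if (aq_rc >= ARRAY_SIZE(aq_to_posix))
			return -ERANGE;		/* don't index past the table */
		return aq_to_posix[aq_rc];
	}

	int main(void)
	{
		printf("%d %d %d\n", rc_to_posix(0, 1),
		       rc_to_posix(ERR_AQ_TIMEOUT, 0), rc_to_posix(0, 99));
		return 0;
	}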
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 15f289f2917f..8835aeeff23e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -33,8 +33,8 @@
* This file needs to comply with the Linux Kernel coding style.
*/
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
struct i40e_aq_desc {
__le16 flags;
@@ -66,216 +66,217 @@ struct i40e_aq_desc {
*/
/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
};
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
/* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
/* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
/* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
/* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
/* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
/* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
/* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
/* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
/* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
- i40e_aqc_opc_debug_modify_internals = 0xFF09,
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@@ -302,7 +303,7 @@ enum i40e_admin_queue_opc {
/* This macro is used extensively to ensure that command structures are 16
* bytes in length as they have to map to the raw array of that size.
*/
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
/* internal (0x00XX) commands */
@@ -320,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
@@ -351,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
-#define I40E_AQ_RESOURCE_NVM 1
-#define I40E_AQ_RESOURCE_SDP 2
-#define I40E_AQ_RESOURCE_ACCESS_READ 1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
@@ -373,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
*/
struct i40e_aqc_list_capabilites {
u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
u8 pf_index;
u8 reserved[2];
__le32 count;
@@ -384,123 +385,123 @@ struct i40e_aqc_list_capabilites {
I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
};
/* list of caps */
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
- __le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC 0x0800
-#define I40E_AQ_CPPM_EN_DMCTH 0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
-#define I40E_AQ_CPPM_EN_HPTC 0x4000
-#define I40E_AQ_CPPM_EN_DMARC 0x8000
- __le16 ttlx;
- __le32 dmacr;
- __le16 dmcth;
- u8 hptc;
- u8 reserved;
- __le32 pfltrc;
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
- __le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
- __le16 table_id;
- __le32 pfpm_proxyfc;
- __le32 ip_addr;
- u8 mac_addr[6];
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
};
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
- __le16 table_idx_mac_addr_0;
- __le16 table_idx_mac_addr_1;
- __le16 table_idx_ipv6_0;
- __le16 table_idx_ipv6_1;
- __le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
- u8 mac_addr_0[6];
- u8 mac_addr_1[6];
- u8 local_mac_addr[6];
- u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
- u8 ipv6_addr_1[16];
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
};
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
-#define I40E_AQ_LAA_FLAG_WR 0x8000
- u8 reserved[2];
- __le32 sal;
- __le16 sah;
- u8 reserved2[6];
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
};
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
-#define I40E_AQC_LAN_ADDR_VALID 0x10
-#define I40E_AQC_SAN_ADDR_VALID 0x20
-#define I40E_AQC_PORT_ADDR_VALID 0x40
-#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
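/* Illustrative sketch (hypothetical helper, not defined by this file):
 * after a Manage MAC Address Read completes, the ADDR_VALID bits in
 * command_flags say which of the returned addresses are populated.
 * Assumes the kernel's le16_to_cpu() byte-order helper.
 */
static inline bool i40e_example_lan_addr_valid(struct i40e_aqc_mac_address_read *resp)
{
	/* command_flags is little-endian on the wire */
	return !!(le16_to_cpu(resp->command_flags) & I40E_AQC_LAN_ADDR_VALID);
}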
@@ -516,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
- __le16 command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
-#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
- __le16 mac_sah;
- __le32 mac_sal;
- u8 reserved[8];
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
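/* Illustrative sketch: one plausible packing of a 6-byte MAC into the
 * mac_sah/mac_sal pair, assuming the usual station-address-high/low
 * split (two high octets in sah, four low octets in sal). Hypothetical
 * helper, not defined by this file.
 */
static inline void i40e_example_pack_mac(struct i40e_aqc_mac_address_write *cmd,
					 const u8 *mac)
{
	cmd->mac_sah = cpu_to_le16(((u16)mac[0] << 8) | mac[1]);
	cmd->mac_sal = cpu_to_le32(((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
				   ((u32)mac[4] << 8) | mac[5]);
}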
@@ -544,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
* command
*/
struct i40e_aqc_switch_seid {
- __le16 seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
@@ -556,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_switch_config_header_resp {
- __le16 num_reported;
- __le16 num_total;
- u8 reserved[12];
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
};
struct i40e_aqc_switch_config_element_resp {
- u8 element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC 1
-#define I40E_AQ_SW_ELEM_TYPE_PF 2
-#define I40E_AQ_SW_ELEM_TYPE_VF 3
-#define I40E_AQ_SW_ELEM_TYPE_EMP 4
-#define I40E_AQ_SW_ELEM_TYPE_BMC 5
-#define I40E_AQ_SW_ELEM_TYPE_PV 16
-#define I40E_AQ_SW_ELEM_TYPE_VEB 17
-#define I40E_AQ_SW_ELEM_TYPE_PA 18
-#define I40E_AQ_SW_ELEM_TYPE_VSI 19
- u8 revision;
-#define I40E_AQ_SW_ELEM_REV_1 1
- __le16 seid;
- __le16 uplink_seid;
- __le16 downlink_seid;
- u8 reserved[3];
- u8 connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR 0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_CONN_TYPE_CASCADED 0x3
- __le16 scheduler_id;
- __le16 element_info;
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
};
/* Get Switch Configuration (indirect 0x0200)
@@ -591,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp {
* the first in the array is the header, remainder are elements
*/
struct i40e_aqc_get_switch_config_resp {
- struct i40e_aqc_get_switch_config_header_resp header;
- struct i40e_aqc_switch_config_element_resp element[1];
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
};
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
struct i40e_aqc_add_remove_statistics {
- __le16 seid;
- __le16 vlan;
- __le16 stat_index;
- u8 reserved[10];
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
/* Set Port Parameters command (direct 0x0203) */
struct i40e_aqc_set_port_parameters {
- __le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
- __le16 bad_frame_vsi;
- __le16 default_seid; /* reserved for command */
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
/* Get Switch Resource Allocation (indirect 0x0204) */
struct i40e_aqc_get_switch_resource_alloc {
- u8 num_entries; /* reserved for command */
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
/* expect an array of these structs in the response buffer */
struct i40e_aqc_switch_resource_alloc_element_resp {
- u8 resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
- u8 reserved1;
- __le16 guaranteed;
- __le16 total;
- __le16 used;
- __le16 total_unalloced;
- u8 reserved2[6];
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
};
/* Add VSI (indirect 0x0210)
@@ -671,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
* uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
- __le16 uplink_seid;
- u8 connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
- u8 reserved1;
- u8 vf_id;
- u8 reserved2;
- __le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF 0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
-#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
- __le32 addr_high;
- __le32 addr_low;
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
@@ -706,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
struct i40e_aqc_vsi_properties_data {
/* first 96 bytes are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
/* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
/* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
/* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
/* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
/* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
/* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
- u8 queueing_opt_reserved[3];
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
/* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
+ u8 up_enable_bits;
+ u8 sched_reserved;
/* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress table */
- u8 cmd_reserved[8];
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
- __le16 qs_handle[8];
+ __le16 qs_handle[8];
#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
};
I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
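/* Illustrative sketch: composing one tc_mapping entry from a first-queue
 * offset and a queue-count exponent (the QUE_NUMBER field is assumed to
 * encode the count as a power of two, so 3 means 2^3 = 8 queues).
 * Hypothetical helper, not defined by this file.
 */
static inline __le16 i40e_example_tc_qmap(u16 offset, u16 num_qps_exp)
{
	u16 qmap;

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
	       I40E_AQ_VSI_TC_QUE_OFFSET_MASK;
	qmap |= (num_qps_exp << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
		I40E_AQ_VSI_TC_QUE_NUMBER_MASK;
	return cpu_to_le16(qmap);
}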
@@ -830,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
* (IS_CTRL_PORT only works on add PV)
*/
struct i40e_aqc_add_update_pv {
- __le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
- __le16 uplink_seid;
- __le16 connected_seid;
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
struct i40e_aqc_add_update_pv_completion {
/* reserved for update; for add also encodes error if rc == ENOSPC */
- __le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
- u8 reserved[14];
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
@@ -859,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
*/
struct i40e_aqc_get_pv_params_completion {
- __le16 seid;
- __le16 default_stag;
- __le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
- u8 reserved[8];
- __le16 default_port_seid;
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
/* Add VEB (direct 0x0230) */
struct i40e_aqc_add_veb {
- __le16 uplink_seid;
- __le16 downlink_seid;
- __le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
- u8 enable_tcs;
- u8 reserved[9];
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
struct i40e_aqc_add_veb_completion {
- u8 reserved[6];
- __le16 switch_seid;
+ u8 reserved[6];
+ __le16 switch_seid;
/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
- __le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
@@ -909,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
@@ -928,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
/* used as the command for most vlan commands */
struct i40e_aqc_macvlan {
- __le16 num_addresses;
- __le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
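/* Illustrative sketch: only the low 10 bits of seid[0] carry the
 * switching element id, and the VALID flag must be set for firmware to
 * honor it; seid[1] and seid[2] are assumed unused here. Hypothetical
 * helper, not defined by this file.
 */
static inline void i40e_example_fill_macvlan(struct i40e_aqc_macvlan *cmd,
					     u16 seid, u16 count)
{
	cmd->num_addresses = cpu_to_le16(count);
	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID |
				   (seid & I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK));
	cmd->seid[1] = 0;
	cmd->seid[2] = 0;
}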
/* indirect data for command and response */
struct i40e_aqc_add_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- __le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
- __le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
/* response section */
- u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
-#define I40E_AQC_MM_ERR_NO_RES 0xFF
- u8 reserved1[3];
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
};
struct i40e_aqc_add_remove_macvlan_completion {
@@ -978,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
*/
struct i40e_aqc_remove_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- u8 flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
- u8 reserved[3];
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
/* reply section */
- u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
- u8 reply_reserved[3];
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
};
/* Add VLAN (indirect 0x0252)
@@ -998,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data {
* use the generic i40e_aqc_macvlan for the command
*/
struct i40e_aqc_add_remove_vlan_element_data {
- __le16 vlan_tag;
- u8 vlan_flags;
+ __le16 vlan_tag;
+ u8 vlan_flags;
/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
- u8 reserved;
- u8 result;
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
- u8 reserved1[3];
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
};
struct i40e_aqc_add_remove_vlan_completion {
- u8 reserved[4];
- __le16 vlans_used;
- __le16 vlans_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
/* Set VSI Promiscuous Modes (direct 0x0254) */
struct i40e_aqc_set_vsi_promiscuous_modes {
- __le16 promiscuous_flags;
- __le16 valid_flags;
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
/* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
-#define I40E_AQC_SET_VSI_DEFAULT 0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
- __le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- __le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
- u8 reserved[8];
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
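/* Illustrative sketch: valid_flags selects which promiscuous_flags bits
 * firmware should act on, which lets a caller toggle one mode without
 * disturbing the rest. Hypothetical helper, not defined by this file.
 */
static inline void i40e_example_set_uc_promisc(struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
					       u16 seid, bool enable)
{
	cmd->promiscuous_flags =
		cpu_to_le16(enable ? I40E_AQC_SET_VSI_PROMISC_UNICAST : 0);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->seid = cpu_to_le16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
}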
@@ -1059,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_add_tag {
- __le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
- __le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- __le16 queue_number;
- u8 reserved[8];
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
struct i40e_aqc_add_remove_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
@@ -1084,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_remove_tag {
- __le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- u8 reserved[12];
+ __le16 tag;
+ u8 reserved[12];
};
/* Add multicast E-Tag (direct 0x0257)
@@ -1097,22 +1097,22 @@ struct i40e_aqc_remove_tag {
* and no external data
*/
struct i40e_aqc_add_remove_mcast_etag {
- __le16 pv_seid;
- __le16 etag;
- u8 num_unicast_etags;
- u8 reserved[3];
- __le32 addr_high; /* address of array of 2-byte s-tags */
- __le32 addr_low;
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
struct i40e_aqc_add_remove_mcast_etag_completion {
- u8 reserved[4];
- __le16 mcast_etags_used;
- __le16 mcast_etags_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
@@ -1120,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
/* Update S/E-Tag (direct 0x0259) */
struct i40e_aqc_update_tag {
- __le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 old_tag;
- __le16 new_tag;
- u8 reserved[10];
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
struct i40e_aqc_update_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
@@ -1145,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
* and the generic direct completion structure
*/
struct i40e_aqc_add_remove_control_packet_filter {
- u8 mac[6];
- __le16 etype;
- __le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
- __le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
- __le16 queue;
- u8 reserved[2];
+ __le16 queue;
+ u8 reserved[2];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
struct i40e_aqc_add_remove_control_packet_filter_completion {
- __le16 mac_etype_used;
- __le16 etype_used;
- __le16 mac_etype_free;
- __le16 etype_free;
- u8 reserved[8];
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
@@ -1179,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
* and the generic indirect completion structure
*/
struct i40e_aqc_add_remove_cloud_filters {
- u8 num_filters;
- u8 reserved;
- __le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
struct i40e_aqc_add_remove_cloud_filters_element_data {
- u8 outer_mac[6];
- u8 inner_mac[6];
- __le16 inner_vlan;
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
union {
struct {
u8 reserved[12];
@@ -1205,49 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 data[16];
} v6;
} ipaddr;
- __le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
/* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
/* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
/* 0x0007 reserved */
/* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-
- __le32 tenant_id;
- u8 reserved[4];
- __le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved2[14];
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
/* response section */
- u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
- u8 response_reserved[7];
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
};
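/* Illustrative sketch: the flags word combines a filter-type value in
 * the low bits with tunnel-type and address-family bits. Assuming a
 * VXLAN ("XVLAN") tunnel keyed on inner MAC + VLAN + tenant id over
 * IPv4. Hypothetical helper, not defined by this file.
 */
static inline __le16 i40e_example_cloud_filter_flags(void)
{
	u16 flags = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;

	flags |= I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
	flags |= I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN <<
		 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
	return cpu_to_le16(flags);
}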
struct i40e_aqc_remove_cloud_filters_completion {
@@ -1269,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
struct i40e_aqc_add_delete_mirror_rule {
__le16 seid;
__le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
__le16 num_entries;
__le16 destination; /* VSI for add, rule id for delete */
__le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
@@ -1286,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
struct i40e_aqc_add_delete_mirror_rule_completion {
- u8 reserved[2];
- __le16 rule_id; /* only used on add */
- __le16 mirror_rules_used;
- __le16 mirror_rules_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
@@ -1302,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
* the command and response use the same descriptor structure
*/
struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
@@ -1321,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
* this generic struct to pass the SEID in param0
*/
struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
@@ -1336,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp {
/* Configure VSI BW limits (direct 0x0400) */
struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
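/* Illustrative sketch: credit carries the rate limit (unit defined by
 * the firmware spec; the driver elsewhere assumes 50 Mbps per credit)
 * and max_credit is a burst exponent, so burst = 2^max_credit credits.
 * Hypothetical helper, not defined by this file.
 */
static inline void i40e_example_vsi_bw_limit(struct i40e_aqc_configure_vsi_bw_limit *cmd,
					     u16 seid, u16 credit, u8 max_credit)
{
	cmd->vsi_seid = cpu_to_le16(seid);
	cmd->credit = cpu_to_le16(credit);
	cmd->max_credit = max_credit; /* 0-3 per the field comment above */
}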
@@ -1350,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
};
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
};
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
};
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
};
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
@@ -1411,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
* Disable Physical Port ETS (indirect 0x0415)
*/
struct i40e_aqc_configure_switching_comp_ets_data {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
- u8 tc_strict_priority_flags;
- u8 reserved1[17];
- u8 tc_bw_share_credits[8];
- u8 reserved2[96];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credit[8];
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
};
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
struct i40e_aqc_configure_switching_comp_bw_config_data {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits; /* bool */
- u8 tc_bw_share_credits[8];
- u8 reserved1[20];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
};
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
- u8 tc_valid_bits;
- u8 reserved[35];
- __le16 port_bw_limit;
- u8 reserved1[2];
- u8 tc_bw_max; /* 0-3, limit = 2^max */
- u8 reserved2[23];
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
};
/* Query Physical Port ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 reserved1;
- u8 tc_strict_priority_bits;
- u8 reserved2;
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved3[32];
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
};
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
struct i40e_aqc_query_switching_comp_bw_config_resp {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits_enable; /* bool */
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
};
/* Suspend/resume port TX traffic
@@ -1490,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (indirect 0x041D)
*/
struct i40e_aqc_configure_partition_bw_data {
- __le16 pf_valid_bits;
- u8 min_bw[16]; /* guaranteed bandwidth */
- u8 max_bw[16]; /* bandwidth limit */
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
};
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
enum i40e_aq_hmc_profile {
/* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
};
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
enum i40e_aq_phy_type {
I40E_PHY_TYPE_SGMII = 0x0,
@@ -1578,147 +1578,147 @@ struct i40e_aqc_module_desc {
};
struct i40e_aq_get_phy_abilities_resp {
- __le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum bit patterns */
- u8 abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
- __le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
- __le32 eeer_val;
- u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
- u8 phy_id[4];
- u8 module_type[3];
- u8 qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS 16
- struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
- __le32 phy_type;
- u8 link_speed;
- u8 abilities;
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
/* bits 0-2 use the values from get_phy_abilities_resp */
#define I40E_AQ_PHY_ENABLE_LINK 0x08
#define I40E_AQ_PHY_ENABLE_AN 0x10
#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
- __le16 eee_capability;
- __le32 eeer;
- u8 low_power_ctrl;
- u8 reserved[3];
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
/* Set MAC Config command data structure (direct 0x0603) */
struct i40e_aq_set_mac_config {
- __le16 max_frame_size;
- u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
- u8 tx_timer_priority; /* bitmap */
- __le16 tx_timer_value;
- __le16 fc_refresh_threshold;
- u8 reserved[8];
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
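/* Illustrative sketch: the pacing value lives in bits 3-6 of params, so
 * callers mask and shift rather than assigning the byte directly.
 * Hypothetical helper, not defined by this file.
 */
static inline void i40e_example_set_pacing(struct i40e_aq_set_mac_config *cfg,
					   u8 pacing)
{
	cfg->params &= ~I40E_AQ_SET_MAC_CONFIG_PACING_MASK;
	cfg->params |= (pacing << I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT) &
		       I40E_AQ_SET_MAC_CONFIG_PACING_MASK;
}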
/* Restart Auto-Negotiation (direct 0x605) */
struct i40e_aqc_set_link_restart_an {
- u8 command;
-#define I40E_AQ_PHY_RESTART_AN 0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
/* Get Link Status cmd & response data structure (direct 0x0607) */
struct i40e_aqc_get_link_status {
- __le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
-#define I40E_AQ_LSE_DISABLE 0x2
-#define I40E_AQ_LSE_ENABLE 0x3
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
/* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED 0x1
- u8 phy_type; /* i40e_aq_phy_type */
- u8 link_speed; /* i40e_aq_link_speed */
- u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
- u8 an_info;
-#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
-#define I40E_AQ_LINK_PAUSE_TX 0x20
-#define I40E_AQ_LINK_PAUSE_RX 0x40
-#define I40E_AQ_QUALIFIED_MODULE 0x80
- u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
- u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
- __le16 max_frame_size;
- u8 config;
-#define I40E_AQ_CONFIG_CRC_ENA 0x04
-#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 reserved[5];
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
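/* Illustrative sketch: link_info and an_info are single bytes, so no
 * byte swapping is needed there; max_frame_size is little-endian.
 * Hypothetical helper, not defined by this file.
 */
static inline bool i40e_example_link_is_up(struct i40e_aqc_get_link_status *resp,
					   u16 *max_frame)
{
	*max_frame = le16_to_cpu(resp->max_frame_size);
	return !!(resp->link_info & I40E_AQ_LINK_UP);
}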
/* Set event mask command (direct 0x613) */
struct i40e_aqc_set_phy_int_mask {
- u8 reserved[8];
- __le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
-#define I40E_AQ_EVENT_MEDIA_NA 0x0004
-#define I40E_AQ_EVENT_LINK_FAULT 0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
- u8 reserved1[6];
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
@@ -1728,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
* Get Link Partner AN advt register (direct 0x0616)
*/
struct i40e_aqc_an_advt_reg {
- __le32 local_an_reg0;
- __le16 local_an_reg1;
- u8 reserved[10];
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
/* Set PHY Debug command (0x0622) */
struct i40e_aqc_set_phy_debug {
- u8 command_flags;
+ u8 command_flags;
#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
@@ -1757,15 +1757,15 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
- u8 reserved[15];
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
- I40E_AQC_PHY_REG_INTERNAL = 0x1,
- I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
- I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
};
/* NVM Read command (indirect 0x0701)
@@ -1773,40 +1773,40 @@ enum i40e_aq_phy_reg_type {
* NVM Update commands (indirect 0x0703)
*/
struct i40e_aqc_nvm_update {
- u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
- u8 module_pointer;
- __le16 length;
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
- __le16 cmd_flags;
+ __le16 cmd_flags;
#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
#define ANVM_READ_SINGLE_FEATURE 0
#define ANVM_READ_MULTIPLE_FEATURES 1
- __le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
- __le32 address_high;
- __le32 address_low;
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
/* NVM Config Write (indirect 0x0705) */
struct i40e_aqc_nvm_config_write {
- __le16 cmd_flags;
- __le16 element_count;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
@@ -1831,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field {
* Send to Peer PF command (indirect 0x0803)
*/
struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
@@ -1870,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
* uses i40e_aq_desc
*/
struct i40e_aqc_alternate_write_done {
- __le16 cmd_flags;
+ __le16 cmd_flags;
#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
- u8 reserved[14];
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
/* Set OEM mode (direct 0x0905) */
struct i40e_aqc_alternate_set_mode {
- __le32 mode;
+ __le32 mode;
#define I40E_AQ_ALTERNATE_MODE_NONE 0
#define I40E_AQ_ALTERNATE_MODE_OEM 1
- u8 reserved[12];
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
@@ -1896,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
/* Lan Queue Overflow Event (direct, 0x1001) */
struct i40e_aqc_lan_overflow {
- __le32 prtdcb_rupto;
- __le32 otx_ctl;
- u8 reserved[8];
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
/* Get LLDP MIB (indirect 0x0A00) */
struct i40e_aqc_lldp_get_mib {
- u8 type;
- u8 reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
-#define I40E_AQ_LLDP_MIB_LOCAL 0x0
-#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
/* TX pause flags use I40E_AQ_LINK_TX_* above */
- __le16 local_len;
- __le16 remote_len;
- u8 reserved2[2];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
@@ -1931,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
* also used for the event (with type in the command field)
*/
struct i40e_aqc_lldp_update_mib {
- u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
@@ -1945,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
* Delete LLDP TLV (indirect 0x0A04)
*/
struct i40e_aqc_lldp_add_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved1[1];
- __le16 len;
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
/* Update LLDP TLV (indirect 0x0A03) */
struct i40e_aqc_lldp_update_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved;
- __le16 old_len;
- __le16 new_offset;
- __le16 new_len;
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
@@ -1981,57 +1981,97 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
struct i40e_aqc_lldp_start {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
-/* Apply MIB changes (0x0A07)
- * uses the generic struc as it contains no data
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
*/
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3;
+ __le16 oper_app_prio;
+ u8 reserved4;
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
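The two I40E_CHECK_STRUCT_LEN() lines above pin the CEE response layouts to 0x18 and 0x20 bytes at compile time, so a mis-sized struct fails the build rather than silently corrupting an AQ buffer. The macro itself is defined elsewhere in the driver; a minimal stand-in sketch of the usual divide-by-zero static-assert trick (macro and enum names hypothetical):

/* Compile-time size check: division by zero in a constant
 * expression forces a build error when sizeof(struct X) != n. */
#define CHECK_STRUCT_LEN(n, X) \
	enum check_struct_len_##X { check_len_##X = (n) / (sizeof(struct X) == (n)) }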
/* Add UDP Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
- u8 reserved1[10];
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
- u8 filter_entry_index;
- u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
- u8 total_filters;
- u8 reserved[11];
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 reserved2[13];
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved1[11];
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
@@ -2044,11 +2084,11 @@ struct i40e_aqc_tunnel_key_structure {
u8 key1_len; /* 0 to 15 */
u8 key2_len; /* 0 to 15 */
u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
u8 network_key_index;
#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
@@ -2061,21 +2101,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
/* OEM mode commands (direct 0xFE0x) */
struct i40e_aqc_oem_param_change {
- __le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
-#define I40E_AQ_OEM_PARAM_MAC 2
- __le32 param_value1;
- u8 param_value2[8];
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
struct i40e_aqc_oem_state_change {
- __le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
-#define I40E_AQ_OEM_STATE_LINK_UP 0x1
- u8 reserved[12];
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
@@ -2087,18 +2127,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
/* set test mode (0xFF01, internal) */
struct i40e_acq_set_test_mode {
- u8 mode;
-#define I40E_AQ_TEST_PARTIAL 0
-#define I40E_AQ_TEST_FULL 1
-#define I40E_AQ_TEST_NVM 2
- u8 reserved[3];
- u8 command;
-#define I40E_AQ_TEST_OPEN 0
-#define I40E_AQ_TEST_CLOSE 1
-#define I40E_AQ_TEST_INC 2
- u8 reserved2[3];
- __le32 address_high;
- __le32 address_low;
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
@@ -2151,21 +2191,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
- u8 cluster_id;
- u8 table_id;
- __le16 data_size;
- __le32 idx;
- __le32 address_high;
- __le32 address_low;
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
struct i40e_aqc_debug_modify_internals {
- u8 cluster_id;
- u8 cluster_specific_params[7];
- __le32 address_high;
- __le32 address_low;
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 30056b25d94e..3d741ee99a2c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -50,6 +50,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
@@ -549,7 +550,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
i40e_status status = 0;
- u32 reg;
+ u32 port, ari, func_rid;
i40e_set_mac_type(hw);
@@ -562,18 +563,17 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
hw->phy.get_link_info = true;
- /* Determine port number */
- reg = rd32(hw, I40E_PFGEN_PORTNUM);
- reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
- I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
- hw->port = (u8)reg;
-
- /* Determine the PF number based on the PCI fn */
- reg = rd32(hw, I40E_GLPCI_CAPSUP);
- if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
- hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ /* Determine port number and PF number */
+ port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+ >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ hw->port = (u8)port;
+ ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+ I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ func_rid = rd32(hw, I40E_PF_FUNC_RID);
+ if (ari)
+ hw->pf_id = (u8)(func_rid & 0xff);
else
- hw->pf_id = (u8)hw->bus.func;
+ hw->pf_id = (u8)(func_rid & 0x7);
status = i40e_init_nvm(hw);
return status;
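The rewritten block above stops deriving pf_id from cached bus/device/function values and reads the function RID register instead: with ARI enabled the PCIe routing ID carries an 8-bit function number, otherwise only the low 3 bits identify the function. A standalone sketch of the same masking (helper name hypothetical):

#include <stdint.h>

/* ARI widens the function number in the PCIe routing ID
 * from 3 bits to 8 bits; mask accordingly. */
static uint8_t pf_id_from_rid(uint32_t func_rid, int ari_enabled)
{
	return ari_enabled ? (uint8_t)(func_rid & 0xff)
			   : (uint8_t)(func_rid & 0x7);
}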
@@ -790,7 +790,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
}
#define I40E_PF_RESET_WAIT_COUNT_A0 200
-#define I40E_PF_RESET_WAIT_COUNT 100
+#define I40E_PF_RESET_WAIT_COUNT 110
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -1420,6 +1420,33 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse)
}
/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_int_mask *cmd =
+ (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_int_mask);
+
+ cmd->event_mask = cpu_to_le16(mask);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
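i40e_aq_set_phy_int_mask() is a thin wrapper: it fills a direct descriptor for opcode 0x0613 and copies the caller's mask into the i40e_aqc_set_phy_int_mask payload defined earlier in this patch. A hedged usage sketch; the polarity the firmware assigns to set bits (report vs. suppress) is not stated here, so the chosen events and the helper name are purely illustrative:

/* Illustrative caller (assumes a valid struct i40e_hw *hw). */
static i40e_status enable_link_events(struct i40e_hw *hw)
{
	u16 mask = I40E_AQ_EVENT_LINK_UPDOWN |
		   I40E_AQ_EVENT_MEDIA_NA |
		   I40E_AQ_EVENT_MODULE_QUAL_FAIL;

	return i40e_aq_set_phy_int_mask(hw, mask, NULL);
}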
+/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2632,6 +2659,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
}
/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add
@@ -3189,6 +3244,26 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_set_pci_config_data - store PCI bus info
* @hw: pointer to hardware structure
* @link_status: the link status word from PCI config space
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 036570d76176..3ce43588592d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -59,7 +59,7 @@ i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
- struct i40e_ieee_ets_config *etscfg;
+ struct i40e_dcb_ets_config *etscfg;
u8 *buf = tlv->tlvinfo;
u16 offset = 0;
u8 priority;
@@ -407,6 +407,166 @@ free_mem:
}
/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add APPs if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
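Each byte of oper_prio_tc[] packs two 4-bit CEE priority-group IDs, which the loop above splits with the I40E_CEE_PGID_PRIO_* masks added to i40e_dcb.h further down in this patch (0xF0 for the even priority, 0x0F for the odd one). A standalone worked example of the same unpacking, with illustrative byte values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative CEE bytes: priorities 0..7 map to PGIDs 2,1,4,1,5,0,3,7 */
	uint8_t oper_prio_tc[4] = { 0x21, 0x41, 0x50, 0x37 };
	uint8_t prio_table[8];
	int i;

	for (i = 0; i < 4; i++) {
		prio_table[i * 2]     = (oper_prio_tc[i] & 0xF0) >> 4; /* high nibble */
		prio_table[i * 2 + 1] =  oper_prio_tc[i] & 0x0F;       /* low nibble */
	}
	for (i = 0; i < 8; i++)
		printf("prio %d -> pgid %u\n", i, prio_table[i]);
	return 0;
}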
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+ struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, tc, err, sync, oper;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ for (i = 0; i < 4; i++) {
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prioritytable[i] =
+ cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+ status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+ I40E_AQC_CEE_APP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add APPs if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
+ /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+ /* FCoE APP */
+ dcbcfg->app[0].priority =
+ (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+ I40E_AQC_CEE_APP_FCOE_SHIFT;
+ dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ /* iSCSI APP */
+ dcbcfg->app[1].priority =
+ (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+ I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+ /* FIP APP */
+ dcbcfg->app[2].priority =
+ (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+ I40E_AQC_CEE_APP_FIP_SHIFT;
+ dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ }
+}
+
+/**
* i40e_get_dcb_config
* @hw: pointer to the hw struct
*
@@ -415,7 +575,44 @@ free_mem:
i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
{
i40e_status ret = 0;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+
+ /* If firmware version < v4.33, use IEEE mode only */
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4))
+ goto ieee;
+
+ /* If firmware version == v4.33, use the old CEE struct */
+ if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+ sizeof(cee_v1_cfg), NULL);
+ if (!ret) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+ &hw->local_dcbx_config);
+ }
+ } else {
+ ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+ sizeof(cee_cfg), NULL);
+ if (!ret) {
+ /* CEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ i40e_cee_to_dcb_config(&cee_cfg,
+ &hw->local_dcbx_config);
+ }
+ }
+
+ /* CEE mode not enabled, try querying IEEE data */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ goto ieee;
+ else
+ goto out;
+ieee:
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
/* Get Local DCB Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
&hw->local_dcbx_config);
@@ -426,6 +623,10 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = 0;
+
out:
return ret;
}
@@ -439,10 +640,27 @@ out:
i40e_status i40e_init_dcb(struct i40e_hw *hw)
{
i40e_status ret = 0;
+ struct i40e_lldp_variables lldp_cfg;
+ u8 adminstatus = 0;
if (!hw->func_caps.dcb)
return ret;
+ /* Read LLDP NVM area */
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return ret;
+
+ /* Get the LLDP AdminStatus for the current port */
+ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+ adminstatus &= 0xF;
+
+ /* LLDP agent disabled */
+ if (!adminstatus) {
+ hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+ return ret;
+ }
+
/* Get DCBX status */
ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
if (ret)
@@ -454,6 +672,8 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
case I40E_DCBX_STATUS_IN_PROGRESS:
/* Get current DCBX configuration */
ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
break;
case I40E_DCBX_STATUS_DISABLED:
return ret;
@@ -470,3 +690,33 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
return ret;
}
+
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
+{
+ i40e_status ret = 0;
+ u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+
+ if (!lldp_cfg)
+ return I40E_ERR_PARAM;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
+ sizeof(struct i40e_lldp_variables),
+ (u8 *)lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
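i40e_init_dcb() above consumes the word read here: lldp_cfg.adminstatus packs one 4-bit LLDP AdminStatus per port, port 0 in the lowest nibble, so port N's status falls out of a shift by 4*N and a 0xF mask. A standalone sketch of that extraction (helper name and field width illustrative):

#include <stdint.h>

/* One 4-bit AdminStatus per port; 0 means the LLDP agent is disabled. */
static uint8_t lldp_adminstatus_for_port(uint16_t adminstatus, uint8_t port)
{
	return (uint8_t)((adminstatus >> (port * 4)) & 0xF);
}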
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 34cf1c30c7ff..e137e3fac8ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -65,6 +65,11 @@
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT 0
+#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT 4
+#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT 15
/* Defines for IEEE TSA types */
#define I40E_IEEE_TSA_STRICT 0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 00bc0cdb3a03..183dcb63ce98 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -207,7 +207,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
* VSI
**/
static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
- struct i40e_ieee_app_priority_table *app)
+ struct i40e_dcb_app_priority_table *app)
{
struct net_device *dev = vsi->netdev;
struct dcb_app sapp;
@@ -229,7 +229,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
* Delete given APP from all the VSIs for given PF
**/
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
- struct i40e_ieee_app_priority_table *app)
+ struct i40e_dcb_app_priority_table *app)
{
int v, err;
for (v = 0; v < pf->num_alloc_vsi; v++) {
@@ -252,7 +252,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
* Find given APP in the DCB configuration
**/
static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
- struct i40e_ieee_app_priority_table *app)
+ struct i40e_dcb_app_priority_table *app)
{
int i;
@@ -277,7 +277,7 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
struct i40e_dcbx_config *new_cfg)
{
- struct i40e_ieee_app_priority_table app;
+ struct i40e_dcb_app_priority_table app;
struct i40e_dcbx_config *dcbxcfg;
struct i40e_hw *hw = &pf->hw;
int i;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 7067f4b9159c..433a55886ad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -773,7 +773,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
{
struct i40e_tx_desc *txd;
union i40e_rx_desc *rxd;
- struct i40e_ring ring;
+ struct i40e_ring *ring;
struct i40e_vsi *vsi;
int i;
@@ -792,29 +792,32 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
vsi_seid);
return;
}
- if (is_rx_ring)
- ring = *vsi->rx_rings[ring_id];
- else
- ring = *vsi->tx_rings[ring_id];
+
+ ring = kmemdup(is_rx_ring
+ ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],
+ sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return;
+
if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
- for (i = 0; i < ring.count; i++) {
+ for (i = 0; i < ring->count; i++) {
if (!is_rx_ring) {
- txd = I40E_TX_DESC(&ring, i);
+ txd = I40E_TX_DESC(ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx\n",
i, txd->buffer_addr,
txd->cmd_type_offset_bsz);
} else if (sizeof(union i40e_rx_desc) ==
sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(&ring, i);
+ rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
rxd->read.hdr_addr);
} else {
- rxd = I40E_RX_DESC(&ring, i);
+ rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
@@ -823,26 +826,26 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
}
}
} else if (cnt == 3) {
- if (desc_n >= ring.count || desc_n < 0) {
+ if (desc_n >= ring->count || desc_n < 0) {
dev_info(&pf->pdev->dev,
"descriptor %d not found\n", desc_n);
+ kfree(ring);
return;
}
if (!is_rx_ring) {
- txd = I40E_TX_DESC(&ring, desc_n);
+ txd = I40E_TX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
txd->buffer_addr, txd->cmd_type_offset_bsz);
} else if (sizeof(union i40e_rx_desc) ==
sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(&ring, desc_n);
+ rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
rxd->read.pkt_addr, rxd->read.hdr_addr);
} else {
- rxd = I40E_RX_DESC(&ring, desc_n);
+ rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
@@ -852,6 +855,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
+ kfree(ring);
}
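Snapshotting the ring with kmemdup() keeps the sizeable struct i40e_ring off the kernel stack, at the cost that every exit path now owes a kfree() (note the matching free on the early return in the cnt == 3 branch). A minimal user-space analogue of the duplicate-then-free pattern, for illustration only:

#include <stdlib.h>
#include <string.h>

/* User-space analogue of kmemdup(): heap-copy an object so a
 * large snapshot never lands in the caller's stack frame. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}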
/**
@@ -895,90 +899,6 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
}
/**
- * i40e_dbg_dump_stats - handles dump stats write into command datum
- * @pf: the i40e_pf created in command write
- * @stats: the stats structure to be dumped
- **/
-static void i40e_dbg_dump_stats(struct i40e_pf *pf,
- struct i40e_hw_port_stats *stats)
-{
- int i;
-
- dev_info(&pf->pdev->dev, " stats:\n");
- dev_info(&pf->pdev->dev,
- " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n",
- stats->crc_errors, stats->illegal_bytes, stats->error_bytes);
- dev_info(&pf->pdev->dev,
- " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n",
- stats->mac_local_faults, stats->mac_remote_faults,
- stats->rx_length_errors);
- dev_info(&pf->pdev->dev,
- " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n",
- stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx);
- dev_info(&pf->pdev->dev,
- " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n",
- stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127);
- dev_info(&pf->pdev->dev,
- " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n",
- stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023);
- dev_info(&pf->pdev->dev,
- " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n",
- stats->rx_size_big, stats->rx_undersize, stats->rx_jabber);
- dev_info(&pf->pdev->dev,
- " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n",
- stats->rx_fragments, stats->rx_oversize, stats->tx_size_64);
- dev_info(&pf->pdev->dev,
- " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n",
- stats->tx_size_127, stats->tx_size_255, stats->tx_size_511);
- dev_info(&pf->pdev->dev,
- " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n",
- stats->tx_size_1023, stats->tx_size_big,
- stats->mac_short_packet_dropped);
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xon_rx[i],
- i+1, stats->priority_xon_rx[i+1],
- i+2, stats->priority_xon_rx[i+2],
- i+3, stats->priority_xon_rx[i+3]);
- }
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xoff_rx[i],
- i+1, stats->priority_xoff_rx[i+1],
- i+2, stats->priority_xoff_rx[i+2],
- i+3, stats->priority_xoff_rx[i+3]);
- }
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xon_tx[i],
- i+1, stats->priority_xon_tx[i+1],
- i+2, stats->priority_xon_tx[i+2],
- i+3, stats->priority_xon_rx[i+3]);
- }
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xoff_tx[i],
- i+1, stats->priority_xoff_tx[i+1],
- i+2, stats->priority_xoff_tx[i+2],
- i+3, stats->priority_xoff_tx[i+3]);
- }
- for (i = 0; i < 8; i += 4) {
- dev_info(&pf->pdev->dev,
- " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
- i, stats->priority_xon_2_xoff[i],
- i+1, stats->priority_xon_2_xoff[i+1],
- i+2, stats->priority_xon_2_xoff[i+2],
- i+3, stats->priority_xon_2_xoff[i+3]);
- }
-
- i40e_dbg_dump_eth_stats(pf, &stats->eth);
-}
-
-/**
* i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
* @pf: the i40e_pf created in command write
* @seid: the seid the user put in
@@ -1342,11 +1262,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, "dump desc aq\n");
}
- } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) {
- dev_info(&pf->pdev->dev, "pf stats:\n");
- i40e_dbg_dump_stats(pf, &pf->stats);
- dev_info(&pf->pdev->dev, "pf stats_offsets:\n");
- i40e_dbg_dump_stats(pf, &pf->stats_offsets);
} else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
dev_info(&pf->pdev->dev,
"core reset count: %d\n", pf->corer_count);
@@ -1402,6 +1317,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
bw_data = NULL;
dev_info(&pf->pdev->dev,
+ "port dcbx_mode=%d\n", cfg->dcbx_mode);
+ dev_info(&pf->pdev->dev,
"port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
cfg->etscfg.willing, cfg->etscfg.cbs,
cfg->etscfg.maxtcs);
@@ -1464,8 +1381,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev,
"dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
- dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n");
- dev_info(&pf->pdev->dev, "dump stats\n");
+ dev_info(&pf->pdev->dev, "dump switch\n");
+ dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
dev_info(&pf->pdev->dev, "dump reset stats\n");
dev_info(&pf->pdev->dev, "dump port\n");
dev_info(&pf->pdev->dev,
@@ -1580,7 +1497,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[11],
- "%hx %hx %hx %hx %x %x %x %x %x %x",
+ "%hi %hi %hi %hi %i %i %i %i %i %i",
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
@@ -1628,7 +1545,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!desc)
goto command_write_done;
cnt = sscanf(&cmd_buf[20],
- "%hx %hx %hx %hx %x %x %x %x %x %x %hd",
+ "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
&desc->flags,
&desc->opcode, &desc->datalen, &desc->retval,
&desc->cookie_high, &desc->cookie_low,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1dda467ae1ac..951e8767fc50 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -40,8 +40,9 @@ struct i40e_stats {
.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
+
#define I40E_NETDEV_STAT(_net_stat) \
- I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
+ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
#define I40E_PF_STAT(_name, _stat) \
I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
@@ -264,6 +265,14 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseKR_Full;
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
+ case I40E_DEV_ID_10G_BASE_T:
+ ecmd->supported = SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full;
+ break;
default:
/* all the rest are 10G/1G */
ecmd->supported = SUPPORTED_10000baseT_Full |
@@ -322,9 +331,13 @@ static int i40e_get_settings(struct net_device *netdev,
case I40E_PHY_TYPE_10GBASE_CR1:
case I40E_PHY_TYPE_10GBASE_T:
ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
@@ -335,14 +348,22 @@ static int i40e_get_settings(struct net_device *netdev,
case I40E_PHY_TYPE_1000BASE_KX:
case I40E_PHY_TYPE_1000BASE_T:
ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_SGMII:
@@ -426,6 +447,9 @@ no_valid_phy_type:
case I40E_LINK_SPEED_1GB:
ethtool_cmd_speed_set(ecmd, SPEED_1000);
break;
+ case I40E_LINK_SPEED_100MB:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
default:
break;
}
@@ -528,7 +552,7 @@ static int i40e_set_settings(struct net_device *netdev,
}
/* If autoneg is currently enabled */
if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
- config.abilities = abilities.abilities |
+ config.abilities = abilities.abilities &
~I40E_AQ_PHY_ENABLE_AN;
change = true;
}
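The one-character fix above is load-bearing: abilities | ~I40E_AQ_PHY_ENABLE_AN would set nearly every ability bit, while abilities & ~I40E_AQ_PHY_ENABLE_AN clears only the autoneg bit. A two-assert demonstration with illustrative bit values (the real flag value is not assumed here):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t abilities = 0x3F, an = 0x08; /* illustrative values */

	assert((uint8_t)(abilities & ~an) == 0x37); /* clears only the AN bit */
	assert((uint8_t)(abilities | ~an) == 0xFF); /* the old '|' set stray bits */
	return 0;
}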
@@ -621,11 +645,19 @@ static void i40e_get_pauseparam(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
pause->autoneg =
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
+ /* PFC enabled, so report LFC as off */
+ if (dcbx_cfg->pfc.pfcenable) {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ return;
+ }
+
if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
pause->rx_pause = 1;
} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
@@ -649,6 +681,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
i40e_status status;
u8 aq_failures;
@@ -670,8 +703,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
- if (hw->fc.current_mode == I40E_FC_PFC) {
- netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
+ if (dcbx_cfg->pfc.pfcenable) {
+ netdev_info(netdev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
return -EOPNOTSUPP;
}
@@ -788,7 +822,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_pf *pf = np->vsi->back;
- int ret_val = 0, len;
+ int ret_val = 0, len, offset;
u8 *eeprom_buff;
u16 i, sectors;
bool last;
@@ -801,19 +835,21 @@ static int i40e_get_eeprom(struct net_device *netdev,
/* check for NVMUpdate access method */
magic = hw->vendor_id | (hw->device_id << 16);
if (eeprom->magic && eeprom->magic != magic) {
+ struct i40e_nvm_access *cmd;
int errno;
/* make sure it is the right magic for NVMUpdate */
if ((eeprom->magic >> 16) != hw->device_id)
return -EINVAL;
- ret_val = i40e_nvmupd_command(hw,
- (struct i40e_nvm_access *)eeprom,
- bytes, &errno);
+ cmd = (struct i40e_nvm_access *)eeprom;
+ ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
if (ret_val)
dev_info(&pf->pdev->dev,
- "NVMUpdate read failed err=%d status=0x%x\n",
- ret_val, hw->aq.asq_last_status);
+ "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
+ ret_val, hw->aq.asq_last_status, errno,
+ (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
+ cmd->offset, cmd->data_size);
return errno;
}
@@ -842,20 +878,29 @@ static int i40e_get_eeprom(struct net_device *netdev,
len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
last = true;
}
- ret_val = i40e_aq_read_nvm(hw, 0x0,
- eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
- len,
+ offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
+ ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
- if (ret_val) {
+ if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed, invalid offset 0x%x\n",
+ offset);
+ break;
+ } else if (ret_val &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
dev_info(&pf->pdev->dev,
- "read NVM failed err=%d status=0x%x\n",
- ret_val, hw->aq.asq_last_status);
- goto release_nvm;
+ "read NVM failed, access, offset 0x%x\n",
+ offset);
+ break;
+ } else if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed offset %d err=%d status=0x%x\n",
+ offset, ret_val, hw->aq.asq_last_status);
+ break;
}
}
-release_nvm:
i40e_release_nvm(hw);
memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
free_buff:
@@ -883,6 +928,7 @@ static int i40e_set_eeprom(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_pf *pf = np->vsi->back;
+ struct i40e_nvm_access *cmd;
int ret_val = 0;
int errno;
u32 magic;
@@ -900,12 +946,14 @@ static int i40e_set_eeprom(struct net_device *netdev,
test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
return -EBUSY;
- ret_val = i40e_nvmupd_command(hw, (struct i40e_nvm_access *)eeprom,
- bytes, &errno);
- if (ret_val)
+ cmd = (struct i40e_nvm_access *)eeprom;
+ ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
+ if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY)
dev_info(&pf->pdev->dev,
- "NVMUpdate write failed err=%d status=0x%x\n",
- ret_val, hw->aq.asq_last_status);
+ "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
+ ret_val, hw->aq.asq_last_status, errno,
+ (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
+ cmd->offset, cmd->data_size);
return errno;
}
@@ -1292,6 +1340,10 @@ static int i40e_get_ts_info(struct net_device *dev,
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ /* only report HW timestamping if PTP is enabled */
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return ethtool_op_get_ts_info(dev, info);
+
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
@@ -1355,6 +1407,9 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
netif_info(pf, hw, netdev, "eeprom test\n");
*data = i40e_diag_eeprom_test(&pf->hw);
+ /* forcibly clear the NVM Update state machine */
+ pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return *data;
}
@@ -1367,7 +1422,10 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
netif_info(pf, hw, netdev, "interrupt test\n");
wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
(I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
usleep_range(1000, 2000);
*data = (swc_old == pf->sw_int_count);
@@ -1551,13 +1609,10 @@ static int i40e_set_coalesce(struct net_device *netdev,
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
} else if (ec->rx_coalesce_usecs == 0) {
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
- i40e_irq_dynamic_disable(vsi, vector);
if (ec->use_adaptive_rx_coalesce)
- netif_info(pf, drv, netdev,
- "Rx-secs=0, need to disable adaptive-Rx for a complete disable\n");
+ netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
} else {
- netif_info(pf, drv, netdev,
- "Invalid value, Rx-usecs range is 0, 8-8160\n");
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
}
@@ -1566,13 +1621,11 @@ static int i40e_set_coalesce(struct net_device *netdev,
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
} else if (ec->tx_coalesce_usecs == 0) {
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- i40e_irq_dynamic_disable(vsi, vector);
if (ec->use_adaptive_tx_coalesce)
- netif_info(pf, drv, netdev,
- "Tx-secs=0, need to disable adaptive-Tx for a complete disable\n");
+ netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
} else {
netif_info(pf, drv, netdev,
- "Invalid value, Tx-usecs range is 0, 8-8160\n");
+ "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 5d01db1d789b..a8b8bd95108d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -343,7 +343,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
**/
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
{
- struct i40e_ieee_app_priority_table app;
+ struct i40e_dcb_app_priority_table app;
struct i40e_hw *hw = &pf->hw;
u8 enabled_tc = 0;
u8 tc, i;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c3a7f4a4b775..0a7ea4c5f9d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -74,6 +74,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
/* required last entry */
{0, }
};
@@ -812,7 +813,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
+ struct i40e_ring *p;
u32 rx_page, rx_buf;
+ u64 bytes, packets;
+ unsigned int start;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -836,10 +840,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_buf = 0;
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
- struct i40e_ring *p;
- u64 bytes, packets;
- unsigned int start;
-
/* locate Tx ring */
p = ACCESS_ONCE(vsi->tx_rings[q]);
@@ -2382,6 +2382,35 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
}
/**
+ * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
+ * @ring: The Tx ring to configure
+ *
+ * This enables/disables XPS for a given Tx descriptor ring
+ * based on the TCs enabled for the VSI that ring belongs to.
+ **/
+static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ cpumask_var_t mask;
+
+ if (ring->q_vector && ring->netdev) {
+ /* Single TC mode enable XPS */
+ if (vsi->tc_config.numtc <= 1 &&
+ !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->affinity_mask,
+ ring->queue_index);
+ } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ /* Disable XPS to allow selection based on TC */
+ bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
+ netif_set_xps_queue(ring->netdev, mask,
+ ring->queue_index);
+ free_cpumask_var(mask);
+ }
+ }
+}
+
+/**
* i40e_configure_tx_ring - Configure a transmit ring context and rest
* @ring: The Tx ring to configure
*
@@ -2404,13 +2433,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
ring->atr_sample_rate = 0;
}
- /* initialize XPS */
- if (ring->q_vector && ring->netdev &&
- vsi->tc_config.numtc <= 1 &&
- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
- netif_set_xps_queue(ring->netdev,
- &ring->q_vector->affinity_mask,
- ring->queue_index);
+ /* configure XPS */
+ i40e_config_xps_tx_ring(ring);
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(tx_ctx));
@@ -2462,10 +2486,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/* Now associate this queue with this PCI function */
- if (vsi->type == I40E_VSI_VMDQ2)
+ if (vsi->type == I40E_VSI_VMDQ2) {
qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
- else
+ qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK;
+ } else {
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ }
+
qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
@@ -3440,7 +3468,7 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
break;
- udelay(10);
+ usleep_range(10, 20);
}
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
@@ -3466,7 +3494,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* warn the TX unit of coming changes */
i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
if (!enable)
- udelay(10);
+ usleep_range(10, 20);
for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
@@ -3488,6 +3516,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+ /* No waiting for the Tx queue to disable */
+ if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
+ continue;
/* wait for the change to finish */
ret = i40e_pf_txq_wait(pf, pf_q, enable);
@@ -3526,7 +3557,7 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
break;
- udelay(10);
+ usleep_range(10, 20);
}
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
@@ -3855,6 +3886,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (test_bit(__I40E_DOWN, &vsi->state))
return;
+ /* No need to disable FCoE VSI when Tx suspended */
+ if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
+ vsi->type == I40E_VSI_FCOE) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "%s: VSI seid %d skipping FCoE VSI disable\n",
+ __func__, vsi->seid);
+ return;
+ }
+
set_bit(__I40E_NEEDS_RESTART, &vsi->state);
if (vsi->netdev && netif_running(vsi->netdev)) {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
@@ -3907,6 +3947,57 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
}
}
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
+ * @vsi: the VSI being configured
+ *
+ * This function waits for the given VSI's Tx queues to be disabled.
+ **/
+static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q, ret;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ /* Check and wait for the disable status of the queue */
+ ret = i40e_pf_txq_wait(pf, pf_q, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: VSI seid %d Tx ring %d disable timeout\n",
+ __func__, vsi->seid, pf_q);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
+ * @pf: the PF
+ *
+ * This function waits for the Tx queues to be in disabled state for all the
+ * VSIs that are managed by this PF.
+ **/
+static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
+{
+ int v, ret = 0;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ /* No need to wait for FCoE VSI queues */
+ if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+ ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+#endif
/**
* i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
* @dcbcfg: the corresponding DCBx configuration structure
@@ -4378,6 +4469,31 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
}
/**
+ * i40e_resume_port_tx - Resume port Tx
+ * @pf: PF struct
+ *
+ * Resume a port's Tx and issue a PF reset in case of failure to
+ * resume.
+ **/
+static int i40e_resume_port_tx(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_aq_resume_port_tx(hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "AQ command Resume Port Tx failed = %d\n",
+ pf->hw.aq.asq_last_status);
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ }
+
+ return ret;
+}
+
+/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -4413,6 +4529,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
/* Enable DCB tagging only when more than one TC */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
+ dev_dbg(&pf->pdev->dev,
+ "DCBX offload is supported for this PF.\n");
}
} else {
dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
@@ -4449,6 +4567,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_1GB:
strlcpy(speed, "1000 Mbps", SPEED_SIZE);
break;
+ case I40E_LINK_SPEED_100MB:
+ strncpy(speed, "100 Mbps", SPEED_SIZE);
+ break;
default:
break;
}
@@ -4479,12 +4600,8 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
static int i40e_up_complete(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- u8 set_fc_aq_fail = 0;
int err;
- /* force flow control off */
- i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
-
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
i40e_vsi_configure_msix(vsi);
else
@@ -4753,9 +4870,11 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
goto err_set_queues;
} else if (vsi->type == I40E_VSI_FDIR) {
- snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
- dev_driver_string(&pf->pdev->dev));
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
+
} else {
err = -EINVAL;
goto err_setup_rx;
@@ -4995,6 +5114,8 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
+ dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
+ need_reconfig);
return need_reconfig;
}
@@ -5022,11 +5143,16 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ dev_dbg(&pf->pdev->dev,
+ "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
return ret;
/* Check MIB Type and return if event for Remote MIB update */
type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ dev_dbg(&pf->pdev->dev,
+ "%s: LLDP event mib type %s\n", __func__,
+ type ? "remote" : "local");
if (type == I40E_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
@@ -5035,12 +5161,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
goto exit;
}
- /* Convert/store the DCBX data from LLDPDU temporarily */
memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
- ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
+ /* Store the old configuration */
+ tmp_dcbx_cfg = *dcbx_cfg;
+
+ /* Get updated DCBX data from firmware */
+ ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
- /* Error in LLDPDU parsing return */
- dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
+ dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
goto exit;
}
@@ -5050,12 +5178,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
goto exit;
}
- need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
+ need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
- i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
-
- /* Overwrite the new configuration */
- *dcbx_cfg = tmp_dcbx_cfg;
+ i40e_dcbnl_flush_apps(pf, dcbx_cfg);
if (!need_reconfig)
goto exit;
@@ -5066,13 +5191,24 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
/* Reconfiguration needed; quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf);
/* Changes in configuration; update VEB/VSI */
i40e_dcb_reconfigure(pf);
- i40e_pf_unquiesce_all_vsi(pf);
+ ret = i40e_resume_port_tx(pf);
+
+ clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto exit;
+
+ /* Wait for the PF's Tx queues to be disabled */
+ ret = i40e_pf_wait_txq_disabled(pf);
+ if (!ret)
+ i40e_pf_unquiesce_all_vsi(pf);
exit:
return ret;
}
@@ -5212,6 +5348,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
int flush_wait_retry = 50;
int reg;
+ if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
+ return;
+
if (time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
@@ -5273,6 +5412,9 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->state))
return;
+ if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
+ return;
+
if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
(i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
(i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
@@ -5310,8 +5452,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
break;
case I40E_VSI_SRIOV:
- break;
-
case I40E_VSI_VMDQ2:
case I40E_VSI_CTRL:
case I40E_VSI_MIRROR:
@@ -5353,14 +5493,21 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
static void i40e_link_event(struct i40e_pf *pf)
{
bool new_link, old_link;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ /* set this to force the get_link_status call to refresh state */
+ pf->hw.phy.get_link_info = true;
- new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+ new_link = i40e_get_link_status(&pf->hw);
- if (new_link == old_link)
+ if (new_link == old_link &&
+ (test_bit(__I40E_DOWN, &vsi->state) ||
+ new_link == netif_carrier_ok(vsi->netdev)))
return;
- if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
- i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -5368,7 +5515,7 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
else
- i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
+ i40e_vsi_link_event(vsi, new_link);
if (pf->vf)
i40e_vc_notify_link_state(pf);
@@ -5418,11 +5565,17 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
(I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
} else {
u16 vec = vsi->base_vector - 1;
u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
for (i = 0; i < vsi->num_q_vectors; i++, vec++)
wr32(&vsi->back->hw,
I40E_PFINT_DYN_CTLN(vec), val);
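/* A hedged reading of the new mask bits, not stated in the patch itself:
 * writing all-ones to the ITR_INDX field selects index 3, the "no ITR
 * update" slot, so the forced software interrupt fires without rearming
 * any of the configured throttle timers; SW_ITR_INDX/_ENA do the same for
 * the software-interrupt path. Equivalent standalone trigger, non-MSI-X case:
 */
 wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
 I40E_PFINT_DYN_CTL0_INTENA_MASK | /* keep the interrupt enabled */
 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | /* fire a SW interrupt now */
 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* ITR index 3: no ITR rearm */
 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK);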
@@ -5433,7 +5586,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
}
/**
- * i40e_watchdog_subtask - Check and bring link up
+ * i40e_watchdog_subtask - periodic checks not using event-driven response
* @pf: board private structure
**/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
@@ -5445,6 +5598,15 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
+ /* make sure we don't do these things too often */
+ if (time_before(jiffies, (pf->service_timer_previous +
+ pf->service_timer_period)))
+ return;
+ pf->service_timer_previous = jiffies;
+
+ i40e_check_hang_subtask(pf);
+ i40e_link_event(pf);
+
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
@@ -5525,33 +5687,20 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
memcpy(&pf->hw.phy.link_info_old, hw_link_info,
sizeof(pf->hw.phy.link_info_old));
+ /* Do a new status request to re-enable LSE reporting
+ * and load new status information into the hw struct.
+ * This completely ignores any state information
+ * in the ARQ event info, instead choosing to always
+ * issue the AQ update link status command.
+ */
+ i40e_link_event(pf);
+
/* check for unqualified module, if link is down */
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)))
dev_err(&pf->pdev->dev,
"The driver failed to link because an unqualified module was detected.\n");
-
- /* update link status */
- hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
- hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
- hw_link_info->link_info = status->link_info;
- hw_link_info->an_info = status->an_info;
- hw_link_info->ext_info = status->ext_info;
- hw_link_info->lse_enable =
- le16_to_cpu(status->command_flags) &
- I40E_AQ_LSE_ENABLE;
-
- /* process the event */
- i40e_link_event(pf);
-
- /* Do a new status request to re-enable LSE reporting
- * and load new status information into the hw struct,
- * then see if the status changed while processing the
- * initial event.
- */
- i40e_update_link_info(&pf->hw, true);
- i40e_link_event(pf);
}
/**
@@ -5607,13 +5756,12 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
if (oldval != val)
wr32(&pf->hw, pf->hw.aq.asq.len, val);
- event.msg_size = I40E_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ event.buf_len = I40E_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
return;
do {
- event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
ret = i40e_clean_arq_element(hw, &event, &pending);
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
break;
@@ -5634,7 +5782,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
le32_to_cpu(event.desc.cookie_high),
le32_to_cpu(event.desc.cookie_low),
event.msg_buf,
- event.msg_size);
+ event.msg_len);
break;
case i40e_aqc_opc_lldp_update_mib:
dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
@@ -5740,6 +5888,9 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
+ /* Enable LB mode for the main VSI now that it is on a VEB */
+ i40e_enable_pf_switch_lb(pf);
+
/* create the remaining VSIs attached to this VEB */
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
@@ -5967,6 +6118,7 @@ static void i40e_send_version(struct i40e_pf *pf)
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
struct i40e_hw *hw = &pf->hw;
+ u8 set_fc_aq_fail = 0;
i40e_status ret;
u32 v;
@@ -6038,6 +6190,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (ret)
goto end_core_reset;
+ /* driver is only interested in link up/down and module qualification
+ * reports from firmware
+ */
+ ret = i40e_aq_set_phy_int_mask(&pf->hw,
+ I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+
+ /* make sure our flow control settings are restored */
+ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+ if (ret)
+ dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
* need to rebuild the switch model in the HW.
@@ -6092,6 +6258,13 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}
+ msleep(75);
+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
@@ -6149,12 +6322,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
I40E_GL_MDET_TX_PF_NUM_SHIFT;
- u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+ u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
I40E_GL_MDET_TX_VF_NUM_SHIFT;
u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
I40E_GL_MDET_TX_EVENT_SHIFT;
- u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
- I40E_GL_MDET_TX_QUEUE_SHIFT;
+ u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ I40E_GL_MDET_TX_QUEUE_SHIFT) -
+ pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
event, queue, pf_num, vf_num);
@@ -6167,8 +6341,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_RX_FUNCTION_SHIFT;
u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
I40E_GL_MDET_RX_EVENT_SHIFT;
- u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
- I40E_GL_MDET_RX_QUEUE_SHIFT;
+ u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+ I40E_GL_MDET_RX_QUEUE_SHIFT) -
+ pf->hw.func_caps.base_queue;
if (netif_msg_rx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
@@ -6298,7 +6473,6 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
- i40e_check_hang_subtask(pf);
i40e_sync_filters_subtask(pf);
#ifdef CONFIG_I40E_VXLAN
i40e_sync_vxlan_filters_subtask(pf);
@@ -6676,6 +6850,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
{
i40e_status err = 0;
struct i40e_hw *hw = &pf->hw;
+ int other_vecs = 0;
int v_budget, i;
int vec;
@@ -6701,10 +6876,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
*/
pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
- v_budget = 1 + pf->num_lan_msix;
- v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
+ other_vecs = 1;
+ other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
- v_budget++;
+ other_vecs++;
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -6714,7 +6889,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
#endif
/* Scale down if necessary, and the rings will share vectors */
- v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
+ pf->num_lan_msix = min_t(int, pf->num_lan_msix,
+ (hw->func_caps.num_msix_vectors - other_vecs));
+ v_budget = pf->num_lan_msix + other_vecs;
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
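/* Worked example of the new budgeting (numbers hypothetical): with
 * func_caps.num_msix_vectors = 32, num_lan_msix = 24, four VMDq VSIs at
 * two vectors each and FD sideband on, other_vecs = 1 + 4 * 2 + 1 = 10,
 * so num_lan_msix is clamped to min(24, 32 - 10) = 22 and v_budget = 32.
 * Only the LAN queue vectors absorb the shortage now; the old min_t() on
 * v_budget could silently squeeze out the misc/VMDq/FD vectors instead.
 */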
@@ -6964,20 +7141,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
**/
static int i40e_config_rss(struct i40e_pf *pf)
{
- /* Set of random keys generated using kernel random number generator */
- static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
- 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
- 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+ u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
struct i40e_hw *hw = &pf->hw;
u32 lut = 0;
int i, j;
u64 hena;
u32 reg_val;
- /* Fill out hash function seed */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
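/* netdev_rss_key_fill() draws from a single boot-time random key shared by
 * every device, so all ports hash consistently within one boot while the
 * fixed, predictable constants removed above go away. A sketch of the
 * net-core behaviour it is assumed to have:
 */
void netdev_rss_key_fill_sketch(void *buffer, size_t len)
{
 /* netdev_rss_key: global byte array, seeded at most once per boot */
 net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key));
 memcpy(buffer, netdev_rss_key, len);
}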
@@ -7341,7 +7514,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
#endif
static int i40e_get_phys_port_id(struct net_device *netdev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -7356,18 +7529,18 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
return 0;
}
-#ifdef HAVE_FDB_OPS
-#ifdef USE_CONST_DEV_UC_CHAR
+/**
+ * i40e_ndo_fdb_add - add an entry to the hardware database
+ * @ndm: the input from the stack
+ * @tb: pointer to array of nladdr (unused)
+ * @dev: the net device pointer
+ * @addr: the MAC address entry being added
+ * @vid: VLAN ID (unused)
+ * @flags: instructions from stack about fdb operation
+ */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr,
- u16 flags)
-#else
-static int i40e_ndo_fdb_add(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr,
+ const unsigned char *addr, u16 vid,
u16 flags)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_pf *pf = np->vsi->back;
@@ -7398,55 +7571,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm,
return err;
}
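/* The body elided between the two hunks above follows the same SR-IOV
 * gated dispatch seen in the removed i40e_ndo_fdb_del() below, with add in
 * place of del; a rough sketch, not the verbatim driver code (the _excl
 * variants are an assumption):
 */
 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 if (is_unicast_ether_addr(addr))
 err = dev_uc_add_excl(dev, addr);
 else if (is_multicast_ether_addr(addr))
 err = dev_mc_add_excl(dev, addr);
 else
 err = -EINVAL;
 }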
-#ifndef USE_DEFAULT_FDB_DEL_DUMP
-#ifdef USE_CONST_DEV_UC_CHAR
-static int i40e_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- const unsigned char *addr)
-#else
-static int i40e_ndo_fdb_del(struct ndmsg *ndm,
- struct net_device *dev,
- unsigned char *addr)
-#endif
-{
- struct i40e_netdev_priv *np = netdev_priv(dev);
- struct i40e_pf *pf = np->vsi->back;
- int err = -EOPNOTSUPP;
-
- if (ndm->ndm_state & NUD_PERMANENT) {
- netdev_info(dev, "FDB only supports static addresses\n");
- return -EINVAL;
- }
-
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int i40e_ndo_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev,
- int idx)
-{
- struct i40e_netdev_priv *np = netdev_priv(dev);
- struct i40e_pf *pf = np->vsi->back;
-
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
- idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
-
- return idx;
-}
-
-#endif /* USE_DEFAULT_FDB_DEL_DUMP */
-#endif /* HAVE_FDB_OPS */
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -7480,13 +7604,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_del_vxlan_port = i40e_del_vxlan_port,
#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
-#ifdef HAVE_FDB_OPS
.ndo_fdb_add = i40e_ndo_fdb_add,
-#ifndef USE_DEFAULT_FDB_DEL_DUMP
- .ndo_fdb_del = i40e_ndo_fdb_del,
- .ndo_fdb_dump = i40e_ndo_fdb_dump,
-#endif
-#endif
};
/**
@@ -7682,6 +7800,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.connection_type = 0x1; /* regular data port */
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -7931,8 +8053,8 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
- "failed to get queue tracking for VSI %d, err=%d\n",
- vsi->seid, vsi->base_vector);
+ "failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ vsi->num_q_vectors, vsi->seid, vsi->base_vector);
i40e_vsi_free_q_vectors(vsi);
ret = -ENOENT;
goto vector_setup_out;
@@ -7968,8 +8090,9 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
if (ret < 0) {
- dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
- vsi->seid, ret);
+ dev_info(&pf->pdev->dev,
+ "failed to get tracking for %d queues for VSI %d err=%d\n",
+ vsi->alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -8066,7 +8189,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
-
+ if (veb) {
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
+ __func__);
+ return NULL;
+ }
+ i40e_enable_pf_switch_lb(pf);
+ }
for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
veb = pf->veb[i];
@@ -8098,8 +8229,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
vsi->idx);
if (ret < 0) {
- dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
- vsi->seid, ret);
+ dev_info(&pf->pdev->dev,
+ "failed to get tracking for %d queues for VSI %d err=%d\n",
+ vsi->alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -8206,6 +8338,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
veb->bw_max_quanta = ets_data.tc_bw_max;
veb->is_abs_credits = bw_data.absolute_credits_enable;
+ veb->enabled_tc = ets_data.tc_valid_bits;
tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
(le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -8719,6 +8852,14 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
+ /* fill in link information and enable LSE reporting */
+ i40e_update_link_info(&pf->hw, true);
+ i40e_link_event(pf);
+
+ /* Initialize user-specific link properties */
+ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
+ I40E_AQ_AN_COMPLETED) ? true : false);
+
i40e_ptp_init(pf);
return ret;
@@ -8987,6 +9128,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.func = PCI_FUNC(pdev->devfn);
pf->instance = pfs_found;
+ if (debug != -1)
+ pf->msg_enable = debug;
+
/* do a special CORER for clearing PXE mode once at init */
if (hw->revision_id == 0 &&
(rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
@@ -9012,9 +9158,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
+
snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
- "%s-pf%d:misc",
- dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
+ "%s-%s:misc",
+ dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
err = i40e_init_shared_code(hw);
if (err) {
@@ -9158,6 +9305,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ /* driver is only interested in link up/down and module qualification
+ * reports from firmware
+ */
+ err = i40e_aq_set_phy_int_mask(&pf->hw,
+ I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+
+ msleep(75);
+ err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 25c4f9a3011f..3e70f2e45a47 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -61,7 +61,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
} else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
ret_code = I40E_ERR_NVM_BLANK_MODE;
- hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
}
return ret_code;
@@ -80,46 +80,45 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
{
i40e_status ret_code = 0;
u64 gtime, timeout;
- u64 time = 0;
+ u64 time_left = 0;
if (hw->nvm.blank_nvm_mode)
goto i40e_i40e_acquire_nvm_exit;
ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
- 0, &time, NULL);
+ 0, &time_left, NULL);
/* Reading the Global Device Timer */
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
/* Store the timeout */
- hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
- if (ret_code) {
- /* Set the polling timeout */
- if (time > I40E_MAX_NVM_TIMEOUT)
- timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
- + gtime;
- else
- timeout = hw->nvm.hw_semaphore_timeout;
+ if (ret_code)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+ access, time_left, ret_code, hw->aq.asq_last_status);
+
+ if (ret_code && time_left) {
/* Poll until the current NVM owner timeouts */
- while (gtime < timeout) {
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+ while ((gtime < timeout) && time_left) {
usleep_range(10000, 20000);
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
ret_code = i40e_aq_request_resource(hw,
I40E_NVM_RESOURCE_ID,
- access, 0, &time,
+ access, 0, &time_left,
NULL);
if (!ret_code) {
hw->nvm.hw_semaphore_timeout =
- I40E_MS_TO_GTIME(time) + gtime;
+ I40E_MS_TO_GTIME(time_left) + gtime;
break;
}
- gtime = rd32(hw, I40E_GLVFGEN_TIMER);
}
if (ret_code) {
hw->nvm.hw_semaphore_timeout = 0;
- hw->nvm.hw_semaphore_wait =
- I40E_MS_TO_GTIME(time) + gtime;
- hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
- time);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+ time_left, ret_code, hw->aq.asq_last_status);
}
}
@@ -160,7 +159,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
udelay(5);
}
if (ret_code == I40E_ERR_TIMEOUT)
- hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
+ i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
return ret_code;
}
@@ -179,7 +178,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u32 sr_reg;
if (offset >= hw->nvm.sr_size) {
- hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ offset, hw->nvm.sr_size);
ret_code = I40E_ERR_PARAM;
goto read_nvm_exit;
}
@@ -202,8 +203,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
}
if (ret_code)
- hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
- offset);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
read_nvm_exit:
return ret_code;
@@ -263,14 +265,20 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
* Firmware will check the module-based model.
*/
if ((offset + words) > hw->nvm.sr_size)
- hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
/* We can write only up to 4KB (one sector), in one AQ write */
- hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write fail error: tried to write %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
/* A single write cannot spread over two sectors */
- hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ offset, words);
else
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
@@ -438,6 +446,22 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+static const char * const i40e_nvm_update_state_str[] = {
+ "I40E_NVMUPD_INVALID",
+ "I40E_NVMUPD_READ_CON",
+ "I40E_NVMUPD_READ_SNT",
+ "I40E_NVMUPD_READ_LCB",
+ "I40E_NVMUPD_READ_SA",
+ "I40E_NVMUPD_WRITE_ERA",
+ "I40E_NVMUPD_WRITE_CON",
+ "I40E_NVMUPD_WRITE_SNT",
+ "I40E_NVMUPD_WRITE_LCB",
+ "I40E_NVMUPD_WRITE_SA",
+ "I40E_NVMUPD_CSUM_CON",
+ "I40E_NVMUPD_CSUM_SA",
+ "I40E_NVMUPD_CSUM_LCB",
+};
+
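/* The table is indexed by enum i40e_nvmupd_cmd; a defensive accessor such
 * as the following (illustrative only, not in this patch) would keep a
 * future command value without a matching string from walking off the end:
 */
static const char *i40e_nvmupd_state_name(enum i40e_nvmupd_cmd cmd)
{
 if ((unsigned int)cmd >= ARRAY_SIZE(i40e_nvm_update_state_str))
 return "I40E_NVMUPD_UNKNOWN";
 return i40e_nvm_update_state_str[cmd];
}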
/**
* i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
@@ -471,6 +495,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
default:
/* invalid state, should never happen */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
@@ -501,7 +527,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_READ_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
i40e_release_nvm(hw);
@@ -511,17 +538,22 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_READ_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
- hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
}
break;
case I40E_NVMUPD_WRITE_ERA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
if (status)
@@ -534,7 +566,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_WRITE_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
if (status)
@@ -547,22 +580,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_WRITE_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
}
break;
case I40E_NVMUPD_CSUM_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
*errno = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
i40e_release_nvm(hw);
} else {
@@ -572,6 +611,9 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
break;
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in init state\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_ERR_NVM;
*errno = -ESRCH;
break;
@@ -611,6 +653,9 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
break;
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in reading state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
@@ -644,33 +689,38 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
case I40E_NVMUPD_WRITE_LCB:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
- if (!status) {
+ if (!status)
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- }
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
case I40E_NVMUPD_CSUM_CON:
status = i40e_update_nvm_checksum(hw);
- if (status)
+ if (status) {
*errno = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
break;
case I40E_NVMUPD_CSUM_LCB:
status = i40e_update_nvm_checksum(hw);
- if (status) {
+ if (status)
*errno = hw->aq.asq_last_status ?
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
-EIO;
- } else {
+ else
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- }
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
default:
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: bad cmd %s in writing state.\n",
+ i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
*errno = -ESRCH;
break;
@@ -702,8 +752,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
/* limits on data size */
if ((cmd->data_size < 1) ||
(cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
- hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n",
- cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command data_size %d\n",
+ cmd->data_size);
*errno = -EFAULT;
return I40E_NVMUPD_INVALID;
}
@@ -755,12 +806,16 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
}
break;
}
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->aq.nvm_release_on_done);
if (upd_cmd == I40E_NVMUPD_INVALID) {
*errno = -EFAULT;
- hw_dbg(hw,
- "i40e_nvmupd_validate_command returns %d errno: %d\n",
- upd_cmd, *errno);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *errno);
}
return upd_cmd;
}
@@ -785,14 +840,18 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
- hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
bytes, last, NULL);
- hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status);
- if (status)
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_read status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
@@ -816,13 +875,17 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
- hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
last, NULL);
- hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status);
- if (status)
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_erase status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
@@ -847,13 +910,18 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
- hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
+
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last, NULL);
- hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status);
- if (status)
- *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_nvm_write status %d aq %d\n",
+ status, hw->aq.asq_last_status);
+ *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 0988b5c1fe87..2fb4306597e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -84,6 +84,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
bool atomic_reset);
+i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
@@ -173,6 +175,9 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
u8 *filter_index,
@@ -228,6 +233,10 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
/* i40e_common */
i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
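/* i40e_read_lldp_cfg() is only declared in this hunk; a plausible shape for
 * it, built from the NVM helpers shown earlier (the EMP module pointer and
 * the word-to-byte offset doubling are assumptions, not from this patch):
 */
i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
 struct i40e_lldp_variables *lldp_cfg)
{
 u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); /* words to bytes */
 i40e_status ret;

 if (!lldp_cfg)
 return I40E_ERR_PARAM;

 ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 if (ret)
 return ret;

 ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
 sizeof(struct i40e_lldp_variables), (u8 *)lldp_cfg,
 true, NULL);
 i40e_release_nvm(hw);
 return ret;
}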
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 537b6216971d..6d1ec926aa37 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -382,11 +382,17 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
incval = I40E_PTP_1GB_INCVAL;
break;
case I40E_LINK_SPEED_100MB:
- dev_warn(&pf->pdev->dev,
- "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n",
- __func__);
+ {
+ static int warn_once;
+
+ if (!warn_once) {
+ dev_warn(&pf->pdev->dev,
+ "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
+ warn_once++;
+ }
incval = 0;
break;
+ }
case I40E_LINK_SPEED_40GB:
default:
incval = I40E_PTP_40GB_INCVAL;
@@ -418,6 +424,9 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config = &pf->tstamp_config;
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return -EOPNOTSUPP;
+
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
@@ -438,22 +447,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
struct hwtstamp_config *config)
{
struct i40e_hw *hw = &pf->hw;
- u32 pf_id, tsyntype, regval;
+ u32 tsyntype, regval;
/* Reserved for future extensions. */
if (config->flags)
return -EINVAL;
- /* Confirm that 1588 is supported on this PF. */
- pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
- I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
- if (hw->pf_id != pf_id) {
- dev_err(&pf->pdev->dev,
- "PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n",
- hw->pf_id, hw->port, pf_id);
- return -EPERM;
- }
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
pf->ptp_tx = false;
@@ -556,6 +555,9 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return -EOPNOTSUPP;
+
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -625,8 +627,22 @@ void i40e_ptp_init(struct i40e_pf *pf)
{
struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
struct i40e_hw *hw = &pf->hw;
+ u32 pf_id;
long err;
+ /* Only one PF is assigned to control 1588 logic per port. Do not
+ * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID
+ */
+ pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+ I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ if (hw->pf_id != pf_id) {
+ pf->flags &= ~I40E_FLAG_PTP;
+ dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
+ __func__,
+ netdev->name);
+ return;
+ }
+
/* we have to initialize the lock first, since we can't control
* when the user will enter the PHC device entry points
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3195d82e4942..04b441460bbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2399,12 +2399,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* hardware can't handle really short frames, hardware padding works
* beyond this point
*/
- if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
- if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
- return NETDEV_TX_OK;
- skb->len = I40E_MIN_TX_LEN;
- skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
- }
+ if (skb_put_padto(skb, I40E_MIN_TX_LEN))
+ return NETDEV_TX_OK;
return i40e_xmit_frame_ring(skb, tx_ring);
}
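/* skb_put_padto() is the net-core replacement for the removed open-coded
 * pad: it extends the frame to the requested length and, on allocation
 * failure, frees the skb itself, so returning NETDEV_TX_OK without
 * touching the buffer again is correct either way. A sketch of the helper,
 * assuming the skbuff.h definition of this era:
 */
static inline int skb_put_padto_sketch(struct sk_buff *skb, unsigned int len)
{
 unsigned int size = skb->len;

 if (unlikely(size < len)) {
 len -= size;
 if (unlikely(skb_pad(skb, len)))
 return -ENOMEM; /* skb_pad() already freed the skb */
 __skb_put(skb, len); /* account for the zeroed tailroom */
 }
 return 0;
}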
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d7a625a6a14f..e60d3accb2e2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -30,10 +30,7 @@
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
-#define I40E_MAX_IRATE 0x03F
-#define I40E_MIN_IRATE 0x001
-#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
#define I40E_ITR_20K 0x0019
#define I40E_ITR_8K 0x003E
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index ce04d9093db6..c1f2eb963357 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -43,6 +43,7 @@
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
@@ -260,8 +261,7 @@ enum i40e_aq_resource_access_type {
};
struct i40e_nvm_info {
- u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
- u64 hw_semaphore_wait; /* - || - */
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
u32 timeout; /* [ms] */
u16 sr_size; /* Shadow RAM size in words */
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
@@ -380,9 +380,18 @@ struct i40e_fc_info {
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DCBX_MAX_APPS 32
#define I40E_LLDPDU_SIZE 1500
-
-/* IEEE 802.1Qaz ETS Configuration data */
-struct i40e_ieee_ets_config {
+#define I40E_TLV_STATUS_OPER 0x1
+#define I40E_TLV_STATUS_SYNC 0x2
+#define I40E_TLV_STATUS_ERR 0x4
+#define I40E_CEE_OPER_MAX_APPS 3
+#define I40E_APP_PROTOID_FCOE 0x8906
+#define I40E_APP_PROTOID_ISCSI 0x0cbc
+#define I40E_APP_PROTOID_FIP 0x8914
+#define I40E_APP_SEL_ETHTYPE 0x1
+#define I40E_APP_SEL_TCPIP 0x2
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
u8 willing;
u8 cbs;
u8 maxtcs;
@@ -391,34 +400,30 @@ struct i40e_ieee_ets_config {
u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
};
-/* IEEE 802.1Qaz ETS Recommendation data */
-struct i40e_ieee_ets_recommend {
- u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
- u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
- u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* IEEE 802.1Qaz PFC Configuration data */
-struct i40e_ieee_pfc_config {
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
u8 willing;
u8 mbc;
u8 pfccap;
u8 pfcenable;
};
-/* IEEE 802.1Qaz Application Priority data */
-struct i40e_ieee_app_priority_table {
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
u8 priority;
u8 selector;
u16 protocolid;
};
struct i40e_dcbx_config {
+ u8 dcbx_mode;
+#define I40E_DCBX_MODE_CEE 0x1
+#define I40E_DCBX_MODE_IEEE 0x2
u32 numapps;
- struct i40e_ieee_ets_config etscfg;
- struct i40e_ieee_ets_recommend etsrec;
- struct i40e_ieee_pfc_config pfc;
- struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+ struct i40e_dcb_ets_config etscfg;
+ struct i40e_dcb_ets_config etsrec;
+ struct i40e_dcb_pfc_config pfc;
+ struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
};
/* Port hardware description */
@@ -476,6 +481,11 @@ struct i40e_hw {
u32 debug_mask;
};
+static inline bool i40e_is_vf(struct i40e_hw *hw)
+{
+ return hw->mac.type == I40E_MAC_VF;
+}
+
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
@@ -1371,6 +1381,18 @@ enum i40e_reset_type {
I40E_RESET_EMPR = 3,
};
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0xD
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 70951d2edcad..61dd1b187624 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -79,6 +79,7 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_VIRTCHNL_OP_GET_STATS,
I40E_VIRTCHNL_OP_FCOE,
+ I40E_VIRTCHNL_OP_CONFIG_RSS,
/* PF sends status change events to vfs using
* the following op.
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4eeed267e4b7..5bae89550657 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -674,7 +674,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
* that the requested op was completed
* successfully
*/
- udelay(10);
+ usleep_range(10, 20);
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
rsd = true;
@@ -707,7 +707,6 @@ complete_reset:
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
i40e_flush(hw);
}
-#ifdef CONFIG_PCI_IOV
/**
* i40e_enable_pf_switch_lb
@@ -715,7 +714,7 @@ complete_reset:
*
* enable switch loop back or die - no point in a return value
**/
-static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
@@ -742,7 +741,6 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
__func__, vsi->back->hw.aq.asq_last_status);
}
}
-#endif
/**
* i40e_disable_pf_switch_lb
@@ -1869,6 +1867,12 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
return 0;
+ /* re-enable vflr interrupt cause */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+
clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
@@ -1885,12 +1889,6 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
}
}
- /* re-enable vflr interrupt cause */
- reg = rd32(hw, I40E_PFINT_ICR0_ENA);
- reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
- i40e_flush(hw);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 0adc61e1052d..9452f5247cff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -126,5 +126,6 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+void i40e_enable_pf_switch_lb(struct i40e_pf *pf);
#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index f206be917842..c1d25f8c1abc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -49,7 +49,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
- if (hw->mac.type == I40E_MAC_VF) {
+ if (i40e_is_vf(hw)) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
hw->aq.asq.len = I40E_VF_ATQLEN1;
@@ -801,7 +801,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
- u32 delay_len = 10;
do {
/* AQ designers suggest use of head for better
@@ -809,9 +808,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
*/
if (i40evf_asq_done(hw))
break;
- /* ugh! delay while spin_lock */
- udelay(delay_len);
- total_delay += delay_len;
+ usleep_range(1000, 2000);
+ total_delay++;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -838,9 +836,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
- if (i40e_is_nvm_update_op(desc))
- hw->aq.nvm_busy = true;
-
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
@@ -907,9 +902,6 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQRX: Queue is empty.\n");
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
@@ -931,13 +923,10 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
- e->msg_size = min(datalen, e->msg_size);
- if (e->msg_buf != NULL && (e->msg_size != 0))
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
- e->msg_size);
-
- if (i40e_is_nvm_update_op(&e->desc))
- hw->aq.nvm_busy = false;
+ e->msg_len);
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 91a5c5bd80f3..6c31bf22c2c3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -28,6 +28,7 @@
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
+#include "i40e_status.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
@@ -76,7 +77,8 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
struct i40e_aq_desc desc;
- u16 msg_size;
+ u16 msg_len;
+ u16 buf_len;
u8 *msg_buf;
};
@@ -93,7 +95,6 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_busy;
bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
@@ -108,7 +109,7 @@ struct i40e_adminq_info {
* i40e_aq_rc_to_posix - convert errors to user-land codes
* aq_rc: AdminQ error code to convert
**/
-static inline int i40e_aq_rc_to_posix(u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc)
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+ return -ERANGE;
return aq_to_posix[aq_rc];
}
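/* Typical call site for the new two-argument form, as used throughout the
 * i40e_nvm.c changes above (sketch; the locals are placeholders):
 */
 status = i40e_aq_read_nvm(hw, module, offset, len, bytes, last, NULL);
 if (status)
 *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);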
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index e656ea7a7920..ff1b16370da9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -33,8 +33,8 @@
* This file needs to comply with the Linux Kernel coding style.
*/
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
#define I40E_FW_API_VERSION_A0_MINOR 0x0000
struct i40e_aq_desc {
@@ -67,216 +67,216 @@ struct i40e_aq_desc {
*/
/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
};
/* Admin Queue command opcodes */
enum i40e_admin_queue_opc {
/* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
/* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
/* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
/* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
/* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
/* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
/* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
/* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
/* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
/* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
- i40e_aqc_opc_debug_modify_internals = 0xFF09,
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@@ -303,7 +303,7 @@ enum i40e_admin_queue_opc {
/* This macro is used extensively to ensure that command structures are 16
* bytes in length as they have to map to the raw array of that size.
*/
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
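I40E_CHECK_STRUCT_LEN itself is defined elsewhere in this header. A common way to implement such a compile-time size assertion is to force a constant-expression divide-by-zero on mismatch; the sketch below shows that pattern under a hypothetical name, and the header's actual macro may differ:

/* Hypothetical illustration: compiles only when sizeof(struct X) == n,
 * because a size mismatch makes the enum initializer divide by zero.
 */
#define EXAMPLE_CHECK_STRUCT_LEN(n, X) \
	enum example_assert_##X \
	{ example_assert_val_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }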
/* internal (0x00XX) commands */
@@ -321,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
/* Queue Shutdown (direct 0x0003) */
struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
@@ -352,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
-#define I40E_AQ_RESOURCE_NVM 1
-#define I40E_AQ_RESOURCE_SDP 2
-#define I40E_AQ_RESOURCE_ACCESS_READ 1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
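A sketch of filling the request descriptor for exclusive NVM write ownership, using the resource and timeout constants above; the surrounding descriptor setup and send path are assumed and not shown:

static void fill_nvm_write_request(struct i40e_aqc_request_resource *cmd)
{
	cmd->resource_id = cpu_to_le16(I40E_AQ_RESOURCE_NVM);
	cmd->access_type = cpu_to_le16(I40E_AQ_RESOURCE_ACCESS_WRITE);
	/* requested hold time in ms, per the constants defined above */
	cmd->timeout = cpu_to_le32(I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT);
}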
@@ -374,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
*/
struct i40e_aqc_list_capabilites {
u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
u8 pf_index;
u8 reserved[2];
__le32 count;
@@ -385,123 +385,123 @@ struct i40e_aqc_list_capabilites {
I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
};
/* list of caps */
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
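The list-capabilities response buffer is an array of the element struct above; a minimal sketch of scanning it for one capability ID (the element count would come from the command completion):

static struct i40e_aqc_list_capabilities_element_resp *
find_cap(struct i40e_aqc_list_capabilities_element_resp *caps,
	 u32 count, u16 cap_id)
{
	u32 i;

	for (i = 0; i < count; i++)
		if (le16_to_cpu(caps[i].id) == cap_id)
			return &caps[i];
	return NULL;
}

Usage might look like find_cap(buf, n, I40E_AQ_CAP_ID_RSS) after the response buffer has been DMA'd back.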
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
- __le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC 0x0800
-#define I40E_AQ_CPPM_EN_DMCTH 0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
-#define I40E_AQ_CPPM_EN_HPTC 0x4000
-#define I40E_AQ_CPPM_EN_DMARC 0x8000
- __le16 ttlx;
- __le32 dmacr;
- __le16 dmcth;
- u8 hptc;
- u8 reserved;
- __le32 pfltrc;
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
- __le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
- __le16 table_id;
- __le32 pfpm_proxyfc;
- __le32 ip_addr;
- u8 mac_addr[6];
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
};
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
- __le16 table_idx_mac_addr_0;
- __le16 table_idx_mac_addr_1;
- __le16 table_idx_ipv6_0;
- __le16 table_idx_ipv6_1;
- __le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
- u8 mac_addr_0[6];
- u8 mac_addr_1[6];
- u8 local_mac_addr[6];
- u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
- u8 ipv6_addr_1[16];
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
};
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
-#define I40E_AQ_LAA_FLAG_WR 0x8000
- u8 reserved[2];
- __le32 sal;
- __le16 sah;
- u8 reserved2[6];
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
};
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
-#define I40E_AQC_LAN_ADDR_VALID 0x10
-#define I40E_AQC_SAN_ADDR_VALID 0x20
-#define I40E_AQC_PORT_ADDR_VALID 0x40
-#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
@@ -517,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
- __le16 command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
-#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
- __le16 mac_sah;
- __le32 mac_sal;
- u8 reserved[8];
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
@@ -545,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
* command
*/
struct i40e_aqc_switch_seid {
- __le16 seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
@@ -557,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_switch_config_header_resp {
- __le16 num_reported;
- __le16 num_total;
- u8 reserved[12];
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
};
struct i40e_aqc_switch_config_element_resp {
- u8 element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC 1
-#define I40E_AQ_SW_ELEM_TYPE_PF 2
-#define I40E_AQ_SW_ELEM_TYPE_VF 3
-#define I40E_AQ_SW_ELEM_TYPE_EMP 4
-#define I40E_AQ_SW_ELEM_TYPE_BMC 5
-#define I40E_AQ_SW_ELEM_TYPE_PV 16
-#define I40E_AQ_SW_ELEM_TYPE_VEB 17
-#define I40E_AQ_SW_ELEM_TYPE_PA 18
-#define I40E_AQ_SW_ELEM_TYPE_VSI 19
- u8 revision;
-#define I40E_AQ_SW_ELEM_REV_1 1
- __le16 seid;
- __le16 uplink_seid;
- __le16 downlink_seid;
- u8 reserved[3];
- u8 connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR 0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_CONN_TYPE_CASCADED 0x3
- __le16 scheduler_id;
- __le16 element_info;
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
};
/* Get Switch Configuration (indirect 0x0200)
@@ -592,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp {
* the first in the array is the header, remainder are elements
*/
struct i40e_aqc_get_switch_config_resp {
- struct i40e_aqc_get_switch_config_header_resp header;
- struct i40e_aqc_switch_config_element_resp element[1];
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
};
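Because element[] is a variable-length tail sized by header.num_reported, a walker over the response buffer might look like this sketch:

static void walk_switch_config(struct i40e_aqc_get_switch_config_resp *resp)
{
	u16 i, n = le16_to_cpu(resp->header.num_reported);

	for (i = 0; i < n; i++) {
		struct i40e_aqc_switch_config_element_resp *el =
							&resp->element[i];
		/* el->element_type is one of I40E_AQ_SW_ELEM_TYPE_* */
	}
}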
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
struct i40e_aqc_add_remove_statistics {
- __le16 seid;
- __le16 vlan;
- __le16 stat_index;
- u8 reserved[10];
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
/* Set Port Parameters command (direct 0x0203) */
struct i40e_aqc_set_port_parameters {
- __le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
- __le16 bad_frame_vsi;
- __le16 default_seid; /* reserved for command */
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
/* Get Switch Resource Allocation (indirect 0x0204) */
struct i40e_aqc_get_switch_resource_alloc {
- u8 num_entries; /* reserved for command */
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
/* expect an array of these structs in the response buffer */
struct i40e_aqc_switch_resource_alloc_element_resp {
- u8 resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
- u8 reserved1;
- __le16 guaranteed;
- __le16 total;
- __le16 used;
- __le16 total_unalloced;
- u8 reserved2[6];
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
};
/* Add VSI (indirect 0x0210)
@@ -672,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
* uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
- __le16 uplink_seid;
- u8 connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
- u8 reserved1;
- u8 vf_id;
- u8 reserved2;
- __le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF 0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
-#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
- __le32 addr_high;
- __le32 addr_low;
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
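vsi_flags packs a 2-bit VSI type plus the cascaded-PV flag; a sketch of encoding a PF-type VSI with the shift/mask pair above:

static __le16 vsi_flags_for_pf(void)
{
	u16 flags = (I40E_AQ_VSI_TYPE_PF << I40E_AQ_VSI_TYPE_SHIFT) &
		    I40E_AQ_VSI_TYPE_MASK;

	return cpu_to_le16(flags);
}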
@@ -707,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
struct i40e_aqc_vsi_properties_data {
	/* first 96 bytes are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
/* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
/* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
/* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
+	__le16 pvid; /* VLANs include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
/* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
/* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
/* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
- u8 queueing_opt_reserved[3];
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
/* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
+ u8 up_enable_bits;
+ u8 sched_reserved;
/* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress table */
- u8 cmd_reserved[8];
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
- __le16 qs_handle[8];
+ __le16 qs_handle[8];
#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
};
I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
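Two of the packed fields above merit a worked example: ingress_table holds eight 3-bit user-priority entries (UPn at bit 3*n), and each tc_mapping word packs a 9-bit queue offset with a 3-bit queue-count field. Treating the count field as a log2 exponent is an assumption for illustration; the header only defines the bit layout:

/* Illustrative encoders for the packed fields above (not from the patch). */
static __le32 up_table_identity(void)
{
	u32 table = 0;
	int up;

	/* map each user priority to itself: 3 bits per UP */
	for (up = 0; up < 8; up++)
		table |= (u32)up << (up * 3);
	return cpu_to_le32(table);
}

static __le16 tc_map(u16 queue_offset, u8 num_queues_log2)
{
	u16 v = (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
		I40E_AQ_VSI_TC_QUE_OFFSET_MASK;

	v |= ((u16)num_queues_log2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
	     I40E_AQ_VSI_TC_QUE_NUMBER_MASK;
	return cpu_to_le16(v);
}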
@@ -831,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
* (IS_CTRL_PORT only works on add PV)
*/
struct i40e_aqc_add_update_pv {
- __le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
- __le16 uplink_seid;
- __le16 connected_seid;
- u8 reserved[10];
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
struct i40e_aqc_add_update_pv_completion {
/* reserved for update; for add also encodes error if rc == ENOSPC */
- __le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
- u8 reserved[14];
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
@@ -860,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
*/
struct i40e_aqc_get_pv_params_completion {
- __le16 seid;
- __le16 default_stag;
- __le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
- u8 reserved[8];
- __le16 default_port_seid;
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
/* Add VEB (direct 0x0230) */
struct i40e_aqc_add_veb {
- __le16 uplink_seid;
- __le16 downlink_seid;
- __le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
- u8 enable_tcs;
- u8 reserved[9];
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
struct i40e_aqc_add_veb_completion {
- u8 reserved[6];
- __le16 switch_seid;
+ u8 reserved[6];
+ __le16 switch_seid;
/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
- __le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
@@ -910,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
* uses i40e_aqc_switch_seid for the descriptor
*/
struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
@@ -929,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
/* used for the command for most vlan commands */
struct i40e_aqc_macvlan {
- __le16 num_addresses;
- __le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
- __le32 addr_high;
- __le32 addr_low;
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
/* indirect data for command and response */
struct i40e_aqc_add_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- __le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
- __le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
/* response section */
- u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
-#define I40E_AQC_MM_ERR_NO_RES 0xFF
- u8 reserved1[3];
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
};
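A sketch of populating one add-MACVLAN element for a perfect-match filter; the containing i40e_aqc_macvlan command and the DMA setup for the indirect buffer are assumed:

static void fill_macvlan_entry(struct i40e_aqc_add_macvlan_element_data *e,
			       const u8 *mac, u16 vlan)
{
	memcpy(e->mac_addr, mac, 6);
	e->vlan_tag = cpu_to_le16(vlan);
	e->flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
}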
struct i40e_aqc_add_remove_macvlan_completion {
@@ -979,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
*/
struct i40e_aqc_remove_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- u8 flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
- u8 reserved[3];
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
/* reply section */
- u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
- u8 reply_reserved[3];
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
};
/* Add VLAN (indirect 0x0252)
@@ -999,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data {
* use the generic i40e_aqc_macvlan for the command
*/
struct i40e_aqc_add_remove_vlan_element_data {
- __le16 vlan_tag;
- u8 vlan_flags;
+ __le16 vlan_tag;
+ u8 vlan_flags;
/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
- u8 reserved;
- u8 result;
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
- u8 reserved1[3];
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
};
struct i40e_aqc_add_remove_vlan_completion {
- u8 reserved[4];
- __le16 vlans_used;
- __le16 vlans_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
/* Set VSI Promiscuous Modes (direct 0x0254) */
struct i40e_aqc_set_vsi_promiscuous_modes {
- __le16 promiscuous_flags;
- __le16 valid_flags;
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
/* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
-#define I40E_AQC_SET_VSI_DEFAULT 0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
- __le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- __le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
- u8 reserved[8];
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
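promiscuous_flags carries the desired state while valid_flags selects which bits the firmware should apply, so enabling unicast promiscuity sets the same bit in both words. A sketch:

static void set_uc_promisc(struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
			   u16 seid, bool enable)
{
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->promiscuous_flags =
		cpu_to_le16(enable ? I40E_AQC_SET_VSI_PROMISC_UNICAST : 0);
	cmd->seid = cpu_to_le16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
}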
@@ -1060,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_add_tag {
- __le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
- __le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- __le16 queue_number;
- u8 reserved[8];
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
struct i40e_aqc_add_remove_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
@@ -1085,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
* Uses generic i40e_aqc_add_remove_tag_completion for completion
*/
struct i40e_aqc_remove_tag {
- __le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- u8 reserved[12];
+ __le16 tag;
+ u8 reserved[12];
};
/* Add multicast E-Tag (direct 0x0257)
@@ -1098,22 +1097,22 @@ struct i40e_aqc_remove_tag {
* and no external data
*/
struct i40e_aqc_add_remove_mcast_etag {
- __le16 pv_seid;
- __le16 etag;
- u8 num_unicast_etags;
- u8 reserved[3];
- __le32 addr_high; /* address of array of 2-byte s-tags */
- __le32 addr_low;
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
struct i40e_aqc_add_remove_mcast_etag_completion {
- u8 reserved[4];
- __le16 mcast_etags_used;
- __le16 mcast_etags_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
@@ -1121,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
/* Update S/E-Tag (direct 0x0259) */
struct i40e_aqc_update_tag {
- __le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 old_tag;
- __le16 new_tag;
- u8 reserved[10];
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
struct i40e_aqc_update_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
@@ -1146,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
* and the generic direct completion structure
*/
struct i40e_aqc_add_remove_control_packet_filter {
- u8 mac[6];
- __le16 etype;
- __le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
- __le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
- __le16 queue;
- u8 reserved[2];
+ __le16 queue;
+ u8 reserved[2];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
struct i40e_aqc_add_remove_control_packet_filter_completion {
- __le16 mac_etype_used;
- __le16 etype_used;
- __le16 mac_etype_free;
- __le16 etype_free;
- u8 reserved[8];
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
@@ -1180,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
* and the generic indirect completion structure
*/
struct i40e_aqc_add_remove_cloud_filters {
- u8 num_filters;
- u8 reserved;
- __le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
struct i40e_aqc_add_remove_cloud_filters_element_data {
- u8 outer_mac[6];
- u8 inner_mac[6];
- __le16 inner_vlan;
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
union {
struct {
u8 reserved[12];
@@ -1206,52 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 data[16];
} v6;
} ipaddr;
- __le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
/* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
/* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
/* 0x0007 reserved */
/* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-
- __le32 tenant_id;
- u8 reserved[4];
- __le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved2[14];
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
/* response section */
- u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
- u8 response_reserved[7];
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
};
struct i40e_aqc_remove_cloud_filters_completion {
@@ -1273,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
struct i40e_aqc_add_delete_mirror_rule {
__le16 seid;
__le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
__le16 num_entries;
__le16 destination; /* VSI for add, rule id for delete */
__le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
@@ -1290,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
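For an add, destination names the target VSI and the indirect buffer holds num_entries 2-byte IDs; a sketch of a VLAN-mirroring rule (the seid field and the buffer's DMA plumbing are assumed):

static void fill_vlan_mirror(struct i40e_aqc_add_delete_mirror_rule *cmd,
			     u16 dst_vsi_seid, u16 num_vlans)
{
	cmd->rule_type = cpu_to_le16((I40E_AQC_MIRROR_RULE_TYPE_VLAN <<
				      I40E_AQC_MIRROR_RULE_TYPE_SHIFT) &
				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
	cmd->num_entries = cpu_to_le16(num_vlans);
	cmd->destination = cpu_to_le16(dst_vsi_seid);
	/* addr_high/addr_low point at the array of __le16 VLAN ids */
}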
struct i40e_aqc_add_delete_mirror_rule_completion {
- u8 reserved[2];
- __le16 rule_id; /* only used on add */
- __le16 mirror_rules_used;
- __le16 mirror_rules_free;
- __le32 addr_high;
- __le32 addr_low;
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
@@ -1306,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
* the command and response use the same descriptor structure
*/
struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
@@ -1325,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
* this generic struct to pass the SEID in param0
*/
struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
@@ -1340,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp {
/* Configure VSI BW limits (direct 0x0400) */
struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
@@ -1354,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
};
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
};
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
};
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
};
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
@@ -1415,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
* Disable Physical Port ETS (indirect 0x0415)
*/
struct i40e_aqc_configure_switching_comp_ets_data {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
- u8 tc_strict_priority_flags;
- u8 reserved1[17];
- u8 tc_bw_share_credits[8];
- u8 reserved2[96];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credit[8];
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
};
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
struct i40e_aqc_configure_switching_comp_bw_config_data {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits; /* bool */
- u8 tc_bw_share_credits[8];
- u8 reserved1[20];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
};
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
- u8 tc_valid_bits;
- u8 reserved[35];
- __le16 port_bw_limit;
- u8 reserved1[2];
- u8 tc_bw_max; /* 0-3, limit = 2^max */
- u8 reserved2[23];
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
};
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 reserved1;
- u8 tc_strict_priority_bits;
- u8 reserved2;
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved3[32];
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
};
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
struct i40e_aqc_query_switching_comp_bw_config_resp {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits_enable; /* bool */
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
};
/* Suspend/resume port TX traffic
@@ -1494,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (indirect 0x041D)
*/
struct i40e_aqc_configure_partition_bw_data {
- __le16 pf_valid_bits;
- u8 min_bw[16]; /* guaranteed bandwidth */
- u8 max_bw[16]; /* bandwidth limit */
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
};
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
enum i40e_aq_hmc_profile {
/* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
};
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
enum i40e_aq_phy_type {
I40E_PHY_TYPE_SGMII = 0x0,
@@ -1582,147 +1578,147 @@ struct i40e_aqc_module_desc {
};
struct i40e_aq_get_phy_abilities_resp {
- __le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum bit patterns */
- u8 abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
- __le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
- __le32 eeer_val;
- u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
- u8 phy_id[4];
- u8 module_type[3];
- u8 qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS 16
- struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
- __le32 phy_type;
- u8 link_speed;
- u8 abilities;
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
/* bits 0-2 use the values from get_phy_abilities_resp */
#define I40E_AQ_PHY_ENABLE_LINK 0x08
#define I40E_AQ_PHY_ENABLE_AN 0x10
#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
- __le16 eee_capability;
- __le32 eeer;
- u8 low_power_ctrl;
- u8 reserved[3];
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
/* Set MAC Config command data structure (direct 0x0603) */
struct i40e_aq_set_mac_config {
- __le16 max_frame_size;
- u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
- u8 tx_timer_priority; /* bitmap */
- __le16 tx_timer_value;
- __le16 fc_refresh_threshold;
- u8 reserved[8];
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
/* Restart Auto-Negotiation (direct 0x605) */
struct i40e_aqc_set_link_restart_an {
- u8 command;
-#define I40E_AQ_PHY_RESTART_AN 0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
/* Get Link Status cmd & response data structure (direct 0x0607) */
struct i40e_aqc_get_link_status {
- __le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
-#define I40E_AQ_LSE_DISABLE 0x2
-#define I40E_AQ_LSE_ENABLE 0x3
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
/* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED 0x1
- u8 phy_type; /* i40e_aq_phy_type */
- u8 link_speed; /* i40e_aq_link_speed */
- u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
- u8 an_info;
-#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
-#define I40E_AQ_LINK_PAUSE_TX 0x20
-#define I40E_AQ_LINK_PAUSE_RX 0x40
-#define I40E_AQ_QUALIFIED_MODULE 0x80
- u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
- u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
- __le16 max_frame_size;
- u8 config;
-#define I40E_AQ_CONFIG_CRC_ENA 0x04
-#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 reserved[5];
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
/* Set event mask command (direct 0x613) */
struct i40e_aqc_set_phy_int_mask {
- u8 reserved[8];
- __le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
-#define I40E_AQ_EVENT_MEDIA_NA 0x0004
-#define I40E_AQ_EVENT_LINK_FAULT 0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
- u8 reserved1[6];
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
@@ -1732,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
* Get Link Partner AN advt register (direct 0x0616)
*/
struct i40e_aqc_an_advt_reg {
- __le32 local_an_reg0;
- __le16 local_an_reg1;
- u8 reserved[10];
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
/* Set PHY Debug command (0x0622) */
struct i40e_aqc_set_phy_debug {
- u8 command_flags;
+ u8 command_flags;
#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
@@ -1761,15 +1757,15 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
- u8 reserved[15];
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
- I40E_AQC_PHY_REG_INTERNAL = 0x1,
- I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
- I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
};
/* NVM Read command (indirect 0x0701)
@@ -1777,40 +1773,40 @@ enum i40e_aq_phy_reg_type {
* NVM Update commands (indirect 0x0703)
*/
struct i40e_aqc_nvm_update {
- u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
- u8 module_pointer;
- __le16 length;
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
- __le16 cmd_flags;
+ __le16 cmd_flags;
#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
#define ANVM_READ_SINGLE_FEATURE 0
#define ANVM_READ_MULTIPLE_FEATURES 1
- __le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
- __le32 address_high;
- __le32 address_low;
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
/* NVM Config Write (indirect 0x0705) */
struct i40e_aqc_nvm_config_write {
- __le16 cmd_flags;
- __le16 element_count;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
@@ -1835,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field {
* Send to Peer PF command (indirect 0x0803)
*/
struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
@@ -1874,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
* uses i40e_aq_desc
*/
struct i40e_aqc_alternate_write_done {
- __le16 cmd_flags;
+ __le16 cmd_flags;
#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
- u8 reserved[14];
+ u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
/* Set OEM mode (direct 0x0905) */
struct i40e_aqc_alternate_set_mode {
- __le32 mode;
+ __le32 mode;
#define I40E_AQ_ALTERNATE_MODE_NONE 0
#define I40E_AQ_ALTERNATE_MODE_OEM 1
- u8 reserved[12];
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
@@ -1900,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
/* Lan Queue Overflow Event (direct, 0x1001) */
struct i40e_aqc_lan_overflow {
- __le32 prtdcb_rupto;
- __le32 otx_ctl;
- u8 reserved[8];
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
/* Get LLDP MIB (indirect 0x0A00) */
struct i40e_aqc_lldp_get_mib {
- u8 type;
- u8 reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
-#define I40E_AQ_LLDP_MIB_LOCAL 0x0
-#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
/* TX pause flags use I40E_AQ_LINK_TX_* above */
- __le16 local_len;
- __le16 remote_len;
- u8 reserved2[2];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
@@ -1935,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
* also used for the event (with type in the command field)
*/
struct i40e_aqc_lldp_update_mib {
- u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
@@ -1949,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
* Delete LLDP TLV (indirect 0x0A04)
*/
struct i40e_aqc_lldp_add_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved1[1];
- __le16 len;
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
/* Update LLDP TLV (indirect 0x0A03) */
struct i40e_aqc_lldp_update_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved;
- __le16 old_len;
- __le16 new_offset;
- __le16 new_len;
- __le32 addr_high;
- __le32 addr_low;
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
@@ -1985,9 +1981,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
struct i40e_aqc_lldp_start {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
- u8 reserved[15];
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
@@ -1998,13 +1994,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
- u8 reserved1[10];
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
@@ -2013,8 +2009,8 @@ struct i40e_aqc_add_udp_tunnel_completion {
__le16 udp_port;
u8 filter_entry_index;
u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
u8 total_filters;
u8 reserved[11];
};
@@ -2023,23 +2019,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 pf_filters;
- u8 total_filters;
- u8 reserved2[11];
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved;
- u8 tunnels_free;
- u8 reserved1[9];
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
@@ -2068,11 +2060,11 @@ struct i40e_aqc_tunnel_key_structure {
u8 key1_len; /* 0 to 15 */
u8 key2_len; /* 0 to 15 */
u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
u8 network_key_index;
#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
@@ -2085,21 +2077,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
/* OEM mode commands (direct 0xFE0x) */
struct i40e_aqc_oem_param_change {
- __le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
-#define I40E_AQ_OEM_PARAM_MAC 2
- __le32 param_value1;
- u8 param_value2[8];
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
struct i40e_aqc_oem_state_change {
- __le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
-#define I40E_AQ_OEM_STATE_LINK_UP 0x1
- u8 reserved[12];
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
@@ -2111,18 +2103,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
/* set test mode (0xFF01, internal) */
struct i40e_acq_set_test_mode {
- u8 mode;
-#define I40E_AQ_TEST_PARTIAL 0
-#define I40E_AQ_TEST_FULL 1
-#define I40E_AQ_TEST_NVM 2
- u8 reserved[3];
- u8 command;
-#define I40E_AQ_TEST_OPEN 0
-#define I40E_AQ_TEST_CLOSE 1
-#define I40E_AQ_TEST_INC 2
- u8 reserved2[3];
- __le32 address_high;
- __le32 address_low;
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
@@ -2175,21 +2167,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
- u8 cluster_id;
- u8 table_id;
- __le16 data_size;
- __le32 idx;
- __le32 address_high;
- __le32 address_low;
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
struct i40e_aqc_debug_modify_internals {
- u8 cluster_id;
- u8 cluster_specific_params[7];
- __le32 address_high;
- __le32 address_low;
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
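The _SHIFT/_MASK pairs realigned throughout this header follow the driver's usual pack/extract convention: shift the field into place, then mask off stray bits. A minimal standalone sketch of that convention (hypothetical helper names, not part of the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
	#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)

	/* Pack a queue number into the flags word. */
	static uint16_t set_queue(uint16_t flags, uint16_t q)
	{
		flags &= ~I40E_AQC_ADD_CLOUD_QUEUE_MASK;
		flags |= (q << I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) &
			 I40E_AQC_ADD_CLOUD_QUEUE_MASK;
		return flags;
	}

	/* Extract it again. */
	static uint16_t get_queue(uint16_t flags)
	{
		return (flags & I40E_AQC_ADD_CLOUD_QUEUE_MASK) >>
		       I40E_AQC_ADD_CLOUD_QUEUE_SHIFT;
	}

	int main(void)
	{
		uint16_t flags = set_queue(0, 42);
		printf("queue = %u\n", get_queue(flags)); /* prints 42 */
		return 0;
	}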
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 952560551964..28c40c57d4f5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -50,6 +50,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index f6dcf9dd9290..c7f29626eada 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -30,10 +30,7 @@
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
-#define I40E_MAX_IRATE 0x03F
-#define I40E_MIN_IRATE 0x001
-#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
#define I40E_ITR_20K 0x0019
#define I40E_ITR_8K 0x003E
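The ITR defines kept here encode an interval in 2 usec units, per the register comment, so the named rates fall out arithmetically: 0x0005 is 10 usec (100K interrupts/sec), 0x0019 is 50 usec (20K), and 0x003E is 124 usec (roughly 8K). A standalone check of that arithmetic (assumed interpretation of the resolution comment):

	#include <stdio.h>

	/* ITR register values from i40e_txrx.h; each unit is 2 usec. */
	static const struct { const char *name; unsigned val; } itr[] = {
		{ "I40E_ITR_100K", 0x0005 },	/* 10 usec  -> 100000 ints/sec */
		{ "I40E_ITR_20K",  0x0019 },	/* 50 usec  -> 20000 ints/sec  */
		{ "I40E_ITR_8K",   0x003E },	/* 124 usec -> ~8064 ints/sec  */
	};

	int main(void)
	{
		for (unsigned i = 0; i < sizeof(itr) / sizeof(itr[0]); i++) {
			unsigned usec = itr[i].val * 2;
			printf("%s: %u usec -> %u interrupts/sec\n",
			       itr[i].name, usec, 1000000 / usec);
		}
		return 0;
	}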
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 15376436cead..68aec11f6523 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -43,6 +43,7 @@
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
@@ -259,8 +260,7 @@ enum i40e_aq_resource_access_type {
};
struct i40e_nvm_info {
- u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
- u64 hw_semaphore_wait; /* - || - */
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
u32 timeout; /* [ms] */
u16 sr_size; /* Shadow RAM size in words */
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
@@ -475,6 +475,11 @@ struct i40e_hw {
u32 debug_mask;
};
+static inline bool i40e_is_vf(struct i40e_hw *hw)
+{
+ return hw->mac.type == I40E_MAC_VF;
+}
+
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
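The new i40e_is_vf() helper gives code shared between the PF and VF drivers a single predicate for MAC type. A hypothetical call site (sketch only; the PF-side register is illustrative and not taken from this patch):

	static u32 read_reset_status(struct i40e_hw *hw)
	{
		if (i40e_is_vf(hw))
			return rd32(hw, I40E_VFGEN_RSTAT);	/* VF reset status */
		return rd32(hw, I40E_GLGEN_RSTAT);		/* PF/global view */
	}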
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index cd18d5689006..e0c8208138f4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -79,6 +79,7 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_VIRTCHNL_OP_GET_STATS,
I40E_VIRTCHNL_OP_FCOE,
+ I40E_VIRTCHNL_OP_CONFIG_RSS,
/* PF sends status change events to vfs using
* the following op.
*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 30ef519d4b91..981224743c73 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -191,6 +191,7 @@ struct i40evf_adapter {
struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
struct list_head vlan_filter_list;
char misc_vector_name[IFNAMSIZ + 9];
+ int num_active_queues;
/* TX */
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
@@ -243,7 +244,7 @@ struct i40evf_adapter {
struct i40e_hw hw; /* defined in i40e_type.h */
enum i40evf_state_t state;
- volatile unsigned long crit_section;
+ unsigned long crit_section;
struct work_struct watchdog_task;
bool netdev_registered;
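Dropping volatile from crit_section is safe because the field is only ever manipulated through the atomic bitops, which supply their own ordering guarantees. Condensed from the lock/unlock pattern visible later in this patch (i40evf_reset_task and i40evf_add_filter):

	/* Acquire: spin until the bit transitions 0 -> 1 on our behalf. */
	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	/* ... touch shared state: filter lists, queue config, etc. ... */

	/* Release: let the next waiter through. */
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);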
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index efee6b290c0f..69b97bac182c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -58,8 +58,8 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
#define I40EVF_QUEUE_STATS_LEN(_dev) \
- (((struct i40evf_adapter *) \
- netdev_priv(_dev))->vsi_res->num_queue_pairs \
+ (((struct i40evf_adapter *)\
+ netdev_priv(_dev))->num_active_queues \
* 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
@@ -121,11 +121,11 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev,
p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
data[i] = *(u64 *)p;
}
- for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ for (j = 0; j < adapter->num_active_queues; j++) {
data[i++] = adapter->tx_rings[j]->stats.packets;
data[i++] = adapter->tx_rings[j]->stats.bytes;
}
- for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ for (j = 0; j < adapter->num_active_queues; j++) {
data[i++] = adapter->rx_rings[j]->stats.packets;
data[i++] = adapter->rx_rings[j]->stats.bytes;
}
@@ -151,13 +151,13 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
@@ -175,6 +175,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
static u32 i40evf_get_msglevel(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+
return adapter->msg_enable;
}
@@ -189,6 +190,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev)
static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+
adapter->msg_enable = data;
}
@@ -219,7 +221,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
* but the number of rings is not reported.
**/
static void i40evf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
@@ -280,7 +282,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
* this functionality.
**/
static int i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
@@ -308,7 +310,7 @@ static int i40evf_get_coalesce(struct net_device *netdev,
* Change current coalescing settings.
**/
static int i40evf_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
@@ -430,7 +432,7 @@ static int i40evf_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = adapter->vsi_res->num_queue_pairs;
+ cmd->data = adapter->num_active_queues;
ret = 0;
break;
case ETHTOOL_GRXFH:
@@ -598,12 +600,12 @@ static void i40evf_get_channels(struct net_device *netdev,
struct i40evf_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = adapter->vsi_res->num_queue_pairs;
+ ch->max_combined = adapter->num_active_queues;
ch->max_other = NONQ_VECS;
ch->other_count = NONQ_VECS;
- ch->combined_count = adapter->vsi_res->num_queue_pairs;
+ ch->combined_count = adapter->num_active_queues;
}
/**
@@ -621,17 +623,23 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
* i40evf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
- * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ * @key: hash key
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
-static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
u32 hlut_val;
int i, j;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+
for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
indir[j++] = hlut_val & 0xff;
@@ -646,19 +654,26 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
* i40evf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
- * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ * @key: hash key
*
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
u32 hlut_val;
int i, j;
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+
for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
hlut_val = indir[j++];
hlut_val |= indir[j++] << 8;
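Both rxfh paths above rely on each I40E_VFQF_HLUT register holding four one-byte LUT entries, which is why the reader peels bytes off with & 0xff and the writer ors in successive 8-bit shifts. A standalone sketch of that packing (hypothetical helper names):

	#include <stdint.h>
	#include <stdio.h>

	/* Pack four 8-bit queue indices into one 32-bit HLUT register value. */
	static uint32_t hlut_pack(const uint32_t *indir, int j)
	{
		return  (indir[j]     & 0xff)         |
			((indir[j + 1] & 0xff) << 8)  |
			((indir[j + 2] & 0xff) << 16) |
			((indir[j + 3] & 0xff) << 24);
	}

	/* Unpack one register value back into four table entries. */
	static void hlut_unpack(uint32_t val, uint32_t *indir, int j)
	{
		indir[j]     =  val        & 0xff;
		indir[j + 1] = (val >> 8)  & 0xff;
		indir[j + 2] = (val >> 16) & 0xff;
		indir[j + 3] = (val >> 24) & 0xff;
	}

	int main(void)
	{
		uint32_t in[4] = { 0, 1, 2, 3 }, out[4];

		hlut_unpack(hlut_pack(in, 0), out, 0);
		printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);
		return 0;
	}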
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index c51bc7a33bc5..cabaf599f562 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.0.5"
+#define DRV_VERSION "1.0.6"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -185,6 +185,7 @@ static void i40evf_tx_timeout(struct net_device *netdev)
static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
+
wr32(hw, I40E_VFINT_DYN_CTL01, 0);
/* read flush */
@@ -200,6 +201,7 @@ static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
+
wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
@@ -226,7 +228,6 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
}
/* read flush */
rd32(hw, I40E_VFGEN_RSTAT);
-
}
/**
@@ -253,8 +254,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
* @adapter: board private structure
* @mask: bitmap of vectors to trigger
**/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
- u32 mask)
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
{
struct i40e_hw *hw = &adapter->hw;
int i;
@@ -397,8 +397,8 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
int q_vectors;
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
- int rxr_remaining = adapter->vsi_res->num_queue_pairs;
- int txr_remaining = adapter->vsi_res->num_queue_pairs;
+ int rxr_remaining = adapter->num_active_queues;
+ int txr_remaining = adapter->num_active_queues;
int i, j;
int rqpv, tqpv;
int err = 0;
@@ -551,6 +551,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
int i;
int q_vectors;
+
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (i = 0; i < q_vectors; i++) {
@@ -584,7 +585,8 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+
+ for (i = 0; i < adapter->num_active_queues; i++)
adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}
@@ -629,7 +631,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
rx_buf_len = ALIGN(max_frame, 1024);
}
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
}
@@ -667,9 +669,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
struct i40evf_vlan_filter *f;
f = i40evf_find_vlan(adapter, vlan);
- if (NULL == f) {
+ if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f)
+ if (!f)
return NULL;
f->vlan = vlan;
@@ -705,7 +707,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+ __always_unused __be16 proto, u16 vid)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
@@ -720,7 +722,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+ __always_unused __be16 proto, u16 vid)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
@@ -772,9 +774,9 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
udelay(1);
f = i40evf_find_filter(adapter, macaddr);
- if (NULL == f) {
+ if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f) {
+ if (!f) {
clear_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section);
return NULL;
@@ -881,6 +883,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
struct napi_struct *napi;
+
q_vector = adapter->q_vector[q_idx];
napi = &q_vector->napi;
napi_enable(napi);
@@ -918,8 +921,9 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
i40evf_configure_rx(adapter);
adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = adapter->rx_rings[i];
+
i40evf_alloc_rx_buffers(ring, ring->count);
ring->next_to_use = ring->count - 1;
writel(ring->next_to_use, ring->tail);
@@ -950,7 +954,7 @@ static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ for (i = 0; i < adapter->num_active_queues; i++)
i40evf_clean_rx_ring(adapter->rx_rings[i]);
}
@@ -962,7 +966,7 @@ static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ for (i = 0; i < adapter->num_active_queues; i++)
i40evf_clean_tx_ring(adapter->tx_rings[i]);
}
@@ -1064,7 +1068,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
if (!adapter->vsi_res)
return;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
if (adapter->tx_rings[i])
kfree_rcu(adapter->tx_rings[i], rcu);
adapter->tx_rings[i] = NULL;
@@ -1084,11 +1088,11 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
- tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
@@ -1130,7 +1134,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
err = -EIO;
goto out;
}
- pairs = adapter->vsi_res->num_queue_pairs;
+ pairs = adapter->num_active_queues;
/* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
@@ -1172,14 +1176,14 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
- i40evf_napi_poll, NAPI_POLL_WEIGHT);
+ i40evf_napi_poll, NAPI_POLL_WEIGHT);
adapter->q_vector[q_idx] = q_vector;
}
@@ -1210,7 +1214,7 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
int napi_vectors;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- napi_vectors = adapter->vsi_res->num_queue_pairs;
+ napi_vectors = adapter->num_active_queues;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
@@ -1265,8 +1269,8 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
}
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
- (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
- "Disabled", adapter->vsi_res->num_queue_pairs);
+ (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
+ adapter->num_active_queues);
return 0;
err_alloc_queues:
@@ -1284,6 +1288,7 @@ err_set_interrupt:
static void i40evf_watchdog_timer(unsigned long data)
{
struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+
schedule_work(&adapter->watchdog_task);
/* timer will be rescheduled in watchdog task */
}
@@ -1295,8 +1300,8 @@ static void i40evf_watchdog_timer(unsigned long data)
static void i40evf_watchdog_task(struct work_struct *work)
{
struct i40evf_adapter *adapter = container_of(work,
- struct i40evf_adapter,
- watchdog_task);
+ struct i40evf_adapter,
+ watchdog_task);
struct i40e_hw *hw = &adapter->hw;
uint32_t rstat_val;
@@ -1334,7 +1339,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
/* check for reset */
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED)) {
@@ -1425,7 +1430,7 @@ static int next_queue(struct i40evf_adapter *adapter, int j)
{
j += 1;
- return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
+ return j >= adapter->num_active_queues ? 0 : j;
}
/**
@@ -1434,23 +1439,23 @@ static int next_queue(struct i40evf_adapter *adapter, int j)
**/
static void i40evf_configure_rss(struct i40evf_adapter *adapter)
{
+ u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
struct i40e_hw *hw = &adapter->hw;
u32 lut = 0;
int i, j;
u64 hena;
- /* Set of random keys generated using kernel random number generator */
- static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
- 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
- 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
- 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
- 0x4954b126 };
+ /* No RSS for single queue. */
+ if (adapter->num_active_queues == 1) {
+ wr32(hw, I40E_VFQF_HENA(0), 0);
+ wr32(hw, I40E_VFQF_HENA(1), 0);
+ return;
+ }
/* Hash type is configured by the PF - we just supply the key */
-
- /* Fill out hash function seed */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+ wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
hena = I40E_DEFAULT_RSS_HENA;
@@ -1458,7 +1463,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
/* Populate the LUT with max no. of queues in round robin fashion */
- j = adapter->vsi_res->num_queue_pairs;
+ j = adapter->num_active_queues;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
j = next_queue(adapter, j);
lut = j;
@@ -1494,7 +1499,7 @@ static void i40evf_reset_task(struct work_struct *work)
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section))
- udelay(500);
+ usleep_range(500, 1000);
if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
@@ -1508,8 +1513,7 @@ static void i40evf_reset_task(struct work_struct *work)
if ((rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED))
break;
- else
- msleep(I40EVF_RESET_WAIT_MS);
+ msleep(I40EVF_RESET_WAIT_MS);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
@@ -1523,8 +1527,7 @@ static void i40evf_reset_task(struct work_struct *work)
if ((rstat_val == I40E_VFR_VFACTIVE) ||
(rstat_val == I40E_VFR_COMPLETED))
break;
- else
- msleep(I40EVF_RESET_WAIT_MS);
+ msleep(I40EVF_RESET_WAIT_MS);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
struct i40evf_mac_filter *f, *ftmp;
@@ -1575,12 +1578,12 @@ continue_reset:
/* kill and reinit the admin queue */
if (i40evf_shutdown_adminq(hw))
dev_warn(&adapter->pdev->dev,
- "%s: Failed to destroy the Admin Queue resources\n",
- __func__);
+ "%s: Failed to destroy the Admin Queue resources\n",
+ __func__);
err = i40evf_init_adminq(hw);
if (err)
dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
- __func__, err);
+ __func__, err);
adapter->aq_pending = 0;
adapter->aq_required = 0;
@@ -1632,8 +1635,8 @@ static void i40evf_adminq_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
return;
- event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
return;
@@ -1645,13 +1648,9 @@ static void i40evf_adminq_task(struct work_struct *work)
i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
v_msg->v_retval, event.msg_buf,
- event.msg_size);
- if (pending != 0) {
- dev_info(&adapter->pdev->dev,
- "%s: ARQ: Pending events %d\n",
- __func__, pending);
+ event.msg_len);
+ if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
- }
} while (pending);
/* check for error indications */
@@ -1705,10 +1704,9 @@ static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->tx_rings[i]->desc)
i40evf_free_tx_resources(adapter->tx_rings[i]);
-
}
/**
@@ -1725,7 +1723,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
{
int i, err = 0;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
adapter->tx_rings[i]->count = adapter->tx_desc_count;
err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
if (!err)
@@ -1753,7 +1751,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
{
int i, err = 0;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i]->count = adapter->rx_desc_count;
err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
if (!err)
@@ -1776,7 +1774,7 @@ static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->rx_rings[i]->desc)
i40evf_free_rx_resources(adapter->rx_rings[i]);
}
@@ -1980,7 +1978,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
if ((rstat == I40E_VFR_VFACTIVE) ||
(rstat == I40E_VFR_COMPLETED))
return 0;
- udelay(10);
+ usleep_range(10, 20);
}
return -EBUSY;
}
@@ -2022,7 +2020,7 @@ static void i40evf_init_task(struct work_struct *work)
err = i40evf_check_reset_complete(hw);
if (err) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
- err);
+ err);
goto err;
}
hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -2047,6 +2045,8 @@ static void i40evf_init_task(struct work_struct *work)
case __I40EVF_INIT_VERSION_CHECK:
if (!i40evf_asq_done(hw)) {
dev_err(&pdev->dev, "Admin queue command never completed\n");
+ i40evf_shutdown_adminq(hw);
+ adapter->state = __I40EVF_STARTUP;
goto err;
}
@@ -2054,7 +2054,7 @@ static void i40evf_init_task(struct work_struct *work)
err = i40evf_verify_api_ver(adapter);
if (err) {
dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
- err);
+ err);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pdev->dev, "Resending request\n");
err = i40evf_send_api_ver(adapter);
@@ -2080,8 +2080,11 @@ static void i40evf_init_task(struct work_struct *work)
goto err;
}
err = i40evf_get_vf_config(adapter);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- goto restart;
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+ dev_info(&pdev->dev, "Resending VF config request\n");
+ err = i40evf_send_vf_config_msg(adapter);
+ goto err;
+ }
if (err) {
dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
err);
@@ -2138,7 +2141,7 @@ static void i40evf_init_task(struct work_struct *work)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f)
+ if (!f)
goto err_sw_init;
ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
@@ -2152,6 +2155,9 @@ static void i40evf_init_task(struct work_struct *work)
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ adapter->num_active_queues = min_t(int,
+ adapter->vsi_res->num_queue_pairs,
+ (int)(num_online_cpus()));
adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
@@ -2500,8 +2506,9 @@ static struct pci_driver i40evf_driver = {
static int __init i40evf_init_module(void)
{
int ret;
+
pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
- i40evf_driver_version);
+ i40evf_driver_version);
pr_info("%s\n", i40evf_copyright);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 66d12f5b4ca8..5fde5a7f4591 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -89,27 +89,37 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
struct i40e_virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
+ enum i40e_virtchnl_ops op;
i40e_status err;
- event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
err = -ENOMEM;
goto out;
}
- err = i40evf_clean_arq_element(hw, &event, NULL);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- goto out_alloc;
+ while (1) {
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ /* When the AQ is empty, i40evf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ if (err)
+ goto out_alloc;
+ op =
+ (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == I40E_VIRTCHNL_OP_VERSION)
+ break;
+ }
+
err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
if (err)
goto out_alloc;
- if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_VERSION) {
+ if (op != I40E_VIRTCHNL_OP_VERSION) {
dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
- le32_to_cpu(event.desc.cookie_high));
+ op);
err = -EIO;
goto out_alloc;
}
@@ -153,42 +163,34 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- u16 len;
+ enum i40e_virtchnl_ops op;
i40e_status err;
+ u16 len;
len = sizeof(struct i40e_virtchnl_vf_resource) +
I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
- event.msg_size = len;
- event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ event.buf_len = len;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
err = -ENOMEM;
goto out;
}
- err = i40evf_clean_arq_element(hw, &event, NULL);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- goto out_alloc;
-
- err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "%s: Error returned from PF, %d, %d\n", __func__,
- le32_to_cpu(event.desc.cookie_high),
- le32_to_cpu(event.desc.cookie_low));
- err = -EIO;
- goto out_alloc;
+ while (1) {
+ /* When the AQ is empty, i40evf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err)
+ goto out_alloc;
+ op =
+ (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
+ break;
}
- if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
- dev_err(&adapter->pdev->dev,
- "%s: Invalid response from PF, %d, %d\n", __func__,
- le32_to_cpu(event.desc.cookie_high),
- le32_to_cpu(event.desc.cookie_low));
- err = -EIO;
- goto out_alloc;
- }
- memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
@@ -207,7 +209,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
- int pairs = adapter->vsi_res->num_queue_pairs;
+ int pairs = adapter->num_active_queues;
int i, len;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
@@ -273,7 +275,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
}
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
@@ -299,7 +301,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
}
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
@@ -393,7 +395,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
@@ -454,7 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
@@ -516,7 +518,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
@@ -576,7 +578,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ __func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
@@ -635,6 +637,7 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
struct i40e_virtchnl_queue_select vqs;
+
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* no error message, this isn't crucial */
return;
@@ -709,19 +712,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
"%s: Unknown event %d from pf\n",
__func__, vpe->event);
break;
-
}
return;
}
- if (v_opcode != adapter->current_op) {
- dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
- __func__, adapter->current_op, v_opcode);
- /* We're probably completely screwed at this point, but clear
- * the current op and try to carry on....
- */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
- return;
- }
+ if (v_opcode != adapter->current_op)
+ dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
+ adapter->current_op, v_opcode);
if (v_retval) {
dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
__func__, v_retval, v_opcode);
@@ -773,8 +769,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
break;
default:
- dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
- __func__, v_opcode);
+ dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n",
+ v_opcode);
break;
} /* switch v_opcode */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
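Both i40evf_verify_api_ver() and i40evf_get_vf_config() now drain the admin receive queue until the reply they are waiting for arrives, instead of erroring out on the first unrelated message. A minimal sketch of that drain-until-match pattern; dequeue_event() and the event layout are illustrative stand-ins for i40evf_clean_arq_element() and the cookie_high/cookie_low decode, not driver API:

#include <stdio.h>

struct event { int opcode; int retval; };

/* Stub queue: two unrelated messages precede the reply (opcode 1). */
static struct event queue[] = {
	{ .opcode = 7 }, { .opcode = 3 }, { .opcode = 1, .retval = 0 },
};
static unsigned int head;

static int dequeue_event(struct event *ev)
{
	if (head >= sizeof(queue) / sizeof(queue[0]))
		return -1;	/* empty, like a nonzero AQ status */
	*ev = queue[head++];
	return 0;
}

static int wait_for_opcode(int expected, struct event *ev)
{
	for (;;) {
		if (dequeue_event(ev))
			return -1;	/* drained without seeing the reply */
		if (ev->opcode == expected)
			return ev->retval;
		/* unrelated message: discard it and keep draining */
	}
}

int main(void)
{
	struct event ev;

	printf("result=%d\n", wait_for_opcode(1, &ev));
	return 0;
}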
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 02cfd3b14762..d5673eb90c54 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2842,11 +2842,16 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
for (i = 0; i < IGB_RETA_SIZE; i++)
indir[i] = adapter->rss_indir_tbl[i];
@@ -2889,13 +2894,20 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int i;
u32 num_queues;
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+
num_queues = adapter->rss_queues;
switch (hw->mac.type) {
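The checks added to igb_get_rxfh() and igb_set_rxfh() implement the new hfunc contract: report Toeplitz on reads, and on writes accept only an indirection-table update, refusing a new key or any hash function other than Toeplitz or "no change". A sketch of the write-side guard, with stand-in values for ETH_RSS_HASH_NO_CHANGE and ETH_RSS_HASH_TOP:

#include <errno.h>
#include <stdio.h>

#define RSS_HASH_NO_CHANGE	0	/* stand-ins for ETH_RSS_HASH_* */
#define RSS_HASH_TOP		1

/* Returns a negative errno to reject, 0 for "nothing to do",
 * 1 when the caller should go on and program the table. */
static int set_rxfh_guard(const unsigned int *indir,
			  const unsigned char *key, unsigned char hfunc)
{
	if (key ||
	    (hfunc != RSS_HASH_NO_CHANGE && hfunc != RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!indir)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", set_rxfh_guard(NULL, NULL, RSS_HASH_TOP));
	return 0;
}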
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 487cd9c4ac0d..485d2c609d5d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3375,14 +3375,11 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
u32 j, num_rx_queues;
- static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
- 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
- 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
- 0xFA01ACBE };
+ u32 rss_key[10];
- /* Fill out hash function seeds */
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (j = 0; j < 10; j++)
- wr32(E1000_RSSRK(j), rsskey[j]);
+ wr32(E1000_RSSRK(j), rss_key[j]);
num_rx_queues = adapter->rss_queues;
@@ -5094,12 +5091,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (unlikely(skb->len < 17)) {
- if (skb_pad(skb, 17 - skb->len))
- return NETDEV_TX_OK;
- skb->len = 17;
- skb_set_tail_pointer(skb, 17);
- }
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}
@@ -6651,8 +6644,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
#endif
/* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IGB_RX_HDR_LEN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_failed++;
return NULL;
@@ -6853,14 +6845,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
if (skb_is_nonlinear(skb))
igb_pull_tail(rx_ring, rx_desc, skb);
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
-
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
return false;
}
@@ -6995,7 +6982,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
/* alloc new page for storage */
- page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+ page = dev_alloc_page();
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
return false;
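Two of the igb hunks above replace open-coded "check length, skb_pad(), bump tail" sequences with helpers that zero-fill up to a minimum length and free the buffer when allocation fails: skb_put_padto() on transmit (17 bytes for TCTL.PSP) and eth_skb_pad() on receive (the 60-byte Ethernet minimum). A userspace sketch of those semantics over a plain byte buffer:

#include <stdlib.h>
#include <string.h>

struct buf { unsigned char *data; size_t len, cap; };

/* Zero-pad b up to min_len; on allocation failure free the buffer and
 * report the error, as skb_put_padto() does with the skb. */
static int buf_put_padto(struct buf *b, size_t min_len)
{
	if (b->len >= min_len)
		return 0;
	if (min_len > b->cap) {
		unsigned char *p = realloc(b->data, min_len);

		if (!p) {
			free(b->data);
			b->data = NULL;
			return -1;
		}
		b->data = p;
		b->cap = min_len;
	}
	memset(b->data + b->len, 0, min_len - b->len);
	b->len = min_len;
	return 0;
}

int main(void)
{
	struct buf b = { .data = malloc(4), .len = 4, .cap = 4 };
	int err = b.data ? buf_put_padto(&b, 17) : -1;

	free(b.data);	/* NULL-safe; already NULL on pad failure */
	return err ? 1 : 0;
}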
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 055961b0f24b..aa87605b144a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1963,7 +1963,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
-static void ixgb_check_copybreak(struct net_device *netdev,
+static void ixgb_check_copybreak(struct napi_struct *napi,
struct ixgb_buffer *buffer_info,
u32 length, struct sk_buff **skb)
{
@@ -1972,7 +1972,7 @@ static void ixgb_check_copybreak(struct net_device *netdev,
if (length > copybreak)
return;
- new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ new_skb = napi_alloc_skb(napi, length);
if (!new_skb)
return;
@@ -2064,7 +2064,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
goto rxdesc_done;
}
- ixgb_check_copybreak(netdev, buffer_info, length, &skb);
+ ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
/* Good Receive */
skb_put(skb, length);
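copybreak keeps small frames off the large ring buffers: below the threshold the payload is copied into a right-sized allocation and the original buffer stays on the ring for reuse (the switch to napi_alloc_skb() just sources that small buffer from the per-NAPI fragment cache). A sketch of the idea, minus the DMA sync and skb bookkeeping:

#include <stdlib.h>
#include <string.h>

static size_t copybreak = 256;

/* Returns the buffer to hand up the stack. *recycled tells the caller
 * whether the original rx_buf can stay on the ring. */
static unsigned char *maybe_copybreak(unsigned char *rx_buf, size_t len,
				      int *recycled)
{
	unsigned char *small;

	*recycled = 0;
	if (len > copybreak)
		return rx_buf;
	small = malloc(len);
	if (!small)
		return rx_buf;	/* fall back to the big buffer */
	memcpy(small, rx_buf, len);
	*recycled = 1;		/* rx_buf is free for the next frame */
	return small;
}

int main(void)
{
	unsigned char frame[64] = { 0xde, 0xad };
	int recycled;
	unsigned char *p = maybe_copybreak(frame, sizeof(frame), &recycled);

	if (recycled)
		free(p);	/* the small copy owns its allocation */
	return 0;
}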
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index be2989e60009..35e6fa643c7e 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5032a602d5c9..b6137be43920 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -300,16 +300,17 @@ enum ixgbe_ring_f_enum {
RING_F_ARRAY_SIZE /* must be last in enum set */
};
-#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
-#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define IXGBE_MAX_L2A_QUEUES 4
-#define IXGBE_BAD_L2A_QUEUE 3
-#define IXGBE_MAX_MACVLANS 31
-#define IXGBE_MAX_DCBMACVLANS 8
+#define IXGBE_MAX_RSS_INDICES 16
+#define IXGBE_MAX_RSS_INDICES_X550 64
+#define IXGBE_MAX_VMDQ_INDICES 64
+#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
+#define IXGBE_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_BAD_L2A_QUEUE 3
+#define IXGBE_MAX_MACVLANS 31
+#define IXGBE_MAX_DCBMACVLANS 8
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@@ -553,11 +554,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
-static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
-{
- writel(value, ring->tail);
-}
-
#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
@@ -769,6 +765,21 @@ struct ixgbe_adapter {
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
};
+static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ return IXGBE_MAX_RSS_INDICES;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ return IXGBE_MAX_RSS_INDICES_X550;
+ default:
+ return 0;
+ }
+}
+
struct ixgbe_fdir_filter {
struct hlist_node fdir_node;
union ixgbe_atr_input filter;
@@ -804,11 +815,15 @@ enum ixgbe_boards {
board_82598,
board_82599,
board_X540,
+ board_X550,
+ board_X550EM_x,
};
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
extern struct ixgbe_info ixgbe_X540_info;
+extern struct ixgbe_info ixgbe_X550_info;
+extern struct ixgbe_info ixgbe_X550EM_x_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5f484bf3fda..9c66babd4edd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1625,7 +1625,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1636,7 +1636,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
/* Include 0x0-0x3F in the checksum */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ if (hw->eeprom.ops.read(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@@ -1645,24 +1645,35 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
/* Include all data from pointers except for the fw pointer */
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
- hw->eeprom.ops.read(hw, i, &pointer);
+ if (hw->eeprom.ops.read(hw, i, &pointer)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* If the pointer seems invalid */
+ if (pointer == 0xFFFF || pointer == 0)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, pointer, &length)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
- /* Make sure the pointer seems valid */
- if (pointer != 0xFFFF && pointer != 0) {
- hw->eeprom.ops.read(hw, pointer, &length);
+ if (length == 0xFFFF || length == 0)
+ continue;
- if (length != 0xFFFF && length != 0) {
- for (j = pointer+1; j <= pointer+length; j++) {
- hw->eeprom.ops.read(hw, j, &word);
- checksum += word;
- }
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
}
+ checksum += word;
}
}
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return checksum;
+ return (s32)checksum;
}
/**
@@ -1686,26 +1697,33 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
- if (status == 0) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
- hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+ checksum = (u16)(status & 0xffff);
- /*
- * Verify read checksum from EEPROM is the same as
- * calculated checksum
- */
- if (read_checksum != checksum)
- status = IXGBE_ERR_EEPROM_CHECKSUM;
-
- /* If the user cares, return the calculated checksum */
- if (checksum_val)
- *checksum_val = checksum;
- } else {
+ status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+ if (status) {
hw_dbg(hw, "EEPROM read failed\n");
+ return status;
}
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
return status;
}
@@ -1724,15 +1742,19 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status == 0) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
- status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
- } else {
+ if (status) {
hw_dbg(hw, "EEPROM read failed\n");
+ return status;
}
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+
return status;
}
@@ -2469,7 +2491,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@@ -2514,7 +2536,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr;
u32 swmask = mask;
@@ -2799,6 +2821,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
@@ -3192,17 +3216,27 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*link_up = false;
}
- if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_10G_82599)
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (links_reg & IXGBE_LINKS_SPEED_NON_STD))
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_100_82599)
- *speed = IXGBE_LINK_SPEED_100_FULL;
- else
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (links_reg & IXGBE_LINKS_SPEED_NON_STD))
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
return 0;
}
@@ -3434,23 +3468,34 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* @buffer: contains the command to write and where the return status will
* be placed
* @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
*
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length)
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length, u32 timeout,
+ bool return_data)
{
- u32 hicr, i, bi;
+ u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- u8 buf_len, dword_len;
+ u16 buf_len, dword_len;
- if (length == 0 || length & 0x3 ||
- length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
- hw_dbg(hw, "Buffer length failure.\n");
+ if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Set bit 9 of FWSTS clearing FW reset indication */
+ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
+ IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
+
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if ((hicr & IXGBE_HICR_EN) == 0) {
@@ -3458,7 +3503,12 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
- /* Calculate length in DWORDs */
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ hw_dbg(hw, "Buffer length failure, not aligned to dword");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
dword_len = length >> 2;
/*
@@ -3472,7 +3522,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
- for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_C))
break;
@@ -3480,12 +3530,15 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
}
/* Check command successful completion. */
- if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ if ((timeout != 0 && i == timeout) ||
(!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
hw_dbg(hw, "Command has failed with no status valid.\n");
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ if (!return_data)
+ return 0;
+
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
@@ -3556,7 +3609,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
- sizeof(fw_cmd));
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
if (ret_val != 0)
continue;
@@ -3583,7 +3638,8 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
**/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
- u32 gcr_ext, hlreg0;
+ u32 gcr_ext, hlreg0, i, poll;
+ u16 value;
/*
* If double reset is not requested then all transactions should
@@ -3600,6 +3656,23 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+ /* wait for a last completion before clearing buffers */
+ IXGBE_WRITE_FLUSH(hw);
+ usleep_range(3000, 6000);
+
+ /* Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
+ usleep_range(100, 200);
+ value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (ixgbe_removed(hw->hw_addr))
+ break;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ break;
+ }
+
/* initiate cleaning flow for buffers in the PCIe transaction layer */
gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
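The EEPROM rework changes calc_checksum to return s32 so read failures propagate: callers first test for a negative status, then take the checksum from the low 16 bits, which is why both ixgbe_validate_eeprom_checksum_generic() and ixgbe_update_eeprom_checksum_generic() gained the "status < 0" check and the (u16)(status & 0xffff) extraction. A compact sketch of that convention; 0xBABA mirrors IXGBE_EEPROM_SUM, and the stub read can be made to fail via BAD_WORD:

#include <stdio.h>

#define WORDS		8
#define BAD_WORD	(-1)	/* set to a valid index to simulate a failure */

static const unsigned short eeprom[WORDS] = { 1, 2, 3, 4, 5, 6, 7, 8 };

static int eeprom_read(int offset, unsigned short *word)
{
	if (offset == BAD_WORD || offset >= WORDS)
		return -1;
	*word = eeprom[offset];
	return 0;
}

/* Negative return: error. Otherwise the low 16 bits are the checksum. */
static int calc_checksum(void)
{
	unsigned short sum = 0, word;
	int i;

	for (i = 0; i < WORDS; i++) {
		if (eeprom_read(i, &word))
			return -1;
		sum += word;
	}
	return (unsigned short)(0xBABA - sum);
}

int main(void)
{
	int status = calc_checksum();

	if (status < 0)
		return 1;
	printf("checksum=0x%04x\n", (unsigned short)status);
	return 0;
}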
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2ae5d4b8fc93..8cfadcb2676e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -64,7 +64,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
@@ -84,8 +84,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
@@ -110,6 +110,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length, u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 48f35fc963f8..a507a6fe3624 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -286,6 +286,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
bwgid, ptype);
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
default:
@@ -302,6 +304,8 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
return ixgbe_dcb_config_pfc_82598(hw, pfc_en);
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
default:
break;
@@ -357,6 +361,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -385,6 +391,8 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_dcb_read_rtrup2tc_82599(hw, map);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 58a7f5312a96..2707bda37418 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -180,6 +180,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0ae038b9af90..e5be0dd508de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -511,6 +511,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -622,6 +624,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
for (i = 0; i < 8; i++)
@@ -1406,6 +1410,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1644,6 +1650,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1680,6 +1688,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1733,12 +1743,16 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
- /* X540 needs to set the MACC.FLU bit to force link up */
- if (adapter->hw.mac.type == ixgbe_mac_X540) {
+ /* X540 and X550 need to set the MACC.FLU bit to force link up */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
- } else {
+ break;
+ default:
if (hw->mac.orig_autoc) {
reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
@@ -2776,7 +2790,14 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
/* if we changed something we need to update flags */
if (flags2 != adapter->flags2) {
struct ixgbe_hw *hw = &adapter->hw;
- u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ u32 mrqc;
+ unsigned int pf_pool = adapter->num_vfs;
+
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
+ else
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
if ((flags2 & UDP_RSS_FLAGS) &&
!(adapter->flags2 & UDP_RSS_FLAGS))
@@ -2799,7 +2820,11 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
return 0;
@@ -2833,6 +2858,8 @@ static int ixgbe_get_ts_info(struct net_device *dev,
struct ixgbe_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -2900,7 +2927,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
max_combined = IXGBE_MAX_FDIR_INDICES;
} else {
/* support up to 16 queues with RSS */
- max_combined = IXGBE_MAX_RSS_INDICES;
+ max_combined = ixgbe_max_rss_indices(adapter);
}
return max_combined;
@@ -2948,6 +2975,7 @@ static int ixgbe_set_channels(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
unsigned int count = ch->combined_count;
+ u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
@@ -2964,9 +2992,9 @@ static int ixgbe_set_channels(struct net_device *dev,
/* update feature limits from largest to smallest supported values */
adapter->ring_feature[RING_F_FDIR].limit = count;
- /* cap RSS limit at 16 */
- if (count > IXGBE_MAX_RSS_INDICES)
- count = IXGBE_MAX_RSS_INDICES;
+ /* cap RSS limit */
+ if (count > max_rss_indices)
+ count = max_rss_indices;
adapter->ring_feature[RING_F_RSS].limit = count;
#ifdef IXGBE_FCOE
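Two of the ethtool hunks repeat one new pattern: on X550-class parts with SR-IOV enabled, the PF's RSS configuration lives in a per-pool PFVFMRQC register instead of the global MRQC, so both the read and the write-back pick the register by MAC type and SR-IOV state. A small sketch of that selection; the PFVFMRQC offset formula below is hypothetical, kept only to show the per-pool indexing (0x5818 is the real IXGBE_MRQC offset):

#include <stdio.h>

enum mac { MAC_82598, MAC_82599, MAC_X540, MAC_X550, MAC_X550EM_X };

#define MRQC_OFFSET		0x5818			/* IXGBE_MRQC */
#define PFVFMRQC_OFFSET(pool)	(0x3400 + 4 * (pool))	/* hypothetical */

static unsigned int mrqc_offset(enum mac mac, int sriov_enabled,
				unsigned int pf_pool)
{
	if (mac >= MAC_X550 && sriov_enabled)
		return PFVFMRQC_OFFSET(pf_pool);
	return MRQC_OFFSET;
}

int main(void)
{
	printf("0x%x\n", mrqc_offset(MAC_X550, 1, 8));
	return 0;
}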
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ce40c77381e9..68e1e757ecef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -126,6 +126,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc51554c9e99..798b05556e1b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -42,6 +42,7 @@
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
@@ -50,6 +51,15 @@
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
+#ifdef CONFIG_OF
+#include <linux/of_net.h>
+#endif
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
@@ -65,15 +75,17 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.19.1-k"
+#define DRV_VERSION "4.0.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2014 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
- [board_82598] = &ixgbe_82598_info,
- [board_82599] = &ixgbe_82599_info,
- [board_X540] = &ixgbe_X540_info,
+ [board_82598] = &ixgbe_82598_info,
+ [board_82599] = &ixgbe_82599_info,
+ [board_X540] = &ixgbe_X540_info,
+ [board_X550] = &ixgbe_X550_info,
+ [board_X550EM_x] = &ixgbe_X550EM_x_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -115,6 +127,9 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
/* required last entry */
{0, }
};
@@ -835,6 +850,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -871,6 +888,8 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1412,41 +1431,21 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
- rx_ring->next_to_use = val;
-
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- ixgbe_write_tail(rx_ring, val);
-}
-
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t dma = bi->dma;
+ dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
- if (likely(dma))
+ if (likely(page))
return true;
/* alloc new page for storage */
- if (likely(!page)) {
- page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
- bi->skb, ixgbe_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
- return false;
- }
- bi->page = page;
+ page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
}
/* map page for use */
@@ -1459,13 +1458,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ixgbe_rx_pg_order(rx_ring));
- bi->page = NULL;
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
bi->dma = dma;
+ bi->page = page;
bi->page_offset = 0;
return true;
@@ -1509,16 +1508,28 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the hdr_addr for the next_to_use descriptor */
- rx_desc->read.hdr_addr = 0;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.upper.status_error = 0;
cleaned_count--;
} while (cleaned_count);
i += rx_ring->count;
- if (rx_ring->next_to_use != i)
- ixgbe_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
}
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
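The inlined tail update that replaces ixgbe_release_rx_desc() preserves the ordering that matters: every descriptor write must be globally visible before the tail write lets hardware fetch them, hence the wmb() immediately ahead of writel(). A userspace analogy, with a C11 release store standing in for the barrier-plus-MMIO-write pair:

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 256

static uint64_t ring[RING_SIZE];	/* descriptor memory */
static _Atomic uint32_t tail;		/* "doorbell" the consumer polls */

/* Fill a descriptor, then publish the new tail with release ordering so
 * a consumer acquiring tail can never see stale descriptor contents. */
static void publish_descriptor(uint32_t idx, uint64_t desc)
{
	ring[idx % RING_SIZE] = desc;
	atomic_store_explicit(&tail, idx + 1, memory_order_release);
}

int main(void)
{
	publish_descriptor(0, 0xabcd);
	return 0;
}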
@@ -1763,14 +1774,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
return false;
#endif
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
-
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
return false;
}
@@ -1795,9 +1801,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- new_buff->page = old_buff->page;
- new_buff->dma = old_buff->dma;
- new_buff->page_offset = old_buff->page_offset;
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
@@ -1806,6 +1810,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static inline bool ixgbe_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
/**
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -1841,12 +1850,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* we can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(page) == numa_node_id()))
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!ixgbe_page_is_reserved(page)))
return true;
/* this page cannot be reused so discard it */
- put_page(page);
+ __free_pages(page, ixgbe_rx_pg_order(rx_ring));
return false;
}
@@ -1854,7 +1863,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
rx_buffer->page_offset, size, truesize);
/* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
+ if (unlikely(ixgbe_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
@@ -1864,22 +1873,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
-
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- atomic_inc(&page->_count);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > last_offset)
return false;
-
- /* bump ref count on page before it is given to the stack */
- get_page(page);
#endif
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ atomic_inc(&page->_count);
+
return true;
}
@@ -1907,8 +1913,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
#endif
/* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IXGBE_RX_HDR_SIZE);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+ IXGBE_RX_HDR_SIZE);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
return NULL;
@@ -1942,6 +1948,8 @@ dma_sync:
rx_buffer->page_offset,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
+
+ rx_buffer->skb = NULL;
}
/* pull page into skb */
@@ -1959,8 +1967,6 @@ dma_sync:
}
/* clear contents of buffer_info */
- rx_buffer->skb = NULL;
- rx_buffer->dma = 0;
rx_buffer->page = NULL;
return skb;
@@ -2155,6 +2161,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2264,6 +2272,8 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2467,6 +2477,8 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2493,6 +2505,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2525,6 +2539,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
mask |= IXGBE_EIMS_GPI_SDP0;
break;
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask |= IXGBE_EIMS_TS;
break;
default:
@@ -2536,7 +2552,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_82599EB:
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
+ /* fall through */
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_MAILBOX;
break;
@@ -2544,9 +2563,6 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
break;
}
- if (adapter->hw.mac.type == ixgbe_mac_X540)
- mask |= IXGBE_EIMS_TIMESYNC;
-
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
mask |= IXGBE_EIMS_FLOW_DIR;
@@ -2592,6 +2608,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -2811,6 +2829,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
/* Fall through */
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -2905,6 +2925,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3190,16 +3212,14 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
-static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
{
struct ixgbe_hw *hw = &adapter->hw;
- static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
- 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
- 0x6A3E67EA, 0x14364D17, 0x3BED200D};
- u32 mrqc = 0, reta = 0;
- u32 rxcsum;
+ u32 reta = 0;
int i, j;
+ int reta_entries = 128;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ int indices_multi;
/*
* Program table for at least 2 queues w/ SR-IOV so that VFs can
@@ -3213,16 +3233,69 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
+ /* Fill out the redirection table as follows:
+ * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices
+ * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index
+ * X550: 512 (8 bit wide) entries containing 6 bit RSS index
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ indices_multi = 0x11;
+ else
+ indices_multi = 0x1;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ reta_entries = 512;
+ default:
+ break;
+ }
+
/* Fill out redirection table */
- for (i = 0, j = 0; i < 128; i++, j++) {
+ for (i = 0, j = 0; i < reta_entries; i++, j++) {
if (j == rss_i)
j = 0;
- /* reta = 4-byte sliding window of
- * 0x00..(indices-1)(indices-1)00..etc. */
- reta = (reta << 8) | (j * 0x11);
+ reta = (reta << 8) | (j * indices_multi);
+ if ((i & 3) == 3) {
+ if (i < 128)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
+ reta);
+ }
+ }
+}
+
+static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vfreta = 0;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ unsigned int pf_pool = adapter->num_vfs;
+ int i, j;
+
+ /* Fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]);
+
+ /* Fill out the redirection table */
+ for (i = 0, j = 0; i < 64; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+ vfreta = (vfreta << 8) | j;
if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+ vfreta);
}
+}
+
+static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
+ u32 rss_key[10];
+ u32 rxcsum;
/* Disable indicating checksum in descriptor, enables RSS hash */
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
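ixgbe_setup_reta() packs four 8-bit table entries into each 32-bit RETA register (ERETA beyond the first 128 entries): a new queue index is shifted in every iteration and the register is written on every fourth. The sketch below reproduces the packing into an array; with 16 RSS queues, regs[0] comes out as 0x00010203:

#include <stdio.h>
#include <stdint.h>

static void fill_reta(uint32_t *regs, int entries, int rss_i)
{
	uint32_t reta = 0;
	int i, j;

	for (i = 0, j = 0; i < entries; i++, j++) {
		if (j == rss_i)
			j = 0;
		reta = (reta << 8) | j;		/* shift in one entry */
		if ((i & 3) == 3)
			regs[i >> 2] = reta;	/* RETA/ERETA write */
	}
}

int main(void)
{
	uint32_t regs[128 / 4];

	fill_reta(regs, 128, 16);
	printf("reta[0]=0x%08x\n", regs[0]);
	return 0;
}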
@@ -3255,17 +3328,35 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/* Perform hash on these packet types */
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
- IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
- IXGBE_MRQC_RSS_FIELD_IPV6 |
- IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+ IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+ IXGBE_MRQC_RSS_FIELD_IPV6 |
+ IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ if ((hw->mac.type >= ixgbe_mac_X550) &&
+ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+ unsigned int pf_pool = adapter->num_vfs;
+
+ /* Enable VF RSS mode */
+ mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ /* Setup RSS through the VF registers */
+ ixgbe_setup_vfreta(adapter, rss_key);
+ vfmrqc = IXGBE_MRQC_RSSEN;
+ vfmrqc |= rss_field;
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+ } else {
+ ixgbe_setup_reta(adapter, rss_key);
+ mrqc |= rss_field;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ }
}
/**
@@ -3534,6 +3625,8 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
case ixgbe_mac_82598EB:
/*
* For VMDq support of different descriptor types or
@@ -3657,6 +3750,8 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -3691,6 +3786,8 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4112,6 +4209,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -4170,6 +4269,8 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -4308,29 +4409,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer;
+ struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
- rx_buffer = &rx_ring->rx_buffer_info[i];
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
- if (IXGBE_CB(skb)->page_released) {
+ if (IXGBE_CB(skb)->page_released)
dma_unmap_page(dev,
IXGBE_CB(skb)->dma,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
- }
dev_kfree_skb(skb);
rx_buffer->skb = NULL;
}
- if (rx_buffer->dma)
- dma_unmap_page(dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
- if (rx_buffer->page)
- __free_pages(rx_buffer->page,
- ixgbe_rx_pg_order(rx_ring));
+
+ if (!rx_buffer->page)
+ continue;
+
+ dma_unmap_page(dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+ __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+
rx_buffer->page = NULL;
}
@@ -4606,6 +4704,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -4948,10 +5048,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
}
- /* Disable the Tx DMA engine on 82599 and X540 */
+ /* Disable the Tx DMA engine on 82599 and later MACs */
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -5016,7 +5118,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_device_id = pdev->subsystem_device;
/* Set common capability flags and settings */
- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -5071,6 +5173,12 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550:
+#ifdef CONFIG_IXGBE_DCA
+ adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
+#endif
+ break;
default:
break;
}
@@ -5086,6 +5194,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
switch (hw->mac.type) {
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
@@ -5675,6 +5785,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -5806,6 +5918,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -5819,7 +5933,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
+ (hw->mac.type == ixgbe_mac_X540) ||
+ (hw->mac.type == ixgbe_mac_X550) ||
+ (hw->mac.type == ixgbe_mac_X550EM_x)) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -5842,7 +5958,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
break;
case ixgbe_mac_X540:
- /* OS2BMC stats are X540 only*/
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ /* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
@@ -6110,6 +6228,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
}
break;
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6221,6 +6341,10 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
if (!adapter->num_vfs)
return false;
+ /* resetting the PF is only needed for MACs before X550 */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ return false;
+
for (i = 0; i < adapter->num_vfs; i++) {
for (j = 0; j < q_per_pool; j++) {
u32 h, t;
@@ -6256,6 +6380,66 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
}
+#ifdef CONFIG_PCI_IOV
+static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
+ struct pci_dev *vfdev)
+{
+ if (!pci_wait_for_pending_transaction(vfdev))
+ e_dev_warn("Issuing VFLR with pending transactions\n");
+
+ e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
+ pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+
+ msleep(100);
+}
+
+static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *vfdev;
+ u32 gpc;
+ int pos;
+ unsigned short vf_id;
+
+ if (!(netif_carrier_ok(adapter->netdev)))
+ return;
+
+ gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
+ if (gpc) /* If incrementing then no need for the check below */
+ return;
+ /* Check to see if a bad DMA write target from an errant or
+ * malicious VF has caused a PCIe error. If so then we can
+ * issue a VFLR to the offending VF(s) and then resume without
+ * requesting a full slot reset.
+ */
+
+ if (!pdev)
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return;
+
+ /* get the device ID for the VF */
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+
+ /* check status reg for all VFs owned by this PF */
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
+ u16 status_reg;
+
+ pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
+ if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
+ /* issue VFLR */
+ ixgbe_issue_vf_flr(adapter, vfdev);
+ }
+
+ vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
+ }
+}
+
static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
u32 ssvpc;
@@ -6276,6 +6460,17 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
}
+#else
+static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
+{
+}
+
+static void
+ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
+{
+}
+#endif /* CONFIG_PCI_IOV */
+
/**
* ixgbe_watchdog_subtask - check and bring link up
@@ -6296,6 +6491,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
else
ixgbe_watchdog_link_is_down(adapter);
+ ixgbe_check_for_bad_vf(adapter);
ixgbe_spoof_check(adapter);
ixgbe_update_stats(adapter);
@@ -6407,51 +6603,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}
-#ifdef CONFIG_PCI_IOV
-static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
-{
- int vf;
- struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- u32 gpc;
- u32 ciaa, ciad;
-
- gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
- if (gpc) /* If incrementing then no need for the check below */
- return;
- /*
- * Check to see if a bad DMA write target from an errant or
- * malicious VF has caused a PCIe error. If so then we can
- * issue a VFLR to the offending VF(s) and then resume without
- * requesting a full slot reset.
- */
-
- for (vf = 0; vf < adapter->num_vfs; vf++) {
- ciaa = (vf << 16) | 0x80000000;
- /* 32 bit read so align, we really want status at offset 6 */
- ciaa |= PCI_COMMAND;
- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
- ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
- ciaa &= 0x7FFFFFFF;
- /* disable debug mode asap after reading data */
- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
- /* Get the upper 16 bits which will be the PCI status reg */
- ciad >>= 16;
- if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
- netdev_err(netdev, "VF %d Hung DMA\n", vf);
- /* Issue VFLR */
- ciaa = (vf << 16) | 0x80000000;
- ciaa |= 0xA8;
- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
- ciad = 0x00008000; /* VFLR */
- IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
- ciaa &= 0x7FFFFFFF;
- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
- }
- }
-}
-
-#endif
/**
* ixgbe_service_timer - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@@ -6460,7 +6611,6 @@ static void ixgbe_service_timer(unsigned long data)
{
struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
unsigned long next_event_offset;
- bool ready = true;
/* poll faster when waiting for link */
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
@@ -6468,32 +6618,10 @@ static void ixgbe_service_timer(unsigned long data)
else
next_event_offset = HZ * 2;
-#ifdef CONFIG_PCI_IOV
- /*
- * don't bother with SR-IOV VF DMA hang check if there are
- * no VFs or the link is down
- */
- if (!adapter->num_vfs ||
- (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
- goto normal_timer_service;
-
- /* If we have VFs allocated then we must check for DMA hangs */
- ixgbe_check_for_bad_vf(adapter);
- next_event_offset = HZ / 50;
- adapter->timer_event_accumulator++;
-
- if (adapter->timer_event_accumulator >= 100)
- adapter->timer_event_accumulator = 0;
- else
- ready = false;
-
-normal_timer_service:
-#endif
/* Reset the timer */
mod_timer(&adapter->service_timer, next_event_offset + jiffies);
- if (ready)
- ixgbe_service_event_schedule(adapter);
+ ixgbe_service_event_schedule(adapter);
}
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@@ -6898,8 +7026,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
- /* notify HW of packet */
- ixgbe_write_tail(tx_ring, i);
+ writel(i, tx_ring->tail);
+
+ /* we need this barrier if more than one processor can write to our
+ * tail at a time; it synchronizes MMIO on IA64/Altix systems
+ */
+ mmiowb();
}
return;
@@ -7197,12 +7329,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
* The minimum packet size for olinfo paylen is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (unlikely(skb->len < 17)) {
- if (skb_pad(skb, 17 - skb->len))
- return NETDEV_TX_OK;
- skb->len = 17;
- skb_set_tail_pointer(skb, 17);
- }
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
@@ -7646,7 +7774,7 @@ static int ixgbe_set_features(struct net_device *netdev,
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr,
+ const unsigned char *addr, u16 vid,
u16 flags)
{
/* guarantee we can provide a unique filter for the unicast address */
@@ -7655,7 +7783,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -ENOMEM;
}
- return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
+ return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
@@ -7716,7 +7844,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
else
mode = BRIDGE_MODE_VEPA;
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
}
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
@@ -7965,6 +8093,29 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
+ * @adapter: Pointer to adapter struct
+ */
+static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_OF
+ struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ const unsigned char *addr;
+
+ addr = of_get_mac_address(dp);
+ if (addr) {
+ ether_addr_copy(hw->mac.perm_addr, addr);
+ return;
+ }
+#endif /* CONFIG_OF */
+
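+ /* otherwise fall back to the SPARC IDPROM MAC address, if present */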
+#ifdef CONFIG_SPARC
+ ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
+#endif /* CONFIG_SPARC */
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -8046,7 +8197,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev_priv(netdev);
- pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -8104,6 +8254,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -8167,6 +8319,8 @@ skip_sriov:
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
netdev->features |= NETIF_F_SCTP_CSUM;
netdev->hw_features |= NETIF_F_SCTP_CSUM |
NETIF_F_NTUPLE;
@@ -8229,6 +8383,8 @@ skip_sriov:
goto err_sw_init;
}
+ ixgbe_get_platform_mac_addr(adapter);
+
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -8320,6 +8476,8 @@ skip_sriov:
if (err)
goto err_register;
+ pci_set_drvdata(pdev, adapter);
+
/* power down the optics for 82599 SFP+ fiber */
if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
@@ -8399,9 +8557,14 @@ err_dma:
static void ixgbe_remove(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev;
bool disable_dev;
+ /* if !adapter then we already cleaned up in probe */
+ if (!adapter)
+ return;
+
+ netdev = adapter->netdev;
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
@@ -8523,6 +8686,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_X540:
device_id = IXGBE_X540_VF_DEVICE_ID;
break;
+ case ixgbe_mac_X550:
+ device_id = IXGBE_DEV_ID_X550_VF;
+ break;
+ case ixgbe_mac_X550EM_x:
+ device_id = IXGBE_DEV_ID_X550EM_X_VF;
+ break;
default:
device_id = 0;
break;
@@ -8542,8 +8711,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
* VFLR. Just clean up the AER in that case.
*/
if (vfdev) {
- e_dev_err("Issuing VFLR to VF %d\n", vf);
- pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+ ixgbe_issue_vf_flr(adapter, vfdev);
/* Free device reference count */
pci_dev_put(vfdev);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index cc8f0128286c..9993a471d668 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -305,6 +305,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
break;
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
@@ -426,6 +428,8 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 28b81ae09b5a..8a2be444113d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -43,13 +43,195 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
-static bool ixgbe_get_i2c_data(u32 *i2cctl);
+static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
+ * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
+ * @hw: pointer to the hardware structure
+ * @byte: byte to send
+ *
+ * Returns an error code on error.
+ **/
+static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ return ixgbe_get_i2c_ack(hw);
+}
+
+/**
+ * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
+ * @hw: pointer to the hardware structure
+ * @byte: pointer to a u8 to receive the byte
+ *
+ * Returns an error code on error.
+ **/
+static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_in_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ /* ACK */
+ return ixgbe_clock_out_i2c_bit(hw, false);
+}
+
+/**
+ * ixgbe_ones_comp_byte_add - Perform one's complement addition
+ * @add1: addend 1
+ * @add2: addend 2
+ *
+ * Returns one's complement 8-bit sum.
+ **/
+static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
+{
+ u16 sum = add1 + add2;
+
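+ /* fold the carry back into the low byte (end-around carry) */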
+ sum = (sum & 0xFF) + (sum >> 8);
+ return sum & 0xFF;
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
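+ * Exposed through the phy.ops.read_i2c_combined pointer so MAC-specific
+ * code can reach register-addressed I2C devices such as the CS4227.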
+ **/
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int max_retry = 10;
+ int retry = 0;
+ u8 csum_byte;
+ u8 high_bits;
+ u8 low_bits;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
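+ /* the checksum sent on the wire is the complement of the sum */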
+ csum = ~csum;
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ /* Re-start condition */
+ ixgbe_i2c_start(hw);
+ /* Device Address and read indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
+ goto fail;
+ /* Get upper bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
+ goto fail;
+ /* Get low bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
+ goto fail;
+ /* Get csum */
+ if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
+ goto fail;
+ /* NACK */
+ if (ixgbe_clock_out_i2c_bit(hw, false))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ *val = (high_bits << 8) | low_bits;
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ retry++;
+ if (retry < max_retry)
+ hw_dbg(hw, "I2C byte read combined error - Retry.\n");
+ else
+ hw_dbg(hw, "I2C byte read combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ **/
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ int max_retry = 1;
+ int retry = 0;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
+ csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
+ csum = ~csum;
+ do {
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write data 15:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
+ goto fail;
+ /* Write data 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ hw_dbg(hw, "I2C byte write combined error - Retry.\n");
+ else
+ hw_dbg(hw, "I2C byte write combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
*
@@ -60,6 +242,15 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
u32 phy_addr;
u16 ext_ability = 0;
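+ /* select the PHY semaphore that matches this port's LAN ID */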
+ if (!hw->phy.phy_semaphore_mask) {
+ hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
+ IXGBE_STATUS_LAN_ID_1;
+ if (hw->phy.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ }
+
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
hw->phy.mdio.prtad = phy_addr;
@@ -315,12 +506,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
s32 status;
- u16 gssr;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- gssr = IXGBE_GSSR_PHY1_SM;
- else
- gssr = IXGBE_GSSR_PHY0_SM;
+ u32 gssr = hw->phy.phy_semaphore_mask;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@@ -418,7 +604,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
s32 status;
- u16 gssr;
+ u32 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
gssr = IXGBE_GSSR_PHY1_SM;
@@ -576,6 +762,10 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_100_FULL;
}
+ /* Internal PHY does not support 100 Mbps */
+ if (hw->mac.type == ixgbe_mac_X550EM_x)
+ *speed &= ~IXGBE_LINK_SPEED_100_FULL;
+
return status;
}
@@ -632,6 +822,9 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* @hw: pointer to hardware structure
*
* Restarts autonegotiation and the PHY, then waits for completion.
+ * This function always returns success; this is necessary since
+ * it is called via a function pointer that could call other
+ * functions that could return an error.
**/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
@@ -1462,15 +1655,10 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 status;
u32 max_retry = 10;
u32 retry = 0;
- u16 swfw_mask = 0;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = true;
*data = 0;
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- swfw_mask = IXGBE_GSSR_PHY1_SM;
- else
- swfw_mask = IXGBE_GSSR_PHY0_SM;
-
do {
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
@@ -1548,12 +1736,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 status;
u32 max_retry = 1;
u32 retry = 0;
- u16 swfw_mask = 0;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- swfw_mask = IXGBE_GSSR_PHY1_SM;
- else
- swfw_mask = IXGBE_GSSR_PHY0_SM;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
@@ -1610,7 +1793,7 @@ fail:
**/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -1639,7 +1822,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
**/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
/* Stop condition must begin with data low and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1697,9 +1880,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
}
/* Release SDA line (set high) */
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- i2cctl |= IXGBE_I2C_DATA_OUT;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
return status;
@@ -1715,7 +1898,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
s32 status = 0;
u32 i = 0;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
u32 timeout = 10;
bool ack = true;
@@ -1728,8 +1911,8 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
/* Poll for ACK. Note that an ACK in the I2C spec is a
* transition from 1 to 0 */
for (i = 0; i < timeout; i++) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- ack = ixgbe_get_i2c_data(&i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ ack = ixgbe_get_i2c_data(hw, &i2cctl);
udelay(1);
if (ack == 0)
@@ -1758,15 +1941,15 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
**/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- *data = ixgbe_get_i2c_data(&i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ *data = ixgbe_get_i2c_data(hw, &i2cctl);
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -1786,7 +1969,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
s32 status;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -1822,14 +2005,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
u32 i2cctl_r = 0;
for (i = 0; i < timeout; i++) {
- *i2cctl |= IXGBE_I2C_CLK_OUT;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
- i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- if (i2cctl_r & IXGBE_I2C_CLK_IN)
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
break;
}
}
@@ -1844,9 +2027,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
- *i2cctl &= ~IXGBE_I2C_CLK_OUT;
+ *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
@@ -1864,19 +2047,19 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT;
+ *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT;
+ *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
/* Verify data was set correctly */
- *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- if (data != ixgbe_get_i2c_data(i2cctl)) {
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
return IXGBE_ERR_I2C;
}
@@ -1891,9 +2074,9 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*
* Returns the I2C data bit value
**/
-static bool ixgbe_get_i2c_data(u32 *i2cctl)
+static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
- if (*i2cctl & IXGBE_I2C_DATA_IN)
+ if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
return true;
return false;
}
@@ -1907,7 +2090,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
**/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
u32 i;
ixgbe_i2c_start(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 54071ed17e3b..434643881287 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -77,6 +77,11 @@
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
+#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
@@ -110,7 +115,6 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -157,4 +161,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val);
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 97c85b859536..c76ba90ecc6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -221,7 +221,8 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ rss = min_t(int, ixgbe_max_rss_indices(adapter),
+ num_online_cpus());
} else {
rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
}
@@ -618,6 +619,27 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
return 0;
}
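+/* apply a queue-drop-enable (QDE) setting to every queue in a VF's pool */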
+static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
+ u32 qde)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
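+ /* queues per pool, derived from the VMDq queue mask */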
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ int i;
+
+ for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
+ u32 reg;
+
+ /* flush previous write */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* write the requested drop-enable setting for this queue */
+ reg = IXGBE_QDE_WRITE | qde;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+}
+
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
@@ -647,15 +669,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
/* force drop enable for all VF Rx queues */
- for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
- /* flush previous write */
- IXGBE_WRITE_FLUSH(hw);
-
- /* indicate to hardware that we want to set drop enable */
- reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
- reg |= i << IXGBE_QDE_IDX_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
- }
+ ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
@@ -1079,52 +1093,86 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return ixgbe_set_vf_mac(adapter, vf, mac);
}
+static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
+ u16 vlan, u8 qos)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err = 0;
+
+ if (adapter->vfinfo[vf].pf_vlan)
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan,
+ vf);
+ if (err)
+ goto out;
+ ixgbe_set_vmvir(adapter, vlan, qos, vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ if (adapter->vfinfo[vf].spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ adapter->vfinfo[vf].vlan_count++;
+
+ /* enable hide vlan on X550 */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
+ IXGBE_QDE_HIDE_VLAN);
+
+ adapter->vfinfo[vf].pf_vlan = vlan;
+ adapter->vfinfo[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before attempting to use the VF device.\n");
+ }
+
+out:
+ return err;
+}
+
+static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_clear_vmvir(adapter, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+ if (adapter->vfinfo[vf].vlan_count)
+ adapter->vfinfo[vf].vlan_count--;
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+
+ return err;
+}
+
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (vlan || qos) {
+ /* Check if there is already a port VLAN set, if so
+ * we have to delete the old one first before we
+ * can set the new one. The usage model had
+ * previously assumed the user would delete the
+ * old port VLAN before setting a new one but this
+ * is not necessarily the case.
+ */
if (adapter->vfinfo[vf].pf_vlan)
- err = ixgbe_set_vf_vlan(adapter, false,
- adapter->vfinfo[vf].pf_vlan,
- vf);
- if (err)
- goto out;
- err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+ err = ixgbe_disable_port_vlan(adapter, vf);
if (err)
goto out;
- ixgbe_set_vmvir(adapter, vlan, qos, vf);
- ixgbe_set_vmolr(hw, vf, false);
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- adapter->vfinfo[vf].vlan_count++;
- adapter->vfinfo[vf].pf_vlan = vlan;
- adapter->vfinfo[vf].pf_qos = qos;
- dev_info(&adapter->pdev->dev,
- "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
- if (test_bit(__IXGBE_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF VLAN has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before"
- " attempting to use the VF device.\n");
- }
+ err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
} else {
- err = ixgbe_set_vf_vlan(adapter, false,
- adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_clear_vmvir(adapter, vf);
- ixgbe_set_vmolr(hw, vf, true);
- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
- if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
- adapter->vfinfo[vf].pf_vlan = 0;
- adapter->vfinfo[vf].pf_qos = 0;
+ err = ixgbe_disable_port_vlan(adapter, vf);
}
+
out:
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index dfd55d83bc03..d101b25dc4b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -74,9 +74,22 @@
#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
#define IXGBE_DEV_ID_X540T1 0x1560
+#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
+#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -84,7 +97,8 @@
#define IXGBE_CTRL_EXT 0x00018
#define IXGBE_ESDP 0x00020
#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL 0x00028
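+/* the I2C control register moved on X550; select the offset by MAC type */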
+#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ 0x15F5C : 0x00028))
#define IXGBE_LEDCTL 0x00200
#define IXGBE_FRTIMER 0x00048
#define IXGBE_TCPTIMER 0x0004C
@@ -112,10 +126,14 @@
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN 0x00000001
-#define IXGBE_I2C_CLK_OUT 0x00000002
-#define IXGBE_I2C_DATA_IN 0x00000004
-#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00004000 : 0x00000001)
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00000200 : 0x00000002)
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00001000 : 0x00000004)
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00000400 : 0x00000008)
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
@@ -290,8 +308,17 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_IMIRVP 0x05AC0
#define IXGBE_VMD_CTL 0x0581C
#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+/* Registers for setting up RSS on X550 with SRIOV
+ * _p - pool number (0..63)
+ * _i - index (0..9 for PFVFRSSRK, 0..15 for PFVFRETA)
+ */
+#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4))
+#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40))
+#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40))
+
/* Flow Director registers */
#define IXGBE_FDIRCTRL 0x0EE00
#define IXGBE_FDIRHKEY 0x0EE68
@@ -725,6 +752,24 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_LDPCECL 0x0E820
#define IXGBE_LDPCECH 0x0E821
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+
+#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */
+#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
+
+#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
+
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+
/* Management */
#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -798,6 +843,12 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_PBACLR_82599 0x11068
#define IXGBE_CIAA_82599 0x11088
#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_CIAA_X550 0x11508
+#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
+#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
#define IXGBE_PICAUSE 0x110B0
#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -1120,6 +1171,13 @@ struct ixgbe_thermal_sensor_data {
/* MDIO definitions */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+#define IXGBE_TWINAX_DEV 1
+
#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
@@ -1129,9 +1187,23 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
+#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
+#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
+
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */
+#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */
+#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */
/* MII clause 22/28 definitions */
#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
@@ -1632,6 +1704,7 @@ enum {
#define IXGBE_LINKS_TL_FAULT 0x00001000
#define IXGBE_LINKS_SIGNAL 0x00000F00
+#define IXGBE_LINKS_SPEED_NON_STD 0x08000000
#define IXGBE_LINKS_SPEED_82599 0x30000000
#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
@@ -1674,12 +1747,14 @@ enum {
#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
/* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM 0x0001
-#define IXGBE_GSSR_PHY0_SM 0x0002
-#define IXGBE_GSSR_PHY1_SM 0x0004
-#define IXGBE_GSSR_MAC_CSR_SM 0x0008
-#define IXGBE_GSSR_FLASH_SM 0x0010
-#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
+#define IXGBE_GSSR_I2C_MASK 0x1800
/* FW Status register bitmask */
#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
@@ -1713,27 +1788,32 @@ enum {
#define IXGBE_PBANUM_LENGTH 11
/* Checksum and EEPROM pointers */
-#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
-#define IXGBE_EEPROM_CHECKSUM 0x3F
-#define IXGBE_EEPROM_SUM 0xBABA
-#define IXGBE_PCIE_ANALOG_PTR 0x03
-#define IXGBE_ATLAS0_CONFIG_PTR 0x04
-#define IXGBE_PHY_PTR 0x04
-#define IXGBE_ATLAS1_CONFIG_PTR 0x05
-#define IXGBE_OPTION_ROM_PTR 0x05
-#define IXGBE_PCIE_GENERAL_PTR 0x06
-#define IXGBE_PCIE_CONFIG0_PTR 0x07
-#define IXGBE_PCIE_CONFIG1_PTR 0x08
-#define IXGBE_CORE0_PTR 0x09
-#define IXGBE_CORE1_PTR 0x0A
-#define IXGBE_MAC0_PTR 0x0B
-#define IXGBE_MAC1_PTR 0x0C
-#define IXGBE_CSR0_CONFIG_PTR 0x0D
-#define IXGBE_CSR1_CONFIG_PTR 0x0E
-#define IXGBE_FW_PTR 0x0F
-#define IXGBE_PBANUM0_PTR 0x15
-#define IXGBE_PBANUM1_PTR 0x16
-#define IXGBE_FREE_SPACE_PTR 0X3E
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_PCIE_ANALOG_PTR_X550 0x02
+#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000
+#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24
+#define IXGBE_PCIE_CONFIG_SIZE 0x08
+#define IXGBE_EEPROM_LAST_WORD 0x41
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_FREE_SPACE_PTR 0X3E
/* External Thermal Sensor Config */
#define IXGBE_ETS_CFG 0x26
@@ -1994,12 +2074,14 @@ enum {
#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000
#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
#define IXGBE_FWSM_TS_ENABLED 0x1
/* Queue Drop Enable */
#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_HIDE_VLAN 0x00000002
#define IXGBE_QDE_IDX_MASK 0x00007F00
#define IXGBE_QDE_IDX_SHIFT 8
#define IXGBE_QDE_WRITE 0x00010000
@@ -2289,18 +2371,32 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIR_DROP_QUEUE 127
/* Manageability Host Interface defines */
-#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
-#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
-#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */
+#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */
/* CEM Support */
-#define FW_CEM_HDR_LEN 0x4
-#define FW_CEM_CMD_DRIVER_INFO 0xDD
-#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
-#define FW_CEM_CMD_RESERVED 0x0
-#define FW_CEM_UNUSED_VER 0x0
-#define FW_CEM_MAX_RETRIES 3
-#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 1024
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2313,6 +2409,25 @@ struct ixgbe_hic_hdr {
u8 checksum;
};
+struct ixgbe_hic_hdr2_req {
+ u8 cmd;
+ u8 buf_lenh;
+ u8 buf_lenl;
+ u8 checksum;
+};
+
+struct ixgbe_hic_hdr2_rsp {
+ u8 cmd;
+ u8 buf_lenl;
+ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ u8 checksum;
+};
+
+union ixgbe_hic_hdr2 {
+ struct ixgbe_hic_hdr2_req req;
+ struct ixgbe_hic_hdr2_rsp rsp;
+};
+
struct ixgbe_hic_drv_info {
struct ixgbe_hic_hdr hdr;
u8 port_num;
@@ -2324,6 +2439,32 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+/* These need to be dword aligned */
+struct ixgbe_hic_read_shadow_ram {
+ union ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_write_shadow_ram {
+ union ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_disable_rxen {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad2;
+ u16 pad3;
+};
+
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2437,10 +2578,12 @@ struct ixgbe_adv_tx_context_desc {
typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
typedef u32 ixgbe_link_speed;
-#define IXGBE_LINK_SPEED_UNKNOWN 0
-#define IXGBE_LINK_SPEED_100_FULL 0x0008
-#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
-#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_100_FULL 0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
+#define IXGBE_LINK_SPEED_5GB_FULL 0x0800
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
@@ -2588,6 +2731,8 @@ enum ixgbe_mac_type {
ixgbe_mac_82598EB,
ixgbe_mac_82599EB,
ixgbe_mac_X540,
+ ixgbe_mac_X550,
+ ixgbe_mac_X550EM_x,
ixgbe_num_macs
};
@@ -2596,6 +2741,9 @@ enum ixgbe_phy_type {
ixgbe_phy_none,
ixgbe_phy_tn,
ixgbe_phy_aq,
+ ixgbe_phy_x550em_kr,
+ ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_ext_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -2839,7 +2987,7 @@ struct ixgbe_eeprom_operations {
s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
- u16 (*calc_checksum)(struct ixgbe_hw *);
+ s32 (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
@@ -2861,8 +3009,8 @@ struct ixgbe_mac_operations {
s32 (*disable_rx_buff)(struct ixgbe_hw *);
s32 (*enable_rx_buff)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
- void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u32);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@@ -2908,6 +3056,11 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+
+ /* DMA Coalescing */
+ s32 (*dmac_config)(struct ixgbe_hw *hw);
+ s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
};
struct ixgbe_phy_operations {
@@ -2920,6 +3073,7 @@ struct ixgbe_phy_operations {
s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
+ s32 (*setup_internal_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
@@ -2928,6 +3082,8 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
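+ /* combined ops transfer a 16-bit value at a 16-bit register address */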
+ s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
};
@@ -2980,6 +3136,8 @@ struct ixgbe_phy_info {
bool sfp_setup_needed;
u32 revision;
enum ixgbe_media_type media_type;
+ u8 lan_id;
+ u32 phy_semaphore_mask;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
enum ixgbe_smart_speed smart_speed;
@@ -3086,4 +3244,71 @@ struct ixgbe_info {
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+
+#define IXGBE_KX4_LINK_CNTL_1 0x4C
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+
+#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
+#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
+
+#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0
+#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \
+ (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+ (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
+#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
+#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
+#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
+
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e88305d5d18d..ba54ff07b438 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -32,6 +32,7 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
+#include "ixgbe_x540.h"
#define IXGBE_X540_MAX_TX_QUEUES 128
#define IXGBE_X540_MAX_RX_QUEUES 128
@@ -42,17 +43,15 @@
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
-static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
return ixgbe_media_type_copper;
}
-static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -76,9 +75,8 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
-static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
@@ -92,7 +90,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
@@ -179,7 +177,7 @@ mac_reset_top:
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
s32 ret_val;
@@ -197,7 +195,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@@ -316,7 +314,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
*
* @hw: pointer to hardware structure
**/
-static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -324,6 +322,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
+ u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
+ u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
/*
* Do not use hw->eeprom.ops.read because we do not want to take
@@ -332,10 +332,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
*/
/* Include words 0x0-0x3E in the checksum (the checksum word at 0x3F is skipped) */
- for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
+ for (i = 0; i < checksum_last_word; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
checksum += word;
}
@@ -344,11 +344,11 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
* Include all data from pointers 0x3, 0x6-0xE. This excludes the
* FW, PHY module, and PCIe Expansion/Option ROM pointers.
*/
- for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
continue;
- if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
+ if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@@ -358,8 +358,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
pointer >= hw->eeprom.word_size)
continue;
- if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
+ if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
- break;
}
@@ -368,10 +369,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
(pointer + length) >= hw->eeprom.word_size)
continue;
- for (j = pointer+1; j <= pointer+length; j++) {
- if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
checksum += word;
}
@@ -379,7 +380,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return checksum;
+ return (s32)checksum;
}
/**
@@ -410,23 +411,34 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return IXGBE_ERR_SWFW_SYNC;
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
+
+ checksum = (u16)(status & 0xffff);
/* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores twice here.
*/
status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
&read_checksum);
+ if (status)
+ goto out;
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ hw_dbg(hw, "Invalid EEPROM checksum");
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ }
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
- /* Verify read and calculated checksums are the same */
- if (read_checksum != checksum)
- return IXGBE_ERR_EEPROM_CHECKSUM;
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@@ -457,15 +469,22 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return IXGBE_ERR_SWFW_SYNC;
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
+
+ checksum = (u16)(status & 0xffff);
/* Do not use hw->eeprom.ops.write because we do not want to
* take the synchronization semaphores twice here.
*/
status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
- if (!status)
- status = ixgbe_update_flash_X540(hw);
+ if (status)
+ goto out;
+
+ status = ixgbe_update_flash_X540(hw);
+out:
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@@ -544,7 +563,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swfw_sync;
u32 swmask = mask;
@@ -612,7 +631,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through the SW_FW_SYNC register
* for the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swfw_sync;
u32 swmask = mask;
@@ -699,7 +718,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
* Devices that implement the version 2 interface:
* X540
**/
-static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
@@ -735,7 +754,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* Devices that implement the version 2 interface:
* X540
**/
-static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
new file mode 100644
index 000000000000..a1468b1f4d8a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -0,0 +1,39 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
new file mode 100644
index 000000000000..ffdd1231f419
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -0,0 +1,1432 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+/** ixgbe_identify_phy_x550em - Get PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+
+ return ixgbe_identify_module_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ hw->phy.type = ixgbe_phy_x550em_kx4;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ hw->phy.type = ixgbe_phy_x550em_kr;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ return ixgbe_identify_phy_generic(hw);
+ default:
+ break;
+ }
+ return 0;
+}
+
+static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return 0;
+}
+
+/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register
+ * of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: pointer to storage for the value read from the register
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data)
+{
+ u32 i, command, error;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usleep_range(10, 20);
+
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ hw_dbg(hw, "Failed to read, error %x\n", error);
+ return IXGBE_ERR_PHY;
+ }
+
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ hw_dbg(hw, "Read timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+
+ return 0;
+}
+
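The indirect IOSF access above follows a fixed pattern: program the control register with the target address, poll the BUSY bit with a bounded timeout, check the completion-error field, then fetch the result from the data register. The pattern in isolation, with the register accessors stubbed out (reg_read, reg_write and delay_us are hypothetical placeholders):

    #include <stdint.h>
    #include <errno.h>

    #define CTRL_BUSY  (1u << 31)   /* illustrative bit positions */
    #define CTRL_ERROR (1u << 30)
    #define POLL_LIMIT 100

    extern uint32_t reg_read(uint32_t reg);             /* hypothetical */
    extern void reg_write(uint32_t reg, uint32_t val);  /* hypothetical */
    extern void delay_us(unsigned int us);              /* hypothetical */

    static int indirect_read(uint32_t ctrl, uint32_t data, uint32_t cmd,
                             uint32_t *out)
    {
            uint32_t status = 0;
            unsigned int i;

            reg_write(ctrl, cmd);                /* start the address cycle */

            for (i = 0; i < POLL_LIMIT; i++) {   /* wait for BUSY to clear */
                    delay_us(10);
                    status = reg_read(ctrl);
                    if (!(status & CTRL_BUSY))
                            break;
            }
            if (status & (CTRL_BUSY | CTRL_ERROR))
                    return -EIO;                 /* timed out or failed */

            *out = reg_read(data);               /* fetch the result */
            return 0;
    }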
+/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
+ * command assuming that the semaphore is already obtained.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+ struct ixgbe_hic_read_shadow_ram buffer;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = cpu_to_be32(offset * 2);
+ /* one word */
+ buffer.length = cpu_to_be16(sizeof(u16));
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+ if (status)
+ return status;
+
+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ FW_NVM_DATA_OFFSET);
+
+ return 0;
+}
+
+/** ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ struct ixgbe_hic_read_shadow_ram buffer;
+ u32 current_word = 0;
+ u16 words_to_read;
+ s32 status;
+ u32 i;
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status) {
+ hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
+ return status;
+ }
+
+ while (words) {
+ if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+ words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+ else
+ words_to_read = words;
+
+ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = cpu_to_be32((offset + current_word) * 2);
+ buffer.length = cpu_to_be16(words_to_read * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ false);
+ if (status) {
+ hw_dbg(hw, "Host interface command failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < words_to_read; i++) {
+ u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
+ 2 * i;
+ u32 value = IXGBE_READ_REG(hw, reg);
+
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ i++;
+ if (i < words_to_read) {
+ value >>= 16;
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ }
+ }
+ words -= words_to_read;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
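Each 32-bit FLEX_MNG register returned by the host interface packs two EEPROM words, which is why the copy loop above advances i twice per register: the low half-word first, then the high half-word if another word remains. The unpack step as a stand-alone sketch (hypothetical helper, not part of the patch):

    #include <stdint.h>
    #include <stddef.h>

    /* Unpack 'words' 16-bit values from 32-bit registers, low half first,
     * matching the loop in ixgbe_read_ee_hostif_buffer_X550 above.
     */
    static void unpack_words(const uint32_t *regs, uint16_t *out, size_t words)
    {
            size_t i;

            for (i = 0; i < words; i++) {
                    uint32_t value = regs[i / 2];

                    if (i & 1)
                            value >>= 16;   /* odd index: high half-word */
                    out[i] = (uint16_t)(value & 0xffff);
            }
    }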
+/** ixgbe_checksum_ptr_x550 - Checksum one pointer region
+ * @hw: pointer to hardware structure
+ * @ptr: pointer offset in eeprom
+ * @size: size of section pointed by ptr, if 0 first word will be used as size
+ * @csum: address of checksum to update
+ * @buffer: pointer to buffer containing the EEPROM image, or NULL to read
+ * it from hardware in chunks
+ * @buffer_size: size of the buffer, in words
+ *
+ * Returns error status for any failure
+ **/
+static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ u16 size, u16 *csum, u16 *buffer,
+ u32 buffer_size)
+{
+ u16 buf[256];
+ s32 status;
+ u16 length, bufsz, i, start;
+ u16 *local_buffer;
+
+ bufsz = sizeof(buf) / sizeof(buf[0]);
+
+ /* Read a chunk at the pointer location */
+ if (!buffer) {
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
+ if (status) {
+ hw_dbg(hw, "Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = buf;
+ } else {
+ if (buffer_size < ptr)
+ return IXGBE_ERR_PARAM;
+ local_buffer = &buffer[ptr];
+ }
+
+ if (size) {
+ start = 0;
+ length = size;
+ } else {
+ start = 1;
+ length = local_buffer[0];
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (ptr + length) >= hw->eeprom.word_size)
+ return 0;
+ }
+
+ if (buffer && ((u32)start + (u32)length > buffer_size))
+ return IXGBE_ERR_PARAM;
+
+ for (i = start; length; i++, length--) {
+ if (i == bufsz && !buffer) {
+ ptr += bufsz;
+ i = 0;
+ if (length < bufsz)
+ bufsz = length;
+
+ /* Read a chunk at the pointer location */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
+ bufsz, buf);
+ if (status) {
+ hw_dbg(hw, "Failed to read EEPROM image\n");
+ return status;
+ }
+ }
+ *csum += local_buffer[i];
+ }
+ return 0;
+}
+
+/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @buffer: pointer to buffer containing the EEPROM image, or NULL to read it from hardware
+ * @buffer_size: size of buffer
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+{
+ u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 *local_buffer;
+ s32 status;
+ u16 checksum = 0;
+ u16 pointer, i, size;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (!buffer) {
+ /* Read pointer area */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
+ IXGBE_EEPROM_LAST_WORD + 1,
+ eeprom_ptrs);
+ if (status) {
+ hw_dbg(hw, "Failed to read EEPROM image\n");
+ return status;
+ }
+ local_buffer = eeprom_ptrs;
+ } else {
+ if (buffer_size < IXGBE_EEPROM_LAST_WORD)
+ return IXGBE_ERR_PARAM;
+ local_buffer = buffer;
+ }
+
+ /* For X550 hardware include 0x0-0x41 in the checksum, skip the
+ * checksum word itself
+ */
+ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
+ if (i != IXGBE_EEPROM_CHECKSUM)
+ checksum += local_buffer[i];
+
+ /* Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ pointer = local_buffer[i];
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ switch (i) {
+ case IXGBE_PCIE_GENERAL_PTR:
+ size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
+ break;
+ case IXGBE_PCIE_CONFIG0_PTR:
+ case IXGBE_PCIE_CONFIG1_PTR:
+ size = IXGBE_PCIE_CONFIG_SIZE;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+
+ status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
+ buffer, buffer_size);
+ if (status)
+ return status;
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
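The stored checksum word is chosen so that the sum of every covered word plus the checksum itself equals IXGBE_EEPROM_SUM, which is why both calc routines end with the same subtraction. The identity as a small sketch (EEPROM_SUM stands in for the driver constant):

    #include <stdint.h>
    #include <stddef.h>

    #define EEPROM_SUM 0xBABA       /* IXGBE_EEPROM_SUM in the driver */

    /* checksum = EEPROM_SUM - sum(words), so sum(words) + checksum wraps
     * back to EEPROM_SUM in 16-bit arithmetic.
     */
    static uint16_t calc_checksum(const uint16_t *words, size_t n)
    {
            uint16_t sum = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    sum += words[i];    /* mod 2^16 */
            return (uint16_t)(EEPROM_SUM - sum);
    }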
+/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ return ixgbe_calc_checksum_X550(hw, NULL, 0);
+}
+
+/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ return status;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ hw_dbg(hw, "Invalid EEPROM checksum");
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif,
+ * assuming that the semaphore is already obtained.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+ struct ixgbe_hic_write_shadow_ram buffer;
+
+ buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+ buffer.hdr.req.buf_lenh = 0;
+ buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* one word */
+ buffer.length = cpu_to_be16(sizeof(u16));
+ buffer.data = data;
+ buffer.address = cpu_to_be32(offset * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ hw_dbg(hw, "write ee hostif failed to get semaphore");
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
+ **/
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ union ixgbe_hic_hdr2 buffer;
+
+ buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
+ buffer.req.buf_lenh = 0;
+ buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
+ buffer.req.checksum = FW_DEFAULT_CHECKSUM;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+ return status;
+}
+
+/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum = 0;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
+ if (status) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
+
+ status = ixgbe_calc_eeprom_checksum_X550(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+ if (status)
+ return status;
+
+ status = ixgbe_update_flash_X550(hw);
+
+ return status;
+}
+
+/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Write 16 bit word(s) to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = 0;
+ u32 i = 0;
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status) {
+ hw_dbg(hw, "EEPROM write buffer - semaphore failed\n");
+ return status;
+ }
+
+ for (i = 0; i < words; i++) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
+ data[i]);
+ if (status) {
+ hw_dbg(hw, "Eeprom buffered write failed\n");
+ break;
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
+ * @hw: pointer to hardware structure
+ **/
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* CS4227 does not support autoneg, so disable the laser control
+ * functions for SFP+ fiber
+ */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+}
+
+/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+{
+ bool setup_linear;
+ u16 reg_slice, edc_mode;
+ s32 ret_val;
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_unknown:
+ return 0;
+ case ixgbe_sfp_type_not_present:
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ setup_linear = true;
+ break;
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_da_act_lmt_core0:
+ case ixgbe_sfp_type_da_act_lmt_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ setup_linear = false;
+ break;
+ default:
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ ixgbe_init_mac_link_ops_X550em(hw);
+ hw->phy.ops.reset = NULL;
+
+ /* The CS4227 slice address is the base address + the port-pair reg
+ * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
+ */
+ reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
+
+ if (setup_linear)
+ edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+
+ /* Configure CS4227 for connection type. */
+ ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ edc_mode);
+
+ if (ret_val)
+ ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
+ edc_mode);
+
+ return ret_val;
+}
+
+/** ixgbe_get_link_capabilities_X550em - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ **/
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ /* SFP */
+ if (hw->phy.media_type == ixgbe_media_type_fiber) {
+ /* CS4227 SFP must not enable auto-negotiation */
+ *autoneg = false;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return 0;
+ }
+
+ /* Link capabilities are based on SFP */
+ if (hw->phy.multispeed_fiber)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ } else {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ }
+ return 0;
+}
+
+/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
+ * IOSF device
+ *
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
+{
+ u32 i, command, error;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Write IOSF data register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usleep_range(10, 20);
+
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ hw_dbg(hw, "Failed to write, error %x\n", error);
+ return IXGBE_ERR_PHY;
+ }
+
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ hw_dbg(hw, "Write timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ * internal and external PHY at a specific speed, without autonegotiation.
+ **/
+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Disable training protocol FSM. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Disable Flex from training TXFFE. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Enable override for coefficients. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
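Every step of the iXFI bring-up above is the same read-modify-write transaction against a KR PHY register. A hypothetical helper that factors out the pattern (not part of the patch; it uses only the read/write functions introduced in this file):

    /* Read-modify-write one KR PHY register over the IOSF sideband:
     * clear the 'clr' bits, set the 'set' bits, write the result back.
     */
    static s32 kr_phy_rmw(struct ixgbe_hw *hw, u32 reg, u32 clr, u32 set)
    {
            u32 val;
            s32 status;

            status = ixgbe_read_iosf_sb_reg_x550(hw, reg,
                                                 IXGBE_SB_IOSF_TARGET_KR_PHY,
                                                 &val);
            if (status)
                    return status;

            val &= ~clr;
            val |= set;
            return ixgbe_write_iosf_sb_reg_x550(hw, reg,
                                                IXGBE_SB_IOSF_TARGET_KR_PHY,
                                                val);
    }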
+/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KX4 PHY.
+ **/
+s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
+ IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
+
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
+
+ /* Advertise 10G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
+
+ /* Advertise 1G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, reg_val);
+
+ return status;
+}
+
+/** ixgbe_setup_kr_x550em - Configure the KR PHY.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY.
+ **/
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY to talk to the external PHY. The base
+ * driver will call this function when it gets notification via interrupt from
+ * the external PHY. This function forces the internal PHY into iXFI mode at
+ * the correct speed.
+ *
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
+ **/
+s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 lasi, autoneg_status, speed;
+ ixgbe_link_speed force_speed;
+
+ /* Verify that the external link status has changed */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi);
+ if (status)
+ return status;
+
+ /* If there was no change in link status, we can just exit */
+ if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
+ return 0;
+
+ /* read twice back to back; the status bit latches low, so the second read returns the current state */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (status)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (status)
+ return status;
+
+ /* If link is not up, return an error so the caller treats link as down */
+ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+ if (status)
+ return status;
+
+ /* clear everything but the speed and duplex bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
+
+ switch (speed) {
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ default:
+ /* Internal PHY does not support anything else */
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+}
+
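The back-to-back reads of the autoneg status register follow the usual latched-status idiom: the first read returns the latched (historical) value and the second the current one. In isolation (REG and DEVTYPE are placeholders):

    /* Latched-status idiom: discard the first read, act on the second. */
    status = hw->phy.ops.read_reg(hw, REG, DEVTYPE, &val);         /* latched */
    if (!status)
            status = hw->phy.ops.read_reg(hw, REG, DEVTYPE, &val); /* current */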
+/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ **/
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 esdp;
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ }
+
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ ixgbe_init_mac_link_ops_X550em(hw);
+ if (phy->sfp_type != ixgbe_sfp_type_unknown)
+ phy->ops.reset = NULL;
+
+ /* Set functions pointers based on phy type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kx4:
+ phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_x550em_kr:
+ phy->ops.setup_link = ixgbe_setup_kr_x550em;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
+ case ixgbe_phy_x550em_ext_t:
+ phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
+ break;
+ default:
+ break;
+ }
+ return ret_val;
+}
+
+/** ixgbe_get_media_type_X550em - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ *
+ */
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ media_type = ixgbe_media_type_copper;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+ return media_type;
+}
+
+/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 reg;
+ u32 retries = 2;
+
+ do {
+ /* decrement retries counter and exit if we hit 0 */
+ if (retries < 1) {
+ hw_dbg(hw, "External PHY not yet finished resetting.");
+ return IXGBE_ERR_PHY;
+ }
+ retries--;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ /* Verify PHY FW reset has completed */
+ } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
+
+ /* Set port to low power mode */
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ /* Enable the transmitter */
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ reg);
+ if (status)
+ return status;
+
+ /* Un-stall the PHY FW */
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ return status;
+}
+
+/** ixgbe_reset_hw_X550em - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link
+ * (MAC) reset.
+ **/
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl = 0;
+ u32 i;
+ bool link_up = false;
+
+ /* Call adapter stop to disable Tx/Rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status)
+ return status;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+ /* start the external PHY */
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = ixgbe_init_ext_t_x550em(hw);
+ if (status)
+ return status;
+ }
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ /* Reset PHY */
+ if (!hw->phy.reset_disable && hw->phy.ops.reset)
+ hw->phy.ops.reset(hw);
+
+mac_reset_top:
+ /* Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear meaning reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ msleep(50);
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ return status;
+}
+
+#define X550_COMMON_MAC \
+ .init_hw = &ixgbe_init_hw_generic, \
+ .start_hw = &ixgbe_start_hw_X540, \
+ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \
+ .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \
+ .get_mac_addr = &ixgbe_get_mac_addr_generic, \
+ .get_device_caps = &ixgbe_get_device_caps_generic, \
+ .stop_adapter = &ixgbe_stop_adapter_generic, \
+ .get_bus_info = &ixgbe_get_bus_info_generic, \
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \
+ .read_analog_reg8 = NULL, \
+ .write_analog_reg8 = NULL, \
+ .set_rxpba = &ixgbe_set_rxpba_generic, \
+ .check_link = &ixgbe_check_mac_link_generic, \
+ .led_on = &ixgbe_led_on_generic, \
+ .led_off = &ixgbe_led_off_generic, \
+ .blink_led_start = &ixgbe_blink_led_start_X540, \
+ .blink_led_stop = &ixgbe_blink_led_stop_X540, \
+ .set_rar = &ixgbe_set_rar_generic, \
+ .clear_rar = &ixgbe_clear_rar_generic, \
+ .set_vmdq = &ixgbe_set_vmdq_generic, \
+ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \
+ .clear_vmdq = &ixgbe_clear_vmdq_generic, \
+ .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \
+ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \
+ .enable_mc = &ixgbe_enable_mc_generic, \
+ .disable_mc = &ixgbe_disable_mc_generic, \
+ .clear_vfta = &ixgbe_clear_vfta_generic, \
+ .set_vfta = &ixgbe_set_vfta_generic, \
+ .fc_enable = &ixgbe_fc_enable_generic, \
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \
+ .init_uta_tables = &ixgbe_init_uta_tables_generic, \
+ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
+ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
+ .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
+ .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
+ .get_thermal_sensor_data = NULL, \
+ .init_thermal_sensor_thresh = NULL, \
+ .prot_autoc_read = &prot_autoc_read_generic, \
+ .prot_autoc_write = &prot_autoc_write_generic,
+
+static struct ixgbe_mac_operations mac_ops_X550 = {
+ X550_COMMON_MAC
+ .reset_hw = &ixgbe_reset_hw_X540,
+ .get_media_type = &ixgbe_get_media_type_X540,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
+ .setup_link = &ixgbe_setup_mac_link_X540,
+ .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
+ .setup_sfp = NULL,
+};
+
+static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+ X550_COMMON_MAC
+ .reset_hw = &ixgbe_reset_hw_X550em,
+ .get_media_type = &ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+};
+
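X550_COMMON_MAC uses a common pattern for sharing designated initializers across ops tables: the macro expands to the members all variants agree on, and each table then lists only what differs. The technique in miniature (names are illustrative only):

    struct ops {
            int (*open)(void);
            int (*reset)(void);
    };

    extern int generic_open(void);
    extern int reset_a(void);
    extern int reset_b(void);

    #define COMMON_OPS \
            .open = generic_open,

    static const struct ops ops_a = {
            COMMON_OPS
            .reset = reset_a,       /* variant-specific member */
    };

    static const struct ops ops_b = {
            COMMON_OPS
            .reset = reset_b,
    };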
+#define X550_COMMON_EEP \
+ .read = &ixgbe_read_ee_hostif_X550, \
+ .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \
+ .write = &ixgbe_write_ee_hostif_X550, \
+ .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
+ .update_checksum = &ixgbe_update_eeprom_checksum_X550, \
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_X550,
+
+static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+ X550_COMMON_EEP
+ .init_params = &ixgbe_init_eeprom_params_X550,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+ X550_COMMON_EEP
+ .init_params = &ixgbe_init_eeprom_params_X540,
+};
+
+#define X550_COMMON_PHY \
+ .identify_sfp = &ixgbe_identify_module_generic, \
+ .reset = NULL, \
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \
+ .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \
+ .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
+ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
+ .check_overtemp = &ixgbe_tn_check_overtemp, \
+ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
+
+static struct ixgbe_phy_operations phy_ops_X550 = {
+ X550_COMMON_PHY
+ .init = NULL,
+ .identify = &ixgbe_identify_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
+ .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
+};
+
+static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+ X550_COMMON_PHY
+ .init = &ixgbe_init_phy_ops_X550em,
+ .identify = &ixgbe_identify_phy_x550em,
+ .read_reg = NULL, /* defined later */
+ .write_reg = NULL, /* defined later */
+ .setup_link = NULL, /* defined later */
+};
+
+struct ixgbe_info ixgbe_X550_info = {
+ .mac = ixgbe_mac_X550,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X550,
+ .eeprom_ops = &eeprom_ops_X550,
+ .phy_ops = &phy_ops_X550,
+ .mbx_ops = &mbx_ops_generic,
+};
+
+struct ixgbe_info ixgbe_X550EM_x_info = {
+ .mac = ixgbe_mac_X550EM_x,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X550EM_x,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_X550EM_x,
+ .mbx_ops = &mbx_ops_generic,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 05e4f32d84f7..7412d378b77b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -31,6 +31,8 @@
/* Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index ba96cb5b886d..8c44ab25f3fa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,8 +58,9 @@ struct ixgbevf_tx_buffer {
};
struct ixgbevf_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
};
struct ixgbevf_stats {
@@ -79,7 +80,6 @@ struct ixgbevf_tx_queue_stats {
};
struct ixgbevf_rx_queue_stats {
- u64 non_eop_descs;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 csum_err;
@@ -92,9 +92,10 @@ struct ixgbevf_ring {
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
- unsigned int count; /* amount of descriptors */
- unsigned int next_to_use;
- unsigned int next_to_clean;
+ u16 count; /* amount of descriptors */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
union {
struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -110,12 +111,11 @@ struct ixgbevf_ring {
u64 hw_csum_rx_error;
u8 __iomem *tail;
+ struct sk_buff *skb;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
-
- u16 rx_buf_len;
int queue_index; /* needed for multiqueue queue management */
};
@@ -134,12 +134,10 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2K 2048
-#define IXGBEVF_RXBUFFER_4K 4096
-#define IXGBEVF_RXBUFFER_8K 8192
-#define IXGBEVF_RXBUFFER_10K 10240
+#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
@@ -307,6 +305,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
+/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
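ixgbevf_test_staterr tests descriptor bits without byte-swapping the hot field: cpu_to_le32() is applied to the constant instead, where it folds at compile time, and the comparison happens in the descriptor's little-endian byte order. Typical use, as in the Rx cleanup path later in this patch:

    /* descriptor-done test without swapping the descriptor field */
    if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
            break;  /* descriptor not yet written back by hardware */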
static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
{
u16 ntc = ring->next_to_clean;
@@ -339,8 +344,10 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
/* board specific private data structure */
struct ixgbevf_adapter {
- struct timer_list watchdog_timer;
+ /* this field must be first, see ixgbevf_process_skb_fields */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ struct timer_list watchdog_timer;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -363,7 +370,6 @@ struct ixgbevf_adapter {
struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 non_eop_descs;
int num_msix_vectors;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -373,7 +379,7 @@ struct ixgbevf_adapter {
*/
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
+
#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
struct msix_entry *msix_entries;
@@ -423,18 +429,17 @@ enum ixbgevf_state_t {
__IXGBEVF_WORK_INIT,
};
-struct ixgbevf_cb {
- struct sk_buff *prev;
-};
-#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
-
enum ixgbevf_boards {
board_82599_vf,
board_X540_vf,
+ board_X550_vf,
+ board_X550EM_x_vf,
};
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
/* needed by ethtool.c */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 030a219c85e3..62a0d8e0f17d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -66,6 +66,8 @@ static char ixgbevf_copyright[] =
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
[board_X540_vf] = &ixgbevf_X540_vf_info,
+ [board_X550_vf] = &ixgbevf_X550_vf_info,
+ [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -79,6 +81,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
/* required last entry */
{0, }
};
@@ -143,21 +147,6 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
return value;
}
-static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
- u32 val)
-{
- rx_ring->next_to_use = val;
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- ixgbevf_write_tail(rx_ring, val);
-}
-
/**
* ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
@@ -343,39 +332,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
}
/**
- * ixgbevf_receive_skb - Send a completed packet up the stack
- * @q_vector: structure containing interrupt and ring information
- * @skb: packet to send up
- * @status: hardware indication of status of receive
- * @rx_desc: rx descriptor
- **/
-static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
- struct sk_buff *skb, u8 status,
- union ixgbe_adv_rx_desc *rx_desc)
-{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- bool is_vlan = (status & IXGBE_RXD_STAT_VP);
- u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
-
- if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
-
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- napi_gro_receive(&q_vector->napi, skb);
- else
- netif_rx(skb);
-}
-
-/**
* ixgbevf_rx_skb - Helper function to determine proper Rx method
* @q_vector: structure containing interrupt and ring information
* @skb: packet to send up
- * @status: hardware indication of status of receive
- * @rx_desc: rx descriptor
**/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
- struct sk_buff *skb, u8 status,
- union ixgbe_adv_rx_desc *rx_desc)
+ struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
skb_mark_napi_id(skb, &q_vector->napi);
@@ -387,17 +349,17 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
- ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+ napi_gro_receive(&q_vector->napi, skb);
}
-/**
- * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @ring: pointer to Rx descriptor ring structure
- * @status_err: hardware indication of status of receive
+/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
* @skb: skb currently being received and modified
- **/
+ */
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
- u32 status_err, struct sk_buff *skb)
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -406,16 +368,16 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
return;
/* if IP and error */
- if ((status_err & IXGBE_RXD_STAT_IPCS) &&
- (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
+ ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
ring->rx_stats.csum_err++;
return;
}
- if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
return;
- if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
ring->rx_stats.csum_err++;
return;
}
@@ -424,52 +386,408 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the checksum, VLAN, protocol, and other fields within
+ * the skb.
+ */
+static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
+
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+ u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
+
+ if (test_bit(vid & VLAN_VID_MASK, active_vlans))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+/**
+ * ixgbevf_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it returns true
+ * indicating that this is in fact a non-EOP buffer.
+ **/
+static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
+
+ if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+ return false;
+
+ return true;
+}
+
+static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma = bi->dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
+
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
+ return true;
+}
+
/**
* ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
* @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
+ * @cleaned_count: number of buffers to replace
**/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
- int cleaned_count)
+ u16 cleaned_count)
{
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
unsigned int i = rx_ring->next_to_use;
- while (cleaned_count--) {
- rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_buffer_info[i];
+ /* nothing to do or no valid netdev defined */
+ if (!cleaned_count || !rx_ring->netdev)
+ return;
- if (!bi->skb) {
- struct sk_buff *skb;
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len);
- if (!skb)
- goto no_buffers;
+ do {
+ if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
+ break;
- bi->skb = skb;
+ /* Refresh the desc even if pkt_addr didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
- bi->dma = dma_map_single(rx_ring->dev, skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- dev_kfree_skb(skb);
- bi->skb = NULL;
- dev_err(rx_ring->dev, "Rx DMA map failed\n");
- break;
- }
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
}
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /* clear the hdr_addr for the next_to_use descriptor */
+ rx_desc->read.hdr_addr = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ ixgbevf_write_tail(rx_ring, i);
}
+}
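
The refill loop above avoids a wrap test against the ring size on every iteration by biasing the index negative: the index runs from (next_to_use - count) toward zero, so the wrap branch fires only once per lap, when the unsigned value overflows back to 0. A standalone sketch of the idiom (values hypothetical):

/* Biased-index idiom: count = 4, next_to_use = 2 */
unsigned int count = 4, i = 2;
unsigned int todo = 3;			/* buffers to fill */

i -= count;				/* bias: i "underflows" to -2 */
do {
	/* ... fill the descriptor at the current position ... */
	i++;
	if (!i)				/* walked off the end of the ring */
		i -= count;		/* re-bias and rewind the pointers */
} while (--todo);
i += count;				/* recover the true ring index */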
+
+/**
+ * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ixgbevf specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
-no_buffers:
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- if (rx_ring->next_to_use != i)
- ixgbevf_release_rx_desc(rx_ring, i);
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /* the header needs to contain the greater of either ETH_HLEN or
+ * 60 bytes, so that skb_pad can succeed when skb->len is under 60.
+ */
+ pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * ixgbevf_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right. These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ */
+static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ /* verify that the packet does not have any known errors */
+ if (unlikely(ixgbevf_test_staterr(rx_desc,
+ IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
+ struct net_device *netdev = rx_ring->netdev;
+
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ }
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ ixgbevf_pull_tail(rx_ring, skb);
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+/**
+ * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ */
+static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *old_buff)
+{
+ struct ixgbevf_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->page = old_buff->page;
+ new_buff->dma = old_buff->dma;
+ new_buff->page_offset = old_buff->page_offset;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
+ new_buff->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+}
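
Together with ixgbevf_alloc_mapped_page() above and the sync-for-CPU in the fetch path below, this completes the streaming-DMA recycle pattern: the page is mapped once, and ownership of each half is bounced with partial syncs instead of a full unmap/remap per packet. A minimal sketch of the pairing (dev, dma and offset assumed to come from an already-mapped buffer):

/* Sketch of the sync pairing; the page itself stays mapped throughout */
dma_sync_single_range_for_cpu(dev, dma, offset, 2048,
			      DMA_FROM_DEVICE);	/* CPU may read the buffer */
/* ... copy to or attach the buffer contents onto an skb ... */
dma_sync_single_range_for_device(dev, dma, offset, 2048,
				 DMA_FROM_DEVICE); /* hand it back to HW */
/* dma_unmap_page() only runs when the page is finally discarded */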
+
+static inline bool ixgbevf_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
+/**
+ * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ */
+static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IXGBEVF_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
+
+ if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* page is not reserved, we can reuse buffer as is */
+ if (likely(!ixgbevf_page_is_reserved(page)))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(ixgbevf_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+ return false;
+
+#endif
+ /* Even if we own the page, we are not allowed to use atomic_set();
+ * that would break get_page_unless_zero() users.
+ */
+ atomic_inc(&page->_count);
+
+ return true;
+}
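
On 4K-page systems the receive buffer is half a page, so alternating between the two halves needs only an XOR on the offset. A worked sketch with the assumed 2048-byte buffer size:

/* With PAGE_SIZE = 4096 and a 2048-byte buffer, the page holds exactly
 * two buffers, at offsets 0 and 2048. XOR-ing the offset with the
 * buffer size alternates between them with no branch:
 *
 *   0    ^ 2048 == 2048
 *   2048 ^ 2048 == 0
 */
unsigned int page_offset = 0;

page_offset ^= 2048;	/* now 2048: second half of the page */
page_offset ^= 2048;	/* back to 0: first half again */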
+
+static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct ixgbevf_rx_buffer *rx_buffer;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IXGBEVF_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -484,78 +802,51 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
- struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
- struct sk_buff *skb;
- unsigned int i;
- u32 len, staterr;
- int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
+ struct sk_buff *skb = rx_ring->skb;
- i = rx_ring->next_to_clean;
- rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ while (likely(total_rx_packets < budget)) {
+ union ixgbe_adv_rx_desc *rx_desc;
- while (staterr & IXGBE_RXD_STAT_DD) {
- if (!budget)
- break;
- budget--;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
- rmb(); /* read descriptor and rx_buffer_info after status DD */
- len = le16_to_cpu(rx_desc->wb.upper.length);
- skb = rx_buffer_info->skb;
- prefetch(skb->data - NET_IP_ALIGN);
- rx_buffer_info->skb = NULL;
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (rx_buffer_info->dma) {
- dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
- skb_put(skb, len);
- }
+ if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ break;
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
- next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
- prefetch(next_rxd);
- cleaned_count++;
+ /* retrieve a buffer from the ring */
+ skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
- next_buffer = &rx_ring->rx_buffer_info[i];
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
- if (!(staterr & IXGBE_RXD_STAT_EOP)) {
- skb->next = next_buffer->skb;
- IXGBE_CB(skb->next)->prev = skb;
- rx_ring->rx_stats.non_eop_descs++;
- goto next_desc;
- }
+ cleaned_count++;
- /* we should not be chaining buffers, if we did drop the skb */
- if (IXGBE_CB(skb)->prev) {
- do {
- struct sk_buff *this = skb;
- skb = IXGBE_CB(skb)->prev;
- dev_kfree_skb(this);
- } while (skb);
- goto next_desc;
- }
+ /* fetch next buffer in frame if non-eop */
+ if (ixgbevf_is_non_eop(rx_ring, rx_desc))
+ continue;
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
- dev_kfree_skb_irq(skb);
- goto next_desc;
+ /* verify the packet layout is correct */
+ if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
}
- ixgbevf_rx_checksum(rx_ring, staterr, skb);
-
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
-
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
/* Workaround hardware that can't do proper VEPA multicast
* source pruning.
@@ -565,32 +856,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
- goto next_desc;
+ continue;
}
- ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
+ /* populate checksum, VLAN, and protocol */
+ ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
-next_desc:
- rx_desc->wb.upper.status_error = 0;
+ ixgbevf_rx_skb(q_vector, skb);
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
- ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ /* reset skb pointer */
+ skb = NULL;
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ /* update budget accounting */
+ total_rx_packets++;
}
- rx_ring->next_to_clean = i;
- cleaned_count = ixgbevf_desc_unused(rx_ring);
-
- if (cleaned_count)
- ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
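
The rewritten receive loop follows the standard descriptor-ring consumption contract: test the DD (descriptor done) bit first, issue a read barrier so no other descriptor fields are read before DD is observed, then process up to the NAPI budget. A condensed skeleton of that contract (types and helpers hypothetical):

/* Skeleton of the DD-bit consumption contract (helpers hypothetical) */
static int poll_ring_sketch(int budget)
{
	int packets = 0;

	while (packets < budget) {
		volatile struct desc_sketch *d = peek_next_desc();

		if (!(d->status & STAT_DD))	/* HW still owns it */
			break;

		rmb();	/* read DD before any other descriptor field */

		consume(d);	/* now safe to read length, VLAN, ... */
		packets++;
	}
	return packets;
}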
@@ -634,12 +916,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
- adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget)
< per_ring_budget);
- adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
#ifdef CONFIG_NET_RX_BUSY_POLL
ixgbevf_qv_unlock_napi(q_vector);
@@ -1229,19 +1509,15 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
- struct ixgbevf_ring *rx_ring;
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
- rx_ring = adapter->rx_ring[index];
-
srrctl = IXGBE_SRRCTL_DROP_EN;
+ srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
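
The two new constants pack the fixed header and packet buffer sizes into SRRCTL's bit fields. A worked example, assuming the usual ixgbe register layout and driver defaults (IXGBEVF_RX_HDR_SIZE = 256, IXGBEVF_RX_BUFSZ = 2048, BSIZEHDRSIZE_SHIFT = 2, BSIZEPKT_SHIFT = 10; values assumed, not taken from this patch):

/* Header field lives at bit 8 in 64-byte units, so shifting the byte
 * count left by 2 lands the right value:
 *   256 << 2 == 1024 == (256 / 64) << 8   -> header field = 4 (256 B)
 * Packet field lives at bit 0 in 1-KB units:
 *   2048 >> 10 == 2                       -> packet buffer = 2 KB
 */
u32 srrctl = (256 << 2) | (2048 >> 10);	/* 0x400 | 0x2 == 0x402 */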
@@ -1260,40 +1536,6 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
-static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- int i;
- u16 rx_buf_len;
-
- /* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, max_frame);
-
- /* PF will allow an extra 4 bytes past for vlan tagged frames */
- max_frame += VLAN_HLEN;
-
- /*
- * Allocate buffer sizes that fit well into 32K and
- * take into account max frame size of 9.5K
- */
- if ((hw->mac.type == ixgbe_mac_X540_vf) &&
- (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else if (max_frame <= IXGBEVF_RXBUFFER_2K)
- rx_buf_len = IXGBEVF_RXBUFFER_2K;
- else if (max_frame <= IXGBEVF_RXBUFFER_4K)
- rx_buf_len = IXGBEVF_RXBUFFER_4K;
- else if (max_frame <= IXGBEVF_RXBUFFER_8K)
- rx_buf_len = IXGBEVF_RXBUFFER_8K;
- else
- rx_buf_len = IXGBEVF_RXBUFFER_10K;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
-}
-
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *ring)
@@ -1371,12 +1613,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
ring->next_to_use = 0;
+ ring->next_to_alloc = 0;
ixgbevf_configure_srrctl(adapter, reg_idx);
- /* prevent DMA from exceeding buffer space available */
- rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
- rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+ /* allow any size packet since we can handle overflow */
+ rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
+
rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
@@ -1393,11 +1636,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
ixgbevf_setup_psrtype(adapter);
- /* set_rx_buffer_len must be called before ring initialization */
- ixgbevf_set_rx_buffer_len(adapter);
+ /* notify the PF of our intent to use this size of frame */
+ ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
@@ -1702,32 +1947,32 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
+ struct device *dev = rx_ring->dev;
unsigned long size;
unsigned int i;
+ /* Free Rx ring sk_buff */
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
+ /* ring already cleared, nothing to do */
if (!rx_ring->rx_buffer_info)
return;
- /* Free all the Rx ring sk_buffs */
+ /* Free all the Rx ring pages */
for (i = 0; i < rx_ring->count; i++) {
- struct ixgbevf_rx_buffer *rx_buffer_info;
+ struct ixgbevf_rx_buffer *rx_buffer;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
- if (rx_buffer_info->dma) {
- dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
- }
- if (rx_buffer_info->skb) {
- struct sk_buff *skb = rx_buffer_info->skb;
- rx_buffer_info->skb = NULL;
- do {
- struct sk_buff *this = skb;
- skb = IXGBE_CB(skb)->prev;
- dev_kfree_skb(this);
- } while (skb);
- }
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer->dma)
+ dma_unmap_page(dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
+ if (rx_buffer->page)
+ __free_page(rx_buffer->page);
+ rx_buffer->page = NULL;
}
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -3274,6 +3519,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -3282,7 +3528,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
default:
- if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
}
@@ -3291,17 +3537,35 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
- hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+ hw_dbg(hw, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
- if (netif_running(netdev))
- ixgbevf_reinit_locked(adapter);
+ /* notify the PF of our intent to use this size of frame */
+ ixgbevf_rlpml_set_vf(hw, max_frame);
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ixgbevf_netpoll(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3438,6 +3702,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = ixgbevf_busy_poll_recv,
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ixgbevf_netpoll,
+#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3465,6 +3732,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
int err, pci_using_dac;
+ bool disable_dev = false;
err = pci_enable_device(pdev);
if (err)
@@ -3499,7 +3767,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
- pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
@@ -3588,16 +3855,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_register;
+ pci_set_drvdata(pdev, netdev);
netif_carrier_off(netdev);
ixgbevf_init_last_counter_stats(adapter);
- /* print the MAC address */
- hw_dbg(hw, "%pM\n", netdev->dev_addr);
+ /* print the VF info */
+ dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
+ dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550_vf:
+ dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
+ break;
+ case ixgbe_mac_X540_vf:
+ dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
+ break;
+ case ixgbe_mac_82599_vf:
+ default:
+ dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
+ break;
+ }
- hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
return 0;
err_register:
@@ -3606,12 +3885,13 @@ err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
iounmap(adapter->io_addr);
err_ioremap:
+ disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
- if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ if (!adapter || disable_dev)
pci_disable_device(pdev);
return err;
}
@@ -3628,7 +3908,13 @@ err_dma:
static void ixgbevf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter;
+ bool disable_dev;
+
+ if (!netdev)
+ return;
+
+ adapter = netdev_priv(netdev);
set_bit(__IXGBEVF_REMOVING, &adapter->state);
@@ -3648,9 +3934,10 @@ static void ixgbevf_remove(struct pci_dev *pdev)
hw_dbg(&adapter->hw, "Remove complete\n");
+ disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
- if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ if (disable_dev)
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 9cddd56d02c3..cdb53be7d995 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -617,3 +617,13 @@ const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_X550_vf_info = {
+ .mac = ixgbe_mac_X550_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
+ .mac = ixgbe_mac_X550EM_x_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index aa8cc8dc25d1..5b172427f459 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -74,6 +74,8 @@ enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82599_vf,
ixgbe_mac_X540_vf,
+ ixgbe_mac_X550_vf,
+ ixgbe_mac_X550EM_x_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 67a84cfaefa1..96208f17bb53 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2559,11 +2559,10 @@ static void mvneta_adjust_link(struct net_device *ndev)
MVNETA_GMAC_FORCE_LINK_DOWN);
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
mvneta_port_up(pp);
- netdev_info(pp->dev, "link up\n");
} else {
mvneta_port_down(pp);
- netdev_info(pp->dev, "link down\n");
}
+ phy_print_status(phydev);
}
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index a3e394c47819..af829c578400 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -106,6 +106,7 @@
#define SDMA_CMD_ERD (1 << 7)
/* Bit definitions of the Port Config Reg */
+#define PCR_DUPLEX_FULL (1 << 15)
#define PCR_HS (1 << 12)
#define PCR_EN (1 << 7)
#define PCR_PM (1 << 0)
@@ -113,11 +114,17 @@
/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM (1 << 28)
#define PCXR_DSCP_EN (1 << 21)
+#define PCXR_RMII_EN (1 << 20)
+#define PCXR_AN_SPEED_DIS (1 << 19)
+#define PCXR_SPEED_100 (1 << 18)
#define PCXR_MFL_1518 (0 << 14)
#define PCXR_MFL_1536 (1 << 14)
#define PCXR_MFL_2048 (2 << 14)
#define PCXR_MFL_64K (3 << 14)
+#define PCXR_FLOWCTL_DIS (1 << 12)
#define PCXR_FLP (1 << 11)
+#define PCXR_AN_FLOWCTL_DIS (1 << 10)
+#define PCXR_AN_DUPLEX_DIS (1 << 9)
#define PCXR_PRIO_TX_OFF 3
#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
@@ -170,7 +177,6 @@
#define LINK_UP (1 << 3)
/* Bit definitions for work to be done */
-#define WORK_LINK (1 << 0)
#define WORK_TX_DONE (1 << 1)
/*
@@ -197,6 +203,9 @@ struct tx_desc {
struct pxa168_eth_private {
int port_num; /* User Ethernet port number */
int phy_addr;
+ int phy_speed;
+ int phy_duplex;
+ phy_interface_t phy_intf;
int rx_resource_err; /* Rx ring resource error flag */
@@ -269,11 +278,11 @@ enum hash_table_entry {
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);
-static int ethernet_phy_setup(struct net_device *dev);
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
@@ -305,26 +314,6 @@ static void abort_dma(struct pxa168_eth_private *pep)
netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
}
-static int ethernet_phy_get(struct pxa168_eth_private *pep)
-{
- unsigned int reg_data;
-
- reg_data = rdl(pep, PHY_ADDRESS);
-
- return (reg_data >> (5 * pep->port_num)) & 0x1f;
-}
-
-static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
-{
- u32 reg_data;
- int addr_shift = 5 * pep->port_num;
-
- reg_data = rdl(pep, PHY_ADDRESS);
- reg_data &= ~(0x1f << addr_shift);
- reg_data |= (phy_addr & 0x1f) << addr_shift;
- wrl(pep, PHY_ADDRESS, reg_data);
-}
-
static void rxq_refill(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -655,14 +644,7 @@ static void eth_port_start(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int tx_curr_desc, rx_curr_desc;
- /* Perform PHY reset, if there is a PHY. */
- if (pep->phy != NULL) {
- struct ethtool_cmd cmd;
-
- pxa168_get_settings(pep->dev, &cmd);
- phy_init_hw(pep->phy);
- pxa168_set_settings(pep->dev, &cmd);
- }
+ phy_start(pep->phy);
/* Assignment of Tx CTRP of given queue */
tx_curr_desc = pep->tx_curr_desc_q;
@@ -717,6 +699,8 @@ static void eth_port_reset(struct net_device *dev)
val = rdl(pep, PORT_CONFIG);
val &= ~PCR_EN;
wrl(pep, PORT_CONFIG, val);
+
+ phy_stop(pep->phy);
}
/*
@@ -884,43 +868,9 @@ static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
}
if (icr & ICR_RXBUF)
ret = 1;
- if (icr & ICR_MII_CH) {
- pep->work_todo |= WORK_LINK;
- ret = 1;
- }
return ret;
}
-static void handle_link_event(struct pxa168_eth_private *pep)
-{
- struct net_device *dev = pep->dev;
- u32 port_status;
- int speed;
- int duplex;
- int fc;
-
- port_status = rdl(pep, PORT_STATUS);
- if (!(port_status & LINK_UP)) {
- if (netif_carrier_ok(dev)) {
- netdev_info(dev, "link down\n");
- netif_carrier_off(dev);
- txq_reclaim(dev, 1);
- }
- return;
- }
- if (port_status & PORT_SPEED_100)
- speed = 100;
- else
- speed = 10;
-
- duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
- fc = (port_status & FLOW_CONTROL_DISABLED) ? 0 : 1;
- netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
- speed, duplex ? "full" : "half", fc ? "en" : "dis");
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
-}
-
static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
@@ -978,8 +928,11 @@ static int set_port_config_ext(struct pxa168_eth_private *pep)
skb_size = PCXR_MFL_64K;
/* Extended Port Configuration */
- wrl(pep,
- PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+ wrl(pep, PORT_CONFIG_EXT,
+ PCXR_AN_SPEED_DIS | /* Disable HW AN */
+ PCXR_AN_DUPLEX_DIS |
+ PCXR_AN_FLOWCTL_DIS |
+ PCXR_2BSM | /* Two byte prefix aligns IP hdr */
PCXR_DSCP_EN | /* Enable DSCP in IP */
skb_size | PCXR_FLP | /* do not force link pass */
PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
@@ -987,6 +940,69 @@ static int set_port_config_ext(struct pxa168_eth_private *pep)
return 0;
}
+static void pxa168_eth_adjust_link(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct phy_device *phy = pep->phy;
+ u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
+ u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
+
+ cfg = cfg_o & ~PCR_DUPLEX_FULL;
+ cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);
+
+ if (phy->interface == PHY_INTERFACE_MODE_RMII)
+ cfgext |= PCXR_RMII_EN;
+ if (phy->speed == SPEED_100)
+ cfgext |= PCXR_SPEED_100;
+ if (phy->duplex)
+ cfg |= PCR_DUPLEX_FULL;
+ if (!phy->pause)
+ cfgext |= PCXR_FLOWCTL_DIS;
+
+ /* Bail out if nothing has changed */
+ if (cfg == cfg_o && cfgext == cfgext_o)
+ return;
+
+ wrl(pep, PORT_CONFIG, cfg);
+ wrl(pep, PORT_CONFIG_EXT, cfgext);
+
+ phy_print_status(phy);
+}
+
+static int pxa168_init_phy(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct ethtool_cmd cmd;
+ int err;
+
+ if (pep->phy)
+ return 0;
+
+ pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ if (!pep->phy)
+ return -ENODEV;
+
+ err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
+ pep->phy_intf);
+ if (err)
+ return err;
+
+ err = pxa168_get_settings(dev, &cmd);
+ if (err)
+ return err;
+
+ cmd.phy_address = pep->phy_addr;
+ cmd.speed = pep->phy_speed;
+ cmd.duplex = pep->phy_duplex;
+ cmd.advertising = PHY_BASIC_FEATURES;
+ cmd.autoneg = AUTONEG_ENABLE;
+
+ if (cmd.speed != 0)
+ cmd.autoneg = AUTONEG_DISABLE;
+
+ return pxa168_set_settings(dev, &cmd);
+}
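
For context, pxa168_init_phy() is the standard phylib bring-up: find the device on the MDIO bus, bind it to the netdev with an adjust_link callback, then seed the initial settings; eth_port_start()/eth_port_reset() later pair this with phy_start()/phy_stop(). A minimal sketch of that lifecycle (callback name hypothetical):

/* Sketch of the phylib lifecycle this driver now uses */
struct phy_device *phy = mdiobus_scan(bus, addr);	/* find the PHY */

phy_connect_direct(netdev, phy, my_adjust_link,		/* bind + callback */
		   PHY_INTERFACE_MODE_RMII);
phy_start(phy);		/* in ndo_open: start the PHY state machine */
/* ... my_adjust_link() fires on every link change ... */
phy_stop(phy);		/* in ndo_stop */
phy_disconnect(phy);	/* in remove */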
+
static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
int err = 0;
@@ -1133,6 +1149,10 @@ static int pxa168_eth_open(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int err;
+ err = pxa168_init_phy(dev);
+ if (err)
+ return err;
+
err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
if (err) {
dev_err(&dev->dev, "can't assign irq\n");
@@ -1231,10 +1251,6 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
struct net_device *dev = pep->dev;
int work_done = 0;
- if (unlikely(pep->work_todo & WORK_LINK)) {
- pep->work_todo &= ~(WORK_LINK);
- handle_link_event(pep);
- }
/*
* We call txq_reclaim every time since in NAPI interrupts are disabled
* and due to this we miss the TX_DONE interrupt, which is not updated in
@@ -1357,77 +1373,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
return -EOPNOTSUPP;
}
-static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
-{
- struct mii_bus *bus = pep->smi_bus;
- struct phy_device *phydev;
- int start;
- int num;
- int i;
-
- if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
- /* Scan entire range */
- start = ethernet_phy_get(pep);
- num = 32;
- } else {
- /* Use phy addr specific to platform */
- start = phy_addr & 0x1f;
- num = 1;
- }
- phydev = NULL;
- for (i = 0; i < num; i++) {
- int addr = (start + i) & 0x1f;
- if (bus->phy_map[addr] == NULL)
- mdiobus_scan(bus, addr);
-
- if (phydev == NULL) {
- phydev = bus->phy_map[addr];
- if (phydev != NULL)
- ethernet_phy_set_addr(pep, addr);
- }
- }
-
- return phydev;
-}
-
-static void phy_init(struct pxa168_eth_private *pep)
-{
- struct phy_device *phy = pep->phy;
-
- phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
-
- if (pep->pd && pep->pd->speed != 0) {
- phy->autoneg = AUTONEG_DISABLE;
- phy->advertising = 0;
- phy->speed = pep->pd->speed;
- phy->duplex = pep->pd->duplex;
- } else {
- phy->autoneg = AUTONEG_ENABLE;
- phy->speed = 0;
- phy->duplex = 0;
- phy->supported &= PHY_BASIC_FEATURES;
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
- }
-
- phy_start_aneg(phy);
-}
-
-static int ethernet_phy_setup(struct net_device *dev)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- if (pep->pd && pep->pd->init)
- pep->pd->init();
-
- pep->phy = phy_scan(pep, pep->phy_addr & 0x1f);
- if (pep->phy != NULL)
- phy_init(pep);
-
- update_hash_table_mac_address(pep, NULL, dev->dev_addr);
-
- return 0;
-}
-
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1505,16 +1450,14 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep = netdev_priv(dev);
pep->dev = dev;
pep->clk = clk;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- err = -ENODEV;
- goto err_netdev;
- }
pep->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pep->base)) {
err = PTR_ERR(pep->base);
goto err_netdev;
}
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
BUG_ON(!res);
dev->irq = res->start;
@@ -1552,13 +1495,23 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->port_num = pep->pd->port_number;
pep->phy_addr = pep->pd->phy_addr;
+ pep->phy_speed = pep->pd->speed;
+ pep->phy_duplex = pep->pd->duplex;
+ pep->phy_intf = pep->pd->intf;
+
+ if (pep->pd->init)
+ pep->pd->init();
} else if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node, "port-id",
&pep->port_num);
np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (np)
- of_property_read_u32(np, "reg", &pep->phy_addr);
+ if (!np) {
+ dev_err(&pdev->dev, "missing phy-handle\n");
+ return -EINVAL;
+ }
+ of_property_read_u32(np, "reg", &pep->phy_addr);
+ pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
}
/* Hardware supports only 3 ports */
@@ -1587,11 +1540,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
if (err)
goto err_free_mdio;
- pxa168_init_hw(pep);
- err = ethernet_phy_setup(dev);
- if (err)
- goto err_mdiobus;
SET_NETDEV_DEV(dev, &pdev->dev);
+ pxa168_init_hw(pep);
err = register_netdev(dev);
if (err)
goto err_mdiobus;
@@ -1621,13 +1571,13 @@ static int pxa168_eth_remove(struct platform_device *pdev)
pep->htpr, pep->htpr_dma);
pep->htpr = NULL;
}
+ if (pep->phy)
+ phy_disconnect(pep->phy);
if (pep->clk) {
clk_disable(pep->clk);
clk_put(pep->clk);
pep->clk = NULL;
}
- if (pep->phy != NULL)
- phy_detach(pep->phy);
iounmap(pep->base);
pep->base = NULL;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f14544c8d73f..867a6a3ef81f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1290,14 +1290,6 @@ static void rx_set_checksum(struct sky2_port *sky2)
? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
-/*
- * Fixed initial key as seed to RSS.
- */
-static const uint32_t rss_init_key[10] = {
- 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43,
- 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30
-};
-
/* Enable/disable receive hash calculation (RSS) */
static void rx_set_rss(struct net_device *dev, netdev_features_t features)
{
@@ -1313,9 +1305,12 @@ static void rx_set_rss(struct net_device *dev, netdev_features_t features)
/* Program RSS initial values */
if (features & NETIF_F_RXHASH) {
+ u32 rss_key[10];
+
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
for (i = 0; i < nkeys; i++)
sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
- rss_init_key[i]);
+ rss_key[i]);
/* Need to turn on (undocumented) flag to make hashing work */
sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
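
netdev_rss_key_fill() copies from a single boot-time random key shared by all NICs in the host, so RSS spreading is no longer predictable across machines while staying consistent within one. A minimal usage sketch:

/* Sketch: fill a per-driver key buffer from the system-wide random key */
u32 rss_key[10];

netdev_rss_key_fill(rss_key, sizeof(rss_key));	/* 40 random bytes */
/* program rss_key[0..9] into the NIC's RSS key registers */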
@@ -1366,7 +1361,9 @@ static void sky2_rx_clean(struct sky2_port *sky2)
{
unsigned i;
- memset(sky2->rx_le, 0, RX_LE_BYTES);
+ if (sky2->rx_le)
+ memset(sky2->rx_le, 0, RX_LE_BYTES);
+
for (i = 0; i < sky2->rx_pending; i++) {
struct rx_ring_info *re = sky2->rx_ring + i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b16e1b95566f..5c93d1451c44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -990,11 +990,11 @@ static struct mlx4_cmd_info cmd_info[] = {
{
.opcode = MLX4_CMD_CONFIG_DEV,
.has_inbox = false,
- .has_outbox = false,
+ .has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_CMD_EPERM_wrapper
+ .wrapper = mlx4_CONFIG_DEV_wrapper
},
{
.opcode = MLX4_CMD_ALLOC_RES,
@@ -1338,6 +1338,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_QUERY_IF_STAT_wrapper
},
+ {
+ .opcode = MLX4_CMD_ACCESS_REG,
+ .has_inbox = true,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ACCESS_REG_wrapper,
+ },
/* Native multicast commands are not available for guests */
{
.opcode = MLX4_CMD_QP_ATTACH,
@@ -2108,50 +2117,52 @@ err_vhcr:
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ int flags = 0;
+
+ if (!priv->cmd.initialized) {
+ mutex_init(&priv->cmd.hcr_mutex);
+ mutex_init(&priv->cmd.slave_cmd_mutex);
+ sema_init(&priv->cmd.poll_sem, 1);
+ priv->cmd.use_events = 0;
+ priv->cmd.toggle = 1;
+ priv->cmd.initialized = 1;
+ flags |= MLX4_CMD_CLEANUP_STRUCT;
+ }
- mutex_init(&priv->cmd.hcr_mutex);
- mutex_init(&priv->cmd.slave_cmd_mutex);
- sema_init(&priv->cmd.poll_sem, 1);
- priv->cmd.use_events = 0;
- priv->cmd.toggle = 1;
-
- priv->cmd.hcr = NULL;
- priv->mfunc.vhcr = NULL;
-
- if (!mlx4_is_slave(dev)) {
+ if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
mlx4_err(dev, "Couldn't map command register\n");
- return -ENOMEM;
+ goto err;
}
+ flags |= MLX4_CMD_CLEANUP_HCR;
}
- if (mlx4_is_mfunc(dev)) {
+ if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
&priv->mfunc.vhcr_dma,
GFP_KERNEL);
if (!priv->mfunc.vhcr)
- goto err_hcr;
+ goto err;
+
+ flags |= MLX4_CMD_CLEANUP_VHCR;
}
- priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
- MLX4_MAILBOX_SIZE,
- MLX4_MAILBOX_SIZE, 0);
- if (!priv->cmd.pool)
- goto err_vhcr;
+ if (!priv->cmd.pool) {
+ priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+ MLX4_MAILBOX_SIZE,
+ MLX4_MAILBOX_SIZE, 0);
+ if (!priv->cmd.pool)
+ goto err;
- return 0;
+ flags |= MLX4_CMD_CLEANUP_POOL;
+ }
-err_vhcr:
- if (mlx4_is_mfunc(dev))
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
- priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
- priv->mfunc.vhcr = NULL;
+ return 0;
-err_hcr:
- if (!mlx4_is_slave(dev))
- iounmap(priv->cmd.hcr);
+err:
+ mlx4_cmd_cleanup(dev, flags);
return -ENOMEM;
}
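
The reworked pair makes mlx4_cmd_init() safe to call more than once: every resource it actually acquires sets a bit in flags, and mlx4_cmd_cleanup() releases only what the mask names. A condensed sketch of the pattern (all helper and flag names hypothetical):

/* Sketch of the flags-based partial-cleanup pattern used above */
int flags = 0;

if (!res_a_ready()) {
	if (acquire_a() < 0)
		goto err;
	flags |= CLEANUP_A;	/* we own A; remember to release it */
}
if (!res_b_ready()) {
	if (acquire_b() < 0)
		goto err;
	flags |= CLEANUP_B;
}
return 0;

err:
	cleanup(flags);		/* releases only what this call acquired */
	return -ENOMEM;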
@@ -2175,18 +2186,28 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
iounmap(priv->mfunc.comm);
}
-void mlx4_cmd_cleanup(struct mlx4_dev *dev)
+void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- pci_pool_destroy(priv->cmd.pool);
+ if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
+ pci_pool_destroy(priv->cmd.pool);
+ priv->cmd.pool = NULL;
+ }
- if (!mlx4_is_slave(dev))
+ if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
+ (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
iounmap(priv->cmd.hcr);
- if (mlx4_is_mfunc(dev))
+ priv->cmd.hcr = NULL;
+ }
+ if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
+ (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
- priv->mfunc.vhcr = NULL;
+ priv->mfunc.vhcr = NULL;
+ }
+ if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
+ priv->cmd.initialized = 0;
}
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 57dda95b67d8..999014413b1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -35,52 +35,6 @@
#include "mlx4_en.h"
-int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int port_up = 0;
- int err = 0;
-
- if (priv->hwtstamp_config.tx_type == tx_type &&
- priv->hwtstamp_config.rx_filter == rx_filter)
- return 0;
-
- mutex_lock(&mdev->state_lock);
- if (priv->port_up) {
- port_up = 1;
- mlx4_en_stop_port(dev, 1);
- }
-
- mlx4_en_free_resources(priv);
-
- en_warn(priv, "Changing Time Stamp configuration\n");
-
- priv->hwtstamp_config.tx_type = tx_type;
- priv->hwtstamp_config.rx_filter = rx_filter;
-
- if (rx_filter != HWTSTAMP_FILTER_NONE)
- dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- else
- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
- if (port_up) {
- err = mlx4_en_start_port(dev);
- if (err)
- en_err(priv, "Failed starting port\n");
- }
-
-out:
- mutex_unlock(&mdev->state_lock);
- netdev_features_change(dev);
- return err;
-}
-
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
*/
static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ae83da9cd18a..90e0f045a6bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -35,6 +35,7 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
#include <linux/in.h>
#include <net/ip.h>
@@ -114,7 +115,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"tso_packets",
"xmit_more",
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
- "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
+ "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
/* packet statistics */
"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
@@ -374,7 +375,302 @@ static void mlx4_en_get_strings(struct net_device *dev,
}
}
-static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static u32 mlx4_en_autoneg_get(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ u32 autoneg = AUTONEG_DISABLE;
+
+ if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
+ (priv->port_state.flags & MLX4_EN_PORT_ANE))
+ autoneg = AUTONEG_ENABLE;
+
+ return autoneg;
+}
+
+static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
+{
+ u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
+ | MLX4_PROT_MASK(MLX4_1000BASE_T)
+ | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
+ return SUPPORTED_TP;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
+ | MLX4_PROT_MASK(MLX4_10GBASE_SR)
+ | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
+ return SUPPORTED_FIBRE;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KR)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
+ return SUPPORTED_Backplane;
+ }
+ return 0;
+}
+
+static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
+{
+ u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);
+
+ if (!eth_proto) /* link down */
+ eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
+ | MLX4_PROT_MASK(MLX4_1000BASE_T)
+ | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
+ return PORT_TP;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
+ | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
+ return PORT_FIBRE;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
+ | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
+ return PORT_DA;
+ }
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KR)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
+ return PORT_NONE;
+ }
+ return PORT_OTHER;
+}
+
+#define MLX4_LINK_MODES_SZ \
+ (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
+
+enum ethtool_report {
+ SUPPORTED = 0,
+ ADVERTISED = 1,
+ SPEED = 2
+};
+
+/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
+static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
+ [MLX4_100BASE_TX] = {
+ SUPPORTED_100baseT_Full,
+ ADVERTISED_100baseT_Full,
+ SPEED_100
+ },
+
+ [MLX4_1000BASE_T] = {
+ SUPPORTED_1000baseT_Full,
+ ADVERTISED_1000baseT_Full,
+ SPEED_1000
+ },
+ [MLX4_1000BASE_CX_SGMII] = {
+ SUPPORTED_1000baseKX_Full,
+ ADVERTISED_1000baseKX_Full,
+ SPEED_1000
+ },
+ [MLX4_1000BASE_KX] = {
+ SUPPORTED_1000baseKX_Full,
+ ADVERTISED_1000baseKX_Full,
+ SPEED_1000
+ },
+
+ [MLX4_10GBASE_T] = {
+ SUPPORTED_10000baseT_Full,
+ ADVERTISED_10000baseT_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_CX4] = {
+ SUPPORTED_10000baseKX4_Full,
+ ADVERTISED_10000baseKX4_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_KX4] = {
+ SUPPORTED_10000baseKX4_Full,
+ ADVERTISED_10000baseKX4_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_KR] = {
+ SUPPORTED_10000baseKR_Full,
+ ADVERTISED_10000baseKR_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_CR] = {
+ SUPPORTED_10000baseKR_Full,
+ ADVERTISED_10000baseKR_Full,
+ SPEED_10000
+ },
+ [MLX4_10GBASE_SR] = {
+ SUPPORTED_10000baseKR_Full,
+ ADVERTISED_10000baseKR_Full,
+ SPEED_10000
+ },
+
+ [MLX4_20GBASE_KR2] = {
+ SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
+ ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
+ SPEED_20000
+ },
+
+ [MLX4_40GBASE_CR4] = {
+ SUPPORTED_40000baseCR4_Full,
+ ADVERTISED_40000baseCR4_Full,
+ SPEED_40000
+ },
+ [MLX4_40GBASE_KR4] = {
+ SUPPORTED_40000baseKR4_Full,
+ ADVERTISED_40000baseKR4_Full,
+ SPEED_40000
+ },
+ [MLX4_40GBASE_SR4] = {
+ SUPPORTED_40000baseSR4_Full,
+ ADVERTISED_40000baseSR4_Full,
+ SPEED_40000
+ },
+
+ [MLX4_56GBASE_KR4] = {
+ SUPPORTED_56000baseKR4_Full,
+ ADVERTISED_56000baseKR4_Full,
+ SPEED_56000
+ },
+ [MLX4_56GBASE_CR4] = {
+ SUPPORTED_56000baseCR4_Full,
+ ADVERTISED_56000baseCR4_Full,
+ SPEED_56000
+ },
+ [MLX4_56GBASE_SR4] = {
+ SUPPORTED_56000baseSR4_Full,
+ ADVERTISED_56000baseSR4_Full,
+ SPEED_56000
+ },
+};
+
+static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
+{
+ int i;
+ u32 link_modes = 0;
+
+ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+ if (eth_proto & MLX4_PROT_MASK(i))
+ link_modes |= ptys2ethtool_map[i][report];
+ }
+ return link_modes;
+}
+
+static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
+{
+ int i;
+ u32 ptys_modes = 0;
+
+ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+ if (ptys2ethtool_map[i][report] & link_modes)
+ ptys_modes |= 1 << i;
+ }
+ return ptys_modes;
+}
+
+/* Convert actual speed (SPEED_XXX) to ptys link modes */
+static u32 speed2ptys_link_modes(u32 speed)
+{
+ int i;
+ u32 ptys_modes = 0;
+
+ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
+ if (ptys2ethtool_map[i][SPEED] == speed)
+ ptys_modes |= 1 << i;
+ }
+ return ptys_modes;
+}
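
All three helpers are plain table walks over ptys2ethtool_map: each set bit in a PTYS protocol mask ORs in the corresponding ethtool flags, and the reverse direction rebuilds a PTYS mask from ethtool bits or a plain speed. A short worked example of the round trip:

/* A cap mask with the two 40G modes set translates to the union of
 * their SUPPORTED_* entries; asking for SPEED_40000 gets back every
 * 40G-capable mode bit in the table.
 */
u32 cap = MLX4_PROT_MASK(MLX4_40GBASE_CR4) | MLX4_PROT_MASK(MLX4_40GBASE_SR4);
u32 modes = ptys2ethtool_link_modes(cap, SUPPORTED);
/* modes == SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseSR4_Full */

u32 ptys = speed2ptys_link_modes(SPEED_40000);
/* ptys has the CR4, KR4 and SR4 40G bits set */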
+
+static int ethtool_get_ptys_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_ptys_reg ptys_reg;
+ u32 eth_proto;
+ int ret;
+
+ memset(&ptys_reg, 0, sizeof(ptys_reg));
+ ptys_reg.local_port = priv->port;
+ ptys_reg.proto_mask = MLX4_PTYS_EN;
+ ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
+ MLX4_ACCESS_REG_QUERY, &ptys_reg);
+ if (ret) {
+ en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
+ ret);
+ return ret;
+ }
+ en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
+ ptys_reg.proto_mask);
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_cap));
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_admin));
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_oper));
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_lp_adv));
+
+ cmd->supported = 0;
+ cmd->advertising = 0;
+
+ cmd->supported |= ptys_get_supported_port(&ptys_reg);
+
+ eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
+ cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);
+
+ eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
+ cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);
+
+ cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;
+
+ cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
+ ADVERTISED_Asym_Pause : 0;
+
+ cmd->port = ptys_get_active_port(&ptys_reg);
+ cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
+
+ if (mlx4_en_autoneg_get(dev)) {
+ cmd->supported |= SUPPORTED_Autoneg;
+ cmd->advertising |= ADVERTISED_Autoneg;
+ }
+
+ cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
+ cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);
+
+ cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
+ ADVERTISED_Autoneg : 0;
+
+ cmd->phy_address = 0;
+ cmd->mdio_support = 0;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ return ret;
+}
+
+static void ethtool_get_default_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int trans_type;
@@ -382,18 +678,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->autoneg = AUTONEG_DISABLE;
cmd->supported = SUPPORTED_10000baseT_Full;
cmd->advertising = ADVERTISED_10000baseT_Full;
-
- if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
- return -ENOMEM;
-
- trans_type = priv->port_state.transciver;
- if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
- cmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
- }
+ trans_type = priv->port_state.transceiver;
if (trans_type > 0 && trans_type <= 0xC) {
cmd->port = PORT_FIBRE;
@@ -409,17 +694,118 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->port = -1;
cmd->transceiver = -1;
}
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int ret = -EINVAL;
+
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+
+ en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
+ priv->port_state.flags & MLX4_EN_PORT_ANC,
+ priv->port_state.flags & MLX4_EN_PORT_ANE);
+
+ if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
+ ret = ethtool_get_ptys_settings(dev, cmd);
+ if (ret) /* ETH PROT CTRL is not supported or PTYS CMD failed */
+ ethtool_get_default_settings(dev, cmd);
+
+ if (netif_carrier_ok(dev)) {
+ ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
+ }
return 0;
}
+/* Calculate PTYS admin according to ethtool speed (SPEED_XXX) */
+static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
+ __be32 proto_cap)
+{
+ __be32 proto_admin = 0;
+
+ if (!speed) { /* Speed = 0 ==> Reset Link modes */
+ proto_admin = proto_cap;
+ en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
+ be32_to_cpu(proto_cap));
+ } else {
+ u32 ptys_link_modes = speed2ptys_link_modes(speed);
+
+ proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
+ en_info(priv, "Setting Speed to %d\n", speed);
+ }
+ return proto_admin;
+}
+
static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- if ((cmd->autoneg == AUTONEG_ENABLE) ||
- (ethtool_cmd_speed(cmd) != SPEED_10000) ||
- (cmd->duplex != DUPLEX_FULL))
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_ptys_reg ptys_reg;
+ __be32 proto_admin;
+ int ret;
+
+ u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
+ int speed = ethtool_cmd_speed(cmd);
+
+ en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
+ speed, cmd->advertising, cmd->autoneg, cmd->duplex);
+
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
+ (cmd->duplex == DUPLEX_HALF))
return -EINVAL;
- /* Nothing to change */
+ memset(&ptys_reg, 0, sizeof(ptys_reg));
+ ptys_reg.local_port = priv->port;
+ ptys_reg.proto_mask = MLX4_PTYS_EN;
+ ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
+ MLX4_ACCESS_REG_QUERY, &ptys_reg);
+ if (ret) {
+ en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
+ ret);
+ return 0;
+ }
+
+ proto_admin = cpu_to_be32(ptys_adv);
+ if (speed >= 0 && speed != priv->port_state.link_speed)
+ /* If speed was set then speed decides :-) */
+ proto_admin = speed_set_ptys_admin(priv, speed,
+ ptys_reg.eth_proto_cap);
+
+ proto_admin &= ptys_reg.eth_proto_cap;
+
+ if (proto_admin == ptys_reg.eth_proto_admin)
+ return 0; /* Nothing to change */
+
+ if (!proto_admin) {
+ en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
+ return -EINVAL; /* nothing to change due to bad input */
+ }
+
+ en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
+ be32_to_cpu(proto_admin));
+
+ ptys_reg.eth_proto_admin = proto_admin;
+ ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
+ &ptys_reg);
+ if (ret) {
+ en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
+ be32_to_cpu(ptys_reg.eth_proto_admin), ret);
+ return ret;
+ }
+
+ en_warn(priv, "Port link mode changed, restarting port...\n");
+ mutex_lock(&priv->mdev->state_lock);
+ if (priv->port_up) {
+ mlx4_en_stop_port(dev, 1);
+ if (mlx4_en_start_port(dev))
+ en_err(priv, "Failed restarting port %d\n", priv->port);
+ }
+ mutex_unlock(&priv->mdev->state_lock);
return 0;
}
@@ -587,7 +973,34 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
return priv->rx_ring_num;
}
-static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
+static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
+{
+ return MLX4_EN_RSS_KEY_SIZE;
+}
+
+static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ /* check if requested function is supported by the device */
+ if ((hfunc == ETH_RSS_HASH_TOP &&
+ !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
+ (hfunc == ETH_RSS_HASH_XOR &&
+ !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
+ return -EINVAL;
+
+ priv->rss_hash_fn = hfunc;
+ if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
+ en_warn(priv,
+ "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+ if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
+ en_warn(priv,
+ "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+ return 0;
+}
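
With this check in place, the hash function becomes selectable from user space via the ETHTOOL_SRSSH interface; assuming an ethtool build recent enough to understand the hfunc keyword, that would look like `ethtool -X eth0 hfunc xor`, and `ethtool -x eth0` reports the function in use.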
+
+static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
+ u8 *hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
@@ -596,17 +1009,23 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
int err = 0;
rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
+ rss_rings = 1 << ilog2(rss_rings);
while (n--) {
+ if (!ring_index)
+ break;
ring_index[n] = rss_map->qps[n % rss_rings].qpn -
rss_map->base_qpn;
}
-
+ if (key)
+ memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
+ if (hfunc)
+ *hfunc = priv->rss_hash_fn;
return err;
}
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
- const u8 *key)
+ const u8 *key, const u8 hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -619,6 +1038,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
* between rings
*/
for (i = 0; i < priv->rx_ring_num; i++) {
+ if (!ring_index)
+ continue;
if (i > 0 && !ring_index[i] && !rss_rings)
rss_rings = i;
@@ -633,13 +1054,22 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
if (!is_power_of_2(rss_rings))
return -EINVAL;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ err = mlx4_en_check_rxfh_func(dev, hfunc);
+ if (err)
+ return err;
+ }
+
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- priv->prof->rss_rings = rss_rings;
+ if (ring_index)
+ priv->prof->rss_rings = rss_rings;
+ if (key)
+ memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
if (port_up) {
err = mlx4_en_start_port(dev);
@@ -1309,6 +1739,86 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
+static int mlx4_en_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int ret;
+ u8 data[4];
+
+ /* Read first 2 bytes to get Module & REV ID */
+ ret = mlx4_get_module_info(mdev->dev, priv->port,
+ 0/*offset*/, 2/*size*/, data);
+ if (ret < 2)
+ return -EIO;
+
+ switch (data[0] /* identifier */) {
+ case MLX4_MODULE_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ if (data[1] >= 0x3) { /* revision id */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLX4_MODULE_ID_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ case MLX4_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static int mlx4_en_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int offset = ee->offset;
+ int i = 0, ret;
+
+ if (ee->len == 0)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ while (i < ee->len) {
+ en_dbg(DRV, priv,
+ "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
+ i, offset, ee->len - i);
+
+ ret = mlx4_get_module_info(mdev->dev, priv->port,
+ offset, ee->len - i, data + i);
+
+ if (!ret) /* Done reading */
+ return 0;
+
+ if (ret < 0) {
+ en_err(priv,
+ "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
+ i, offset, ee->len - i, ret);
+ return 0;
+ }
+
+ i += ret;
+ offset += ret;
+ }
+ return 0;
+}
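
User space consumes these two callbacks through the ETHTOOL_GMODULEINFO / ETHTOOL_GMODULEEEPROM pair, which is what `ethtool -m <dev>` drives. A minimal sketch of the first step, under the assumption of an open AF_INET socket fd and a placeholder interface name:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative sketch: ask the driver for the module EEPROM layout,
 * as `ethtool -m` does before issuing ETHTOOL_GMODULEEEPROM reads.
 */
static int probe_module(int fd, const char *ifname)
{
	struct ethtool_modinfo modinfo = { .cmd = ETHTOOL_GMODULEINFO };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&modinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return -1;
	printf("SFF type %u, EEPROM length %u bytes\n",
	       modinfo.type, modinfo.eeprom_len);
	return 0;
}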
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
@@ -1332,6 +1842,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
+ .get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
.get_rxfh = mlx4_en_get_rxfh,
.set_rxfh = mlx4_en_set_rxfh,
.get_channels = mlx4_en_get_channels,
@@ -1341,6 +1852,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_priv_flags = mlx4_en_get_priv_flags,
.get_tunable = mlx4_en_get_tunable,
.set_tunable = mlx4_en_set_tunable,
+ .get_module_info = mlx4_en_get_module_info,
+ .get_module_eeprom = mlx4_en_get_module_eeprom
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2091ae88615d..9f16f754137b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -221,15 +221,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
{
struct mlx4_en_dev *mdev;
int i;
- int err;
printk_once(KERN_INFO "%s", mlx4_en_version);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev) {
- err = -ENOMEM;
+ if (!mdev)
goto err_free_res;
- }
if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
goto err_free_dev;
@@ -264,8 +261,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
}
/* Build device profile according to supplied module parameters */
- err = mlx4_en_get_profile(mdev);
- if (err) {
+ if (mlx4_en_get_profile(mdev)) {
mlx4_err(mdev, "Bad module parameters, aborting\n");
goto err_mr;
}
@@ -286,10 +282,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
* Note: we cannot use the shared workqueue because of deadlocks caused
* by the rtnl lock */
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
- if (!mdev->workqueue) {
- err = -ENOMEM;
+ if (!mdev->workqueue)
goto err_mr;
- }
/* At this stage all non-port specific tasks are complete:
* mark the card state as up */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4d69e382b4e5..dccf0e1f86be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -575,7 +575,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
- u64 reg_id;
+ u64 reg_id = 0;
int *qpn = &priv->base_qpn;
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
@@ -1843,8 +1843,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
}
local_bh_enable();
- while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
- msleep(1);
+ napi_synchronize(&cq->napi);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
@@ -1894,6 +1893,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->rx_ring[i]->packets = 0;
priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0;
+ priv->rx_ring[i]->csum_complete = 0;
}
}
@@ -2157,7 +2157,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
- if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
+ if (mlx4_en_reset_config(dev, config, dev->features)) {
config.tx_type = HWTSTAMP_TX_OFF;
config.rx_filter = HWTSTAMP_FILTER_NONE;
}
@@ -2190,6 +2190,16 @@ static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
+ en_info(priv, "Turn %s RX vlan strip offload\n",
+ (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
+ ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
+ features);
+ if (ret)
+ return ret;
+ }
if (features & NETIF_F_LOOPBACK)
priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
@@ -2249,7 +2259,7 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_dev *mdev = priv->mdev->dev;
@@ -2455,6 +2465,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv));
+ spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
+ INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+ INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
+#ifdef CONFIG_MLX4_EN_VXLAN
+ INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
+ INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ INIT_LIST_HEAD(&priv->filters);
+ spin_lock_init(&priv->filters_lock);
+#endif
+
priv->dev = dev;
priv->mdev = mdev;
priv->ddev = &mdev->pdev->dev;
@@ -2468,6 +2493,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
+ netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
GFP_KERNEL);
@@ -2486,16 +2512,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->cqe_size = mdev->dev->caps.cqe_size;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
- spin_lock_init(&priv->stats_lock);
- INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
- INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
- INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
- INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
- INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
- INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
-#endif
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
@@ -2513,6 +2529,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
+ if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
+ MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
+ priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
+
/* Set default MAC */
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
@@ -2538,11 +2558,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (err)
goto out;
-#ifdef CONFIG_RFS_ACCEL
- INIT_LIST_HEAD(&priv->filters);
- spin_lock_init(&priv->filters_lock);
-#endif
-
/* Initialize time stamping config */
priv->hwtstamp_config.flags = 0;
priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
@@ -2583,7 +2598,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- dev->hw_features |= NETIF_F_LOOPBACK;
+ dev->hw_features |= NETIF_F_LOOPBACK |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -2592,6 +2608,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
+ /* Set a default RSS hash function */
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
+ priv->rss_hash_fn = ETH_RSS_HASH_TOP;
+ } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
+ priv->rss_hash_fn = ETH_RSS_HASH_XOR;
+ } else {
+ en_warn(priv,
+ "No RSS hash capabilities exposed, using Toeplitz\n");
+ priv->rss_hash_fn = ETH_RSS_HASH_TOP;
+ }
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -2650,3 +2677,79 @@ out:
return err;
}
+int mlx4_en_reset_config(struct net_device *dev,
+ struct hwtstamp_config ts_config,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up = 0;
+ int err = 0;
+
+ if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
+ priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
+ !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
+ return 0; /* Nothing to change */
+
+ if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+ (features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
+ en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
+ mlx4_en_free_resources(priv);
+
+ en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
+ ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+ priv->hwtstamp_config.tx_type = ts_config.tx_type;
+ priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+
+ if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ /* RX time-stamping is OFF, update the RX vlan offload
+ * to the latest wanted state
+ */
+ if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ /* RX vlan offload and RX time-stamping can't coexist!
+ * Regardless of the caller's choice,
+ * turn off RX vlan offload when time-stamping is ON
+ */
+ if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ err = mlx4_en_alloc_resources(priv);
+ if (err) {
+ en_err(priv, "Failed reallocating port resources\n");
+ goto out;
+ }
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ netdev_features_change(dev);
+ return err;
+}
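
The reset path above is also what a SIOCSHWTSTAMP request ends up exercising. A user-space sketch of such a request (ifname illustrative; fd is an open AF_INET socket); note that per the logic above, enabling an RX filter implicitly disables RX VLAN stripping:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative sketch: enable RX timestamping on all packets. The
 * driver responds by turning RX VLAN stripping off, since the two
 * features cannot coexist on this hardware.
 */
static int enable_rx_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}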
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0a0261d128b9..6cb80072af6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -91,21 +91,37 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
* already synchronized, no need for locking */
state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
+ case MLX4_EN_100M_SPEED:
+ state->link_speed = SPEED_100;
+ break;
case MLX4_EN_1G_SPEED:
- state->link_speed = 1000;
+ state->link_speed = SPEED_1000;
break;
case MLX4_EN_10G_SPEED_XAUI:
case MLX4_EN_10G_SPEED_XFI:
- state->link_speed = 10000;
+ state->link_speed = SPEED_10000;
+ break;
+ case MLX4_EN_20G_SPEED:
+ state->link_speed = SPEED_20000;
break;
case MLX4_EN_40G_SPEED:
- state->link_speed = 40000;
+ state->link_speed = SPEED_40000;
+ break;
+ case MLX4_EN_56G_SPEED:
+ state->link_speed = SPEED_56000;
break;
default:
state->link_speed = -1;
break;
}
- state->transciver = qport_context->transceiver;
+
+ state->transceiver = qport_context->transceiver;
+
+ state->flags = 0; /* Reset and recalculate the port flags */
+ state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
+ MLX4_EN_PORT_ANC : 0;
+ state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
+ MLX4_EN_PORT_ANE : 0;
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
@@ -139,11 +155,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->rx_bytes = 0;
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
+ priv->port_stats.rx_chksum_complete = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i]->packets;
stats->rx_bytes += priv->rx_ring[i]->bytes;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
+ priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 745090b49d9e..040da4b16b1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -53,22 +53,49 @@ enum {
MLX4_MCAST_ENABLE = 2,
};
+enum mlx4_link_mode {
+ MLX4_1000BASE_CX_SGMII = 0,
+ MLX4_1000BASE_KX = 1,
+ MLX4_10GBASE_CX4 = 2,
+ MLX4_10GBASE_KX4 = 3,
+ MLX4_10GBASE_KR = 4,
+ MLX4_20GBASE_KR2 = 5,
+ MLX4_40GBASE_CR4 = 6,
+ MLX4_40GBASE_KR4 = 7,
+ MLX4_56GBASE_KR4 = 8,
+ MLX4_10GBASE_CR = 12,
+ MLX4_10GBASE_SR = 13,
+ MLX4_40GBASE_SR4 = 15,
+ MLX4_56GBASE_CR4 = 17,
+ MLX4_56GBASE_SR4 = 18,
+ MLX4_100BASE_TX = 24,
+ MLX4_1000BASE_T = 25,
+ MLX4_10GBASE_T = 26,
+};
+
+#define MLX4_PROT_MASK(link_mode) (1<<link_mode)
+
enum {
- MLX4_EN_1G_SPEED = 0x02,
- MLX4_EN_10G_SPEED_XFI = 0x01,
+ MLX4_EN_100M_SPEED = 0x04,
MLX4_EN_10G_SPEED_XAUI = 0x00,
+ MLX4_EN_10G_SPEED_XFI = 0x01,
+ MLX4_EN_1G_SPEED = 0x02,
+ MLX4_EN_20G_SPEED = 0x08,
MLX4_EN_40G_SPEED = 0x40,
+ MLX4_EN_56G_SPEED = 0x20,
MLX4_EN_OTHER_SPEED = 0x0f,
};
struct mlx4_en_query_port_context {
u8 link_up;
#define MLX4_EN_LINK_UP_MASK 0x80
- u8 reserved;
+#define MLX4_EN_ANC_MASK 0x40
+ u8 autoneg;
+#define MLX4_EN_AUTONEG_MASK 0x80
__be16 mtu;
u8 reserved2;
u8 link_speed;
-#define MLX4_EN_SPEED_MASK 0x43
+#define MLX4_EN_SPEED_MASK 0x6f
u16 reserved3[5];
__be64 mac;
u8 transceiver;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 01660c595f5c..4ca396e3168f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -42,6 +42,10 @@
#include <linux/vmalloc.h>
#include <linux/irq.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ip6_checksum.h>
+#endif
+
#include "mlx4_en.h"
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
@@ -74,7 +78,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
page_alloc->page_size = PAGE_SIZE << order;
page_alloc->page = page;
page_alloc->dma = dma;
- page_alloc->page_offset = frag_info->frag_align;
+ page_alloc->page_offset = 0;
/* Not doing get_page() for each frag is a big win
* on asymmetric workloads. Note we cannot use atomic_set().
*/
@@ -119,7 +123,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
out:
while (i--) {
- frag_info = &priv->frag_info[i];
if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma,
page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
@@ -157,7 +160,7 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
- frag_info, GFP_KERNEL))
+ frag_info, GFP_KERNEL | __GFP_COLD))
goto out;
}
return 0;
@@ -269,7 +272,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
- GFP_KERNEL)) {
+ GFP_KERNEL | __GFP_COLD)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
@@ -636,13 +639,94 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
int index = ring->prod & ring->size_mask;
while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
- if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
+ if (mlx4_en_prepare_rx_desc(priv, ring, index,
+ GFP_ATOMIC | __GFP_COLD))
break;
ring->prod++;
index = ring->prod & ring->size_mask;
}
}
+/* When hardware doesn't strip the VLAN header, we need to calculate the
+ * checksum over it and add it to the hardware's checksum calculation.
+ */
+static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
+ struct vlan_hdr *vlanh)
+{
+ return csum_add(hw_checksum, *(__wsum *)vlanh);
+}
+
+/* Although the stack expects a checksum that doesn't include the pseudo
+ * header, the HW adds it. To address that, we subtract the pseudo
+ * header checksum from the checksum value provided by the HW.
+ */
+static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+ struct iphdr *iph)
+{
+ __u16 length_for_csum = 0;
+ __wsum csum_pseudo_header = 0;
+
+ length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
+ csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ length_for_csum, iph->protocol, 0);
+ skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* In IPv6 packets, besides subtracting the pseudo header checksum,
+ * we also compute/add the IP header checksum, which
+ * the HW does not include.
+ */
+static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
+ struct ipv6hdr *ipv6h)
+{
+ __wsum csum_pseudo_hdr = 0;
+
+ if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ return -1;
+ hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
+
+ csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
+ sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
+ csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
+ csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+
+ skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
+ skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
+ return 0;
+}
+#endif
+static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+ int hwtstamp_rx_filter)
+{
+ __wsum hw_checksum = 0;
+
+ void *hdr = (u8 *)va + sizeof(struct ethhdr);
+
+ hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
+
+ if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
+ hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
+ /* next protocol is neither IPv4 nor IPv6 */
+ if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
+ != htons(ETH_P_IP) &&
+ ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
+ != htons(ETH_P_IPV6))
+ return -1;
+ hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
+ hdr += sizeof(struct vlan_hdr);
+ }
+
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
+ get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+ if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
+ return -1;
+#endif
+ return 0;
+}
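
The fix-ups above rely on Internet checksums being ones'-complement sums with end-around carry, so a pseudo-header contribution can be subtracted back out without refolding. A stand-alone, user-space illustration of the csum_add()/csum_sub() identities, reduced to 16 bits for clarity:

#include <assert.h>
#include <stdint.h>

/* Ones'-complement add with end-around carry, the 16-bit analogue of
 * the kernel's csum_add().
 */
static uint16_t onec_add(uint16_t a, uint16_t b)
{
	uint32_t sum = (uint32_t)a + b;
	return (uint16_t)((sum & 0xffff) + (sum >> 16));
}

/* Subtraction is addition of the complement, which is exactly how the
 * kernel's csum_sub() is implemented.
 */
static uint16_t onec_sub(uint16_t a, uint16_t b)
{
	return onec_add(a, (uint16_t)~b);
}

int main(void)
{
	uint16_t hw = onec_add(0x1234, 0xabcd);	/* "HW" sum incl. pseudo hdr */

	/* Removing the pseudo-header word recovers the payload-only sum */
	assert(onec_sub(hw, 0xabcd) == 0x1234);
	return 0;
}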
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -744,73 +828,95 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
if (likely(dev->features & NETIF_F_RXCSUM)) {
- if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
- (cqe->checksum == cpu_to_be16(0xffff))) {
- ring->csum_ok++;
- /* This packet is eligible for GRO if it is:
- * - DIX Ethernet (type interpretation)
- * - TCP/IP (v4)
- * - without IP options
- * - not an IP fragment
- * - no LLS polling in progress
- */
- if (!mlx4_en_cq_busy_polling(cq) &&
- (dev->features & NETIF_F_GRO)) {
- struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
- if (!gro_skb)
- goto next;
-
- nr = mlx4_en_complete_rx_desc(priv,
- rx_desc, frags, gro_skb,
- length);
- if (!nr)
- goto next;
-
- skb_shinfo(gro_skb)->nr_frags = nr;
- gro_skb->len = length;
- gro_skb->data_len = length;
- gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP)) {
+ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ cqe->checksum == cpu_to_be16(0xffff)) {
+ ip_summed = CHECKSUM_UNNECESSARY;
+ ring->csum_ok++;
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ }
+ } else {
+ if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+ MLX4_CQE_STATUS_IPV6))) {
+ ip_summed = CHECKSUM_COMPLETE;
+ ring->csum_complete++;
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ }
+ }
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ }
- if (l2_tunnel)
- gro_skb->csum_level = 1;
- if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- u16 vid = be16_to_cpu(cqe->sl_vid);
+ /* This packet is eligible for GRO if it is:
+ * - DIX Ethernet (type interpretation)
+ * - TCP/IP (v4)
+ * - without IP options
+ * - not an IP fragment
+ * - no LLS polling in progress
+ */
+ if (!mlx4_en_cq_busy_polling(cq) &&
+ (dev->features & NETIF_F_GRO)) {
+ struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
+ if (!gro_skb)
+ goto next;
+
+ nr = mlx4_en_complete_rx_desc(priv,
+ rx_desc, frags, gro_skb,
+ length);
+ if (!nr)
+ goto next;
+
+ if (ip_summed == CHECKSUM_COMPLETE) {
+ void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
+ if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ ring->csum_complete--;
+ }
+ }
- __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
- }
+ skb_shinfo(gro_skb)->nr_frags = nr;
+ gro_skb->len = length;
+ gro_skb->data_len = length;
+ gro_skb->ip_summed = ip_summed;
- if (dev->features & NETIF_F_RXHASH)
- skb_set_hash(gro_skb,
- be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
+ gro_skb->encapsulation = 1;
+ if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ u16 vid = be16_to_cpu(cqe->sl_vid);
- skb_record_rx_queue(gro_skb, cq->ring);
- skb_mark_napi_id(gro_skb, &cq->napi);
+ __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+ }
- if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev,
- skb_hwtstamps(gro_skb),
- timestamp);
- }
+ if (dev->features & NETIF_F_RXHASH)
+ skb_set_hash(gro_skb,
+ be32_to_cpu(cqe->immed_rss_invalid),
+ PKT_HASH_TYPE_L3);
- napi_gro_frags(&cq->napi);
- goto next;
- }
+ skb_record_rx_queue(gro_skb, cq->ring);
+ skb_mark_napi_id(gro_skb, &cq->napi);
- /* GRO not possible, complete processing here */
- ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+ if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev,
+ skb_hwtstamps(gro_skb),
+ timestamp);
}
- } else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+
+ napi_gro_frags(&cq->napi);
+ goto next;
}
+ /* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
priv->stats.rx_dropped++;
@@ -822,6 +928,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+ if (ip_summed == CHECKSUM_COMPLETE) {
+ if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_complete--;
+ ring->csum_none++;
+ }
+ }
+
skb->ip_summed = ip_summed;
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
@@ -879,8 +993,8 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- if (priv->port_up)
- napi_schedule(&cq->napi);
+ if (likely(priv->port_up))
+ napi_schedule_irqoff(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
@@ -910,20 +1024,18 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
cpu_curr = smp_processor_id();
aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
- if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
- /* Current cpu is not according to smp_irq_affinity -
- * probably affinity changed. need to stop this NAPI
- * poll, and restart it on the right CPU
- */
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
- return 0;
- }
- } else {
- /* Done for now */
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
+ if (likely(cpumask_test_cpu(cpu_curr, aff)))
+ return budget;
+
+ /* Current CPU does not match smp_irq_affinity;
+ * the affinity probably changed. Stop this NAPI
+ * poll and restart it on the right CPU.
+ */
+ done = 0;
}
+ /* Done for now */
+ napi_complete_done(napi, done);
+ mlx4_en_arm_cq(priv, cq);
return done;
}
@@ -946,15 +1058,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
(eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size;
- if (!i) {
- priv->frag_info[i].frag_align = NET_IP_ALIGN;
- priv->frag_info[i].frag_stride =
- ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
- } else {
- priv->frag_info[i].frag_align = 0;
- priv->frag_info[i].frag_stride =
- ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
- }
+ priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
+ SMP_CACHE_BYTES);
buf_size += priv->frag_info[i].frag_size;
i++;
}
@@ -967,11 +1072,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
- " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
+ " frag:%d - size:%d prefix:%d stride:%d\n",
i,
priv->frag_info[i].frag_size,
priv->frag_info[i].frag_prefix_size,
- priv->frag_info[i].frag_align,
priv->frag_info[i].frag_stride);
}
}
@@ -1065,9 +1169,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
int i, qpn;
int err = 0;
int good_qps = 0;
- static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
- 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
- 0x593D56D9, 0xF3253C06, 0x2ADC1FFC};
en_dbg(DRV, priv, "Configuring rss steering\n");
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
@@ -1122,9 +1223,19 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
- for (i = 0; i < 10; i++)
- rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);
-
+ if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
+ rss_context->hash_fn = MLX4_RSS_HASH_XOR;
+ } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
+ /* Use the RSS key configured via ethtool, if any */
+ memcpy(rss_context->rss_key, priv->rss_key,
+ MLX4_EN_RSS_KEY_SIZE);
+ } else {
+ en_err(priv, "Unknown RSS hash function requested\n");
+ err = -EINVAL;
+ goto indir_err;
+ }
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 49d5afc7cfb8..2d8ee66138e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,11 +129,15 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
- /* The device supports 1G, 10G and 40G speeds */
- if (priv->port_state.link_speed != 1000 &&
- priv->port_state.link_speed != 10000 &&
- priv->port_state.link_speed != 40000)
+ /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speeds */
+ if (priv->port_state.link_speed != SPEED_100 &&
+ priv->port_state.link_speed != SPEED_1000 &&
+ priv->port_state.link_speed != SPEED_10000 &&
+ priv->port_state.link_speed != SPEED_20000 &&
+ priv->port_state.link_speed != SPEED_40000 &&
+ priv->port_state.link_speed != SPEED_56000)
return priv->port_state.link_speed;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 454d9fea640e..d0cecbdd9ba8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -479,8 +479,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
- if (priv->port_up)
- napi_schedule(&cq->napi);
+ if (likely(priv->port_up))
+ napi_schedule_irqoff(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 49290a405903..d68b264cee4d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1123,8 +1123,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
goto err_out_free;
}
- err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
- dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
+ err = mlx4_bitmap_init(&priv->eq_table.bitmap,
+ roundup_pow_of_two(dev->caps.num_eqs),
+ dev->caps.num_eqs - 1,
+ dev->caps.reserved_eqs,
+ roundup_pow_of_two(dev->caps.num_eqs) -
+ dev->caps.num_eqs);
if (err)
goto err_out_free;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 2e88a235e26b..4251f81a0275 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -139,7 +139,12 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[10] = "TCP/IP offloads/flow-steering for VXLAN support",
[11] = "MAD DEMUX (Secure-Host) support",
[12] = "Large cache line (>64B) CQE stride support",
- [13] = "Large cache line (>64B) EQE stride support"
+ [13] = "Large cache line (>64B) EQE stride support",
+ [14] = "Ethernet protocol control support",
+ [15] = "Ethernet Backplane autoneg support",
+ [16] = "CONFIG DEV support",
+ [17] = "Asymmetric EQs support",
+ [18] = "More than 80 VFs support"
};
int i;
@@ -174,6 +179,61 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
return err;
}
+int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 in_modifier;
+ u8 field;
+ u16 field16;
+ int err;
+
+#define QUERY_FUNC_BUS_OFFSET 0x00
+#define QUERY_FUNC_DEVICE_OFFSET 0x01
+#define QUERY_FUNC_FUNCTION_OFFSET 0x01
+#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03
+#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04
+#define QUERY_FUNC_MAX_EQ_OFFSET 0x06
+#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ in_modifier = slave;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
+ MLX4_CMD_QUERY_FUNC,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
+ func->bus = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
+ func->device = field & 0xf1;
+ MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
+ func->function = field & 0x7;
+ MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
+ func->physical_function = field & 0xf;
+ MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
+ func->rsvd_eqs = field16 & 0xffff;
+ MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
+ func->max_eq = field16 & 0xffff;
+ MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
+ func->rsvd_uars = field & 0x0f;
+
+ mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
+ func->bus, func->device, func->function, func->physical_function,
+ func->max_eq, func->rsvd_eqs, func->rsvd_uars);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -184,6 +244,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
u8 field, port;
u32 size, proxy_qp, qkey;
int err = 0;
+ struct mlx4_func func;
#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
@@ -228,6 +289,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
@@ -306,11 +368,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.num_cqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
- size = dev->caps.num_eqs;
- MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
-
- size = dev->caps.reserved_eqs;
- MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
+ mlx4_QUERY_FUNC(dev, &func, slave)) {
+ size = vhcr->in_modifier &
+ QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
+ dev->caps.num_eqs :
+ rounddown_pow_of_two(dev->caps.num_eqs);
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ size = dev->caps.reserved_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ } else {
+ size = vhcr->in_modifier &
+ QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
+ func.max_eq :
+ rounddown_pow_of_two(func.max_eq);
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ size = func.rsvd_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ }
size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
@@ -332,7 +407,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
struct mlx4_func_cap *func_cap)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -340,14 +415,17 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
u8 field, op_modifier;
u32 size, qkey;
int err = 0, quotas = 0;
+ u32 in_modifier;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
+ in_modifier = op_modifier ? gen_or_port :
+ QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
MLX4_CMD_QUERY_FUNC_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
@@ -519,6 +597,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
+#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
@@ -560,6 +639,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
+#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -571,8 +651,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
+#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
+#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
@@ -605,7 +687,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
dev_cap->max_mpts = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
- dev_cap->reserved_eqs = field & 0xf;
+ dev_cap->reserved_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
@@ -616,6 +698,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->reserved_mrws = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
dev_cap->max_mtt_seg = 1 << (field & 0x3f);
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
+ dev_cap->num_sys_eqs = size & 0xfff;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
@@ -737,15 +821,22 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
dev_cap->max_rq_desc_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+ if (field & (1 << 5))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
if (field & (1 << 6))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
if (field & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
-
MLX4_GET(dev_cap->bmme_flags, outbox,
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+ if (field & 0x20)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+ MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
+ if (field32 & (1 << 0))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -770,6 +861,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
if (field32 & (1 << 20))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
+ if (field32 & (1 << 21))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -836,8 +929,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
* we can't use any EQs whose doorbell falls on that page,
* even if the EQ itself isn't reserved.
*/
- dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
- dev_cap->reserved_eqs);
+ if (dev_cap->num_sys_eqs == 0)
+ dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
+ dev_cap->reserved_eqs);
+ else
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
mlx4_dbg(dev, "Max ICM size %lld MB\n",
(unsigned long long) dev_cap->max_icm_sz >> 20);
@@ -847,8 +943,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
- mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
- dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
+ mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
+ dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
+ dev_cap->eqc_entry_sz);
mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
dev_cap->reserved_mrws, dev_cap->reserved_mtts);
mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
@@ -1394,6 +1491,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
+#define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a)
#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
@@ -1497,6 +1595,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET);
MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
@@ -1607,6 +1706,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
@@ -1841,14 +1941,18 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
struct mlx4_config_dev {
__be32 update_flags;
- __be32 rsdv1[3];
+ __be32 rsvd1[3];
__be16 vxlan_udp_dport;
__be16 rsvd2;
+ __be32 rsvd3[27];
+ __be16 rsvd4;
+ u8 rsvd5;
+ u8 rx_checksum_val;
};
#define MLX4_VXLAN_UDP_DPORT (1 << 0)
-static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
int err;
struct mlx4_cmd_mailbox *mailbox;
@@ -1866,6 +1970,77 @@ static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_
return err;
}
+static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+{
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ if (!err)
+ memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+/* Conversion between the HW values and the actual functionality.
+ * The value is represented by the array index,
+ * and the functionality is determined by the flags.
+ */
+static const u8 config_dev_csum_flags[] = {
+ [0] = 0,
+ [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
+ [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP |
+ MLX4_RX_CSUM_MODE_L4,
+ [3] = MLX4_RX_CSUM_MODE_L4 |
+ MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP |
+ MLX4_RX_CSUM_MODE_MULTI_VLAN
+};
+
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+ struct mlx4_config_dev_params *params)
+{
+ struct mlx4_config_dev config_dev;
+ int err;
+ u8 csum_mask;
+
+#define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7
+#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0
+#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4
+
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
+ return -ENOTSUPP;
+
+ err = mlx4_CONFIG_DEV_get(dev, &config_dev);
+ if (err)
+ return err;
+
+ csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
+ CONFIG_DEV_RX_CSUM_MODE_MASK;
+
+ if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
+ return -EINVAL;
+ params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
+
+ csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
+ CONFIG_DEV_RX_CSUM_MODE_MASK;
+
+ if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
+ return -EINVAL;
+ params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
+
+ params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
+
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
struct mlx4_config_dev config_dev;
@@ -1874,7 +2049,7 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
config_dev.vxlan_udp_dport = udp_port;
- return mlx4_CONFIG_DEV(dev, &config_dev);
+ return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
@@ -2144,3 +2319,142 @@ out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+
+/* Access Reg commands */
+enum mlx4_access_reg_masks {
+ MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
+ MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
+ MLX4_ACCESS_REG_LEN_MASK = 0x7ff
+};
+
+struct mlx4_access_reg {
+ __be16 constant1;
+ u8 status;
+ u8 resrvd1;
+ __be16 reg_id;
+ u8 method;
+ u8 constant2;
+ __be32 resrvd2[2];
+ __be16 len_const;
+ __be16 resrvd3;
+#define MLX4_ACCESS_REG_HEADER_SIZE (20)
+ u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
+} __attribute__((__packed__));
+
+/**
+ * mlx4_ACCESS_REG - Generic access reg command.
+ * @dev: mlx4_dev.
+ * @reg_id: register ID to access.
+ * @method: Access method Read/Write.
+ * @reg_len: register length to Read/Write in bytes.
+ * @reg_data: reg_data pointer to Read/Write From/To.
+ *
+ * Access ConnectX registers FW command.
+ * Returns 0 on success and copies outbox mlx4_access_reg data
+ * field into reg_data or a negative error code.
+ */
+static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
+ enum mlx4_access_reg_method method,
+ u16 reg_len, void *reg_data)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_access_reg *inbuf, *outbuf;
+ int err;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inbuf = inbox->buf;
+ outbuf = outbox->buf;
+
+ inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
+ inbuf->constant2 = 0x1;
+ inbuf->reg_id = cpu_to_be16(reg_id);
+ inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
+
+ reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
+ inbuf->len_const =
+ cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
+ ((0x3) << 12));
+
+ memcpy(inbuf->reg_data, reg_data, reg_len);
+ err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
+ MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
+ err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
+ mlx4_err(dev,
+ "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
+ reg_id, err);
+ goto out;
+ }
+
+ memcpy(reg_data, outbuf->reg_data, reg_len);
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return err;
+}
+
+/* ConnectX registers IDs */
+enum mlx4_reg_id {
+ MLX4_REG_ID_PTYS = 0x5004,
+};
+
+/**
+ * mlx4_ACCESS_PTYS_REG - Access PTYS (Port Type and Speed)
+ * register
+ * @dev: mlx4_dev.
+ * @method: Access method Read/Write.
+ * @ptys_reg: PTYS register data pointer.
+ *
+ * Access ConnectX PTYS register, to Read/Write Port Type/Speed
+ * configuration.
+ * Returns 0 on success or a negative error code.
+ */
+int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
+ enum mlx4_access_reg_method method,
+ struct mlx4_ptys_reg *ptys_reg)
+{
+ return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
+ method, sizeof(*ptys_reg), ptys_reg);
+}
+EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
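
Typical use mirrors the ethtool path added earlier in this patch: query the register, adjust eth_proto_admin, write it back. A condensed sketch, assuming dev is a valid struct mlx4_dev pointer and port a valid port number; error handling is elided:

/* Condensed from mlx4_en_set_settings() above, for illustration only */
struct mlx4_ptys_reg ptys_reg = {
	.local_port = port,
	.proto_mask = MLX4_PTYS_EN,
};

if (!mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg)) {
	/* never advertise beyond what the port is capable of */
	ptys_reg.eth_proto_admin &= ptys_reg.eth_proto_cap;
	mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_WRITE, &ptys_reg);
}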
+
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_access_reg *inbuf = inbox->buf;
+ u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
+ u16 reg_id = be16_to_cpu(inbuf->reg_id);
+
+ if (slave != mlx4_master_func_num(dev) &&
+ method == MLX4_ACCESS_REG_WRITE)
+ return -EPERM;
+
+ if (reg_id == MLX4_REG_ID_PTYS) {
+ struct mlx4_ptys_reg *ptys_reg =
+ (struct mlx4_ptys_reg *)inbuf->reg_data;
+
+ ptys_reg->local_port =
+ mlx4_slave_convert_port(dev, slave,
+ ptys_reg->local_port);
+ }
+
+ return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
+ 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 9b835aecac96..475215ee370f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -56,6 +56,7 @@ struct mlx4_dev_cap {
int max_mpts;
int reserved_eqs;
int max_eqs;
+ int num_sys_eqs;
int reserved_mtts;
int max_mrw_sz;
int reserved_mrws;
@@ -145,6 +146,16 @@ struct mlx4_func_cap {
u64 phys_port_id;
};
+struct mlx4_func {
+ int bus;
+ int device;
+ int function;
+ int physical_function;
+ int rsvd_eqs;
+ int max_eq;
+ int rsvd_uars;
+};
+
struct mlx4_adapter {
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
@@ -170,6 +181,7 @@ struct mlx4_init_hca_param {
u8 log_num_srqs;
u8 log_num_cqs;
u8 log_num_eqs;
+ u16 num_sys_eqs;
u8 log_rd_per_qp;
u8 log_mc_table_sz;
u8 log_mpt_sz;
@@ -204,13 +216,14 @@ struct mlx4_set_ib_param {
};
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
struct mlx4_func_cap *func_cap);
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave);
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 90de6e1ad06e..3044f9e623cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -197,6 +197,29 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
dev->caps.port_mask[i] = dev->caps.port_type[i];
}
+enum {
+ MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
+};
+
+static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+ int err = 0;
+ struct mlx4_func func;
+
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ err = mlx4_QUERY_FUNC(dev, &func, 0);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+ dev_cap->max_eqs = func.max_eq;
+ dev_cap->reserved_eqs = func.rsvd_eqs;
+ dev_cap->reserved_uars = func.rsvd_uars;
+ err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
+ }
+ return err;
+}
+
static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
struct mlx4_caps *dev_cap = &dev->caps;
@@ -261,7 +284,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
dev->caps.num_ports = dev_cap->num_ports;
- dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
+ dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
+ dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
+ dev->caps.num_sys_eqs :
+ MLX4_MAX_EQ_NUM;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i];
dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
@@ -631,7 +657,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
struct mlx4_dev_cap dev_cap;
struct mlx4_func_cap func_cap;
struct mlx4_init_hca_param hca_param;
- int i;
+ u8 i;
memset(&hca_param, 0, sizeof(hca_param));
err = mlx4_QUERY_HCA(dev, &hca_param);
@@ -732,7 +758,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
for (i = 1; i <= dev->caps.num_ports; ++i) {
- err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
+ err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
if (err) {
mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
i, err);
@@ -901,9 +927,12 @@ static ssize_t set_port_type(struct device *dev,
struct mlx4_priv *priv = mlx4_priv(mdev);
enum mlx4_port_type types[MLX4_MAX_PORTS];
enum mlx4_port_type new_types[MLX4_MAX_PORTS];
+ static DEFINE_MUTEX(set_port_type_mutex);
int i;
int err = 0;
+ mutex_lock(&set_port_type_mutex);
+
if (!strcmp(buf, "ib\n"))
info->tmp_type = MLX4_PORT_TYPE_IB;
else if (!strcmp(buf, "eth\n"))
@@ -912,7 +941,8 @@ static ssize_t set_port_type(struct device *dev,
info->tmp_type = MLX4_PORT_TYPE_AUTO;
else {
mlx4_err(mdev, "%s is not supported port type\n", buf);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_out;
}
mlx4_stop_sense(mdev);
@@ -958,6 +988,9 @@ static ssize_t set_port_type(struct device *dev,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
+err_out:
+ mutex_unlock(&set_port_type_mutex);
+
return err ? err : count;
}
@@ -1123,8 +1156,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
- num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
- dev->caps.num_eqs;
+ num_eqs = dev->phys_caps.num_phys_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
@@ -1186,8 +1218,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
}
- num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
- dev->caps.num_eqs;
+ num_eqs = dev->phys_caps.num_phys_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
@@ -1466,6 +1497,12 @@ static void mlx4_close_hca(struct mlx4_dev *dev)
else {
mlx4_CLOSE_HCA(dev, 0);
mlx4_free_icms(dev);
+ }
+}
+
+static void mlx4_close_fw(struct mlx4_dev *dev)
+{
+ if (!mlx4_is_slave(dev)) {
mlx4_UNMAP_FA(dev);
mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}
@@ -1612,16 +1649,10 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
== MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}
-static int mlx4_init_hca(struct mlx4_dev *dev)
+static int mlx4_init_fw(struct mlx4_dev *dev)
{
- struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_adapter adapter;
- struct mlx4_dev_cap dev_cap;
struct mlx4_mod_stat_cfg mlx4_cfg;
- struct mlx4_profile profile;
- struct mlx4_init_hca_param init_hca;
- u64 icm_size;
- int err;
+ int err = 0;
if (!mlx4_is_slave(dev)) {
err = mlx4_QUERY_FW(dev);
@@ -1644,7 +1675,23 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
if (err)
mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+ }
+
+ return err;
+}
+
+static int mlx4_init_hca(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_adapter adapter;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_profile profile;
+ struct mlx4_init_hca_param init_hca;
+ u64 icm_size;
+ struct mlx4_config_dev_params params;
+ int err;
+ if (!mlx4_is_slave(dev)) {
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
@@ -1696,6 +1743,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
+
+ if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ err = mlx4_query_func(dev, &dev_cap);
+ if (err < 0) {
+ mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
+ goto err_stop_fw;
+ } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
+ dev->caps.num_eqs = dev_cap.max_eqs;
+ dev->caps.reserved_eqs = dev_cap.reserved_eqs;
+ dev->caps.reserved_uars = dev_cap.reserved_uars;
+ }
+ }
+
/*
* If TS is supported by FW
* read HCA frequency by QUERY_HCA command
@@ -1755,6 +1815,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto unmap_bf;
}
+ /* Query CONFIG_DEV parameters */
+ err = mlx4_config_dev_retrieval(dev, &params);
+ if (err && err != -ENOTSUPP) {
+ mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
+ } else if (!err) {
+ dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
+ dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
+ }
priv->eq_table.inta_pin = adapter.inta_pin;
memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
@@ -2054,12 +2122,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
- int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, num_online_cpus() + 1,
- MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
int i;
if (msi_x) {
+ int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
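
The MSI-X hunk moves the vector-count calculation into the msi_x branch and drops the MAX_MSIX_P_PORT clamp, so the request becomes roughly ports times online CPUs plus the legacy vectors, bounded by the EQs actually available. A standalone arithmetic sketch with assumed sample values:

	#include <assert.h>

	static int min_int(int a, int b) { return a < b ? a : b; }

	int main(void)
	{
		int num_ports = 2, online_cpus = 8, msix_legacy_sz = 4; /* assumed */
		int num_eqs = 64, reserved_eqs = 8;			/* assumed */

		int nreq = num_ports * online_cpus + msix_legacy_sz;	/* 20 */

		nreq = min_int(num_eqs - reserved_eqs, nreq);		/* min(56, 20) */
		assert(nreq == 20);
		return 0;
	}
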
@@ -2259,6 +2326,71 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
iounmap(owner);
}
+#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
+ !!((flags) & MLX4_FLAG_MASTER))
+
+static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
+ u8 total_vfs, int existing_vfs)
+{
+ u64 dev_flags = dev->flags;
+
+ dev->dev_vfs = kzalloc(
+ total_vfs * sizeof(*dev->dev_vfs),
+ GFP_KERNEL);
+ if (NULL == dev->dev_vfs) {
+ mlx4_err(dev, "Failed to allocate memory for VFs\n");
+ goto disable_sriov;
+ } else if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+ int err = 0;
+
+ atomic_inc(&pf_loading);
+ if (existing_vfs) {
+ if (existing_vfs != total_vfs)
+ mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
+ existing_vfs, total_vfs);
+ } else {
+ mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
+ err = pci_enable_sriov(pdev, total_vfs);
+ }
+ if (err) {
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
+ err);
+ atomic_dec(&pf_loading);
+ goto disable_sriov;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev_flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev_flags &= ~MLX4_FLAG_SLAVE;
+ dev->num_vfs = total_vfs;
+ }
+ }
+ return dev_flags;
+
+disable_sriov:
+ dev->num_vfs = 0;
+ kfree(dev->dev_vfs);
+ return dev_flags & ~MLX4_FLAG_MASTER;
+}
+
+enum {
+ MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
+};
+
+static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
+ int *nvfs)
+{
+ int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
+ /* Checking for 64 VFs as a limitation of CX2 */
+ if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
+ requested_vfs >= 64) {
+ mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
+ requested_vfs);
+ return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
+ }
+ return 0;
+}
+
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv)
{
@@ -2267,6 +2399,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int err;
int port;
int i;
+ struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
dev = &priv->dev;
@@ -2303,40 +2436,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- if (total_vfs) {
- mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
- total_vfs);
- dev->dev_vfs = kzalloc(
- total_vfs * sizeof(*dev->dev_vfs),
- GFP_KERNEL);
- if (NULL == dev->dev_vfs) {
- mlx4_err(dev, "Failed to allocate memory for VFs\n");
- err = -ENOMEM;
- goto err_free_own;
- } else {
- atomic_inc(&pf_loading);
- existing_vfs = pci_num_vf(pdev);
- if (existing_vfs) {
- err = 0;
- if (existing_vfs != total_vfs)
- mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
- existing_vfs, total_vfs);
- } else {
- err = pci_enable_sriov(pdev, total_vfs);
- }
- if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
- err);
- atomic_dec(&pf_loading);
- } else {
- mlx4_warn(dev, "Running in master mode\n");
- dev->flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
- dev->num_vfs = total_vfs;
- }
- }
- }
-
atomic_set(&priv->opreq_count, 0);
INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
@@ -2350,6 +2449,12 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
mlx4_err(dev, "Failed to reset HCA, aborting\n");
goto err_sriov;
}
+
+ if (total_vfs) {
+ existing_vfs = pci_num_vf(pdev);
+ dev->flags = MLX4_FLAG_MASTER;
+ dev->num_vfs = total_vfs;
+ }
}
slave_start:
@@ -2363,9 +2468,10 @@ slave_start:
* before posting commands. Also, init num_slaves before calling
* mlx4_init_hca */
if (mlx4_is_mfunc(dev)) {
- if (mlx4_is_master(dev))
+ if (mlx4_is_master(dev)) {
dev->num_slaves = MLX4_MAX_NUM_SLAVES;
- else {
+
+ } else {
dev->num_slaves = 0;
err = mlx4_multi_func_init(dev);
if (err) {
@@ -2375,17 +2481,109 @@ slave_start:
}
}
+ err = mlx4_init_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to init fw, aborting.\n");
+ goto err_mfunc;
+ }
+
+ if (mlx4_is_master(dev)) {
+ if (!dev_cap) {
+ dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
+
+ if (!dev_cap) {
+ err = -ENOMEM;
+ goto err_fw;
+ }
+
+ err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_fw;
+ }
+
+ if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
+ goto err_fw;
+
+ if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
+ u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
+ existing_vfs);
+
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
+ dev->flags = dev_flags;
+ if (!SRIOV_VALID_STATE(dev->flags)) {
+ mlx4_err(dev, "Invalid SRIOV state\n");
+ goto err_sriov;
+ }
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_sriov;
+ }
+ goto slave_start;
+ }
+ } else {
+ /* Legacy mode FW requires SR-IOV to be enabled before
+ * doing QUERY_DEV_CAP, since the value of max_eq differs
+ * when SR-IOV is enabled.
+ */
+ memset(dev_cap, 0, sizeof(*dev_cap));
+ err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_fw;
+ }
+
+ if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
+ goto err_fw;
+ }
+ }
+
err = mlx4_init_hca(dev);
if (err) {
if (err == -EACCES) {
/* Not primary Physical function
* Running in slave mode */
- mlx4_cmd_cleanup(dev);
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
+ /* We're not a PF */
+ if (dev->flags & MLX4_FLAG_SRIOV) {
+ if (!existing_vfs)
+ pci_disable_sriov(pdev);
+ if (mlx4_is_master(dev))
+ atomic_dec(&pf_loading);
+ dev->flags &= ~MLX4_FLAG_SRIOV;
+ }
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
dev->flags |= MLX4_FLAG_SLAVE;
dev->flags &= ~MLX4_FLAG_MASTER;
goto slave_start;
} else
- goto err_mfunc;
+ goto err_fw;
+ }
+
+ if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
+ u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);
+
+ if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
+ dev->flags = dev_flags;
+ err = mlx4_cmd_init(dev);
+ if (err) {
+ /* Only the VHCR was cleaned up, so we can
+ * still send FW commands
+ */
+ mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
+ goto err_close;
+ }
+ } else {
+ dev->flags = dev_flags;
+ }
+
+ if (!SRIOV_VALID_STATE(dev->flags)) {
+ mlx4_err(dev, "Invalid SRIOV state\n");
+ goto err_close;
+ }
}
/* check if the device is functioning at its maximum possible speed.
@@ -2540,12 +2738,15 @@ err_master_mfunc:
err_close:
mlx4_close_hca(dev);
+err_fw:
+ mlx4_close_fw(dev);
+
err_mfunc:
if (mlx4_is_slave(dev))
mlx4_multi_func_cleanup(dev);
err_cmd:
- mlx4_cmd_cleanup(dev);
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
err_sriov:
if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
@@ -2556,10 +2757,10 @@ err_sriov:
kfree(priv->dev.dev_vfs);
-err_free_own:
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
+ kfree(dev_cap);
return err;
}
@@ -2787,15 +2988,17 @@ static void mlx4_unload_one(struct pci_dev *pdev)
if (mlx4_is_master(dev))
mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev);
+ mlx4_close_fw(dev);
if (mlx4_is_slave(dev))
mlx4_multi_func_cleanup(dev);
- mlx4_cmd_cleanup(dev);
+ mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
+ dev->flags &= ~MLX4_FLAG_SRIOV;
dev->num_vfs = 0;
}
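
SRIOV_VALID_STATE(), introduced above, accepts a flags word only when MLX4_FLAG_SRIOV and MLX4_FLAG_MASTER are either both set or both clear. A standalone truth-table check (the flag values are assumed for the sketch):

	#include <assert.h>

	#define MLX4_FLAG_MASTER (1 << 2)	/* values assumed for the sketch */
	#define MLX4_FLAG_SRIOV  (1 << 4)

	#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) == \
					  !!((flags) & MLX4_FLAG_MASTER))

	int main(void)
	{
		assert(SRIOV_VALID_STATE(0));				/* neither */
		assert(SRIOV_VALID_STATE(MLX4_FLAG_SRIOV |
					 MLX4_FLAG_MASTER));		/* both */
		assert(!SRIOV_VALID_STATE(MLX4_FLAG_SRIOV));	/* SR-IOV, no master */
		assert(!SRIOV_VALID_STATE(MLX4_FLAG_MASTER));	/* master, no SR-IOV */
		return 0;
	}
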
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index de10dbb2e6ed..f48e7c3eecf8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -606,6 +606,7 @@ struct mlx4_cmd {
u8 use_events;
u8 toggle;
u8 comm_toggle;
+ u8 initialized;
};
enum {
@@ -947,6 +948,11 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -1121,8 +1127,16 @@ int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
+enum {
+ MLX4_CMD_CLEANUP_STRUCT = 1UL << 0,
+ MLX4_CMD_CLEANUP_POOL = 1UL << 1,
+ MLX4_CMD_CLEANUP_HCR = 1UL << 2,
+ MLX4_CMD_CLEANUP_VHCR = 1UL << 3,
+ MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1
+};
+
int mlx4_cmd_init(struct mlx4_dev *dev);
-void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
int mlx4_multi_func_init(struct mlx4_dev *dev);
void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
@@ -1273,6 +1287,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
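
The MLX4_CMD_CLEANUP_ALL value above relies on the usual all-lower-bits trick: shifting the highest flag left once and subtracting one sets every bit below it. A standalone check:

	#include <assert.h>

	enum {
		MLX4_CMD_CLEANUP_STRUCT = 1UL << 0,
		MLX4_CMD_CLEANUP_POOL   = 1UL << 1,
		MLX4_CMD_CLEANUP_HCR    = 1UL << 2,
		MLX4_CMD_CLEANUP_VHCR   = 1UL << 3,
		MLX4_CMD_CLEANUP_ALL    = (MLX4_CMD_CLEANUP_VHCR << 1) - 1
	};

	int main(void)
	{
		/* (1 << 4) - 1 == 0xf: the union of all four cleanup flags */
		assert(MLX4_CMD_CLEANUP_ALL ==
		       (MLX4_CMD_CLEANUP_STRUCT | MLX4_CMD_CLEANUP_POOL |
			MLX4_CMD_CLEANUP_HCR | MLX4_CMD_CLEANUP_VHCR));
		return 0;
	}
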
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8fef65840b3b..ac48a8d91501 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -326,6 +326,7 @@ struct mlx4_en_rx_ring {
#endif
unsigned long csum_ok;
unsigned long csum_none;
+ unsigned long csum_complete;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
};
@@ -375,7 +376,6 @@ struct mlx4_en_port_profile {
};
struct mlx4_en_profile {
- int rss_xor;
int udp_rss;
u8 rss_mask;
u32 active_ports;
@@ -421,10 +421,16 @@ struct mlx4_en_rss_map {
enum mlx4_qp_state indir_state;
};
+enum mlx4_en_port_flag {
+ MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */
+ MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */
+};
+
struct mlx4_en_port_state {
int link_state;
int link_speed;
- int transciver;
+ int transceiver;
+ u32 flags;
};
struct mlx4_en_pkt_stats {
@@ -443,6 +449,7 @@ struct mlx4_en_port_stats {
unsigned long rx_alloc_failed;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
+ unsigned long rx_chksum_complete;
unsigned long tx_chksum_offload;
#define NUM_PORT_STATS 9
};
@@ -475,7 +482,6 @@ struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
u16 frag_stride;
- u16 frag_align;
};
#ifdef CONFIG_MLX4_EN_DCB
@@ -502,7 +508,8 @@ enum {
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
/* whether we need to drop packets that hardware loopback-ed */
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
- MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
+ MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4),
+ MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
};
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
@@ -610,6 +617,8 @@ struct mlx4_en_priv {
__be16 vxlan_port;
u32 pflags;
+ u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
+ u8 rss_hash_fn;
};
enum mlx4_en_wol {
@@ -829,6 +838,13 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
+#define DEV_FEATURE_CHANGED(dev, new_features, feature) \
+ ((dev->features & feature) ^ (new_features & feature))
+
+int mlx4_en_reset_config(struct net_device *dev,
+ struct hwtstamp_config ts_config,
+ netdev_features_t new_features);
+
/*
* Functions for time stamping
*/
@@ -838,9 +854,6 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
-int mlx4_en_timestamp_config(struct net_device *dev,
- int tx_type,
- int rx_filter);
/* Globals
*/
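
DEV_FEATURE_CHANGED() above XORs the old and new feature words under a single-feature mask, so it is nonzero exactly when that feature bit flipped. A standalone sketch (the features type and feature value are stand-ins, not the kernel's definitions):

	#include <assert.h>

	typedef unsigned long long features_t;	/* stand-in for netdev_features_t */

	#define FEAT_RXCSUM (1ULL << 3)		/* feature bit value assumed */

	#define FEATURE_CHANGED(old, new, feature) \
		(((old) & (feature)) ^ ((new) & (feature)))

	int main(void)
	{
		assert(FEATURE_CHANGED(0, FEAT_RXCSUM, FEAT_RXCSUM));	/* turned on */
		assert(FEATURE_CHANGED(FEAT_RXCSUM, 0, FEAT_RXCSUM));	/* turned off */
		assert(!FEATURE_CHANGED(FEAT_RXCSUM, FEAT_RXCSUM,
					FEAT_RXCSUM));		/* unchanged */
		return 0;
	}
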
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 193a6adb5d04..d6f549685c0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -130,10 +130,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
- vfree(buddy->bits[i]);
- else
- kfree(buddy->bits[i]);
+ kvfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -147,10 +144,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- if (is_vmalloc_addr(buddy->bits[i]))
- vfree(buddy->bits[i]);
- else
- kfree(buddy->bits[i]);
+ kvfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
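
The mr.c hunks replace the is_vmalloc_addr() branching with kvfree(), which frees kmalloc'ed, vmalloc'ed and NULL pointers alike. A hedged kernel-style sketch of the dual-allocator pattern this simplifies (mirroring, not reproducing, mlx4_buddy_init; relevant headers are <linux/slab.h>, <linux/vmalloc.h> and <linux/mm.h>):

	/* Sketch only: a bitmap that may come from either allocator,
	 * freed uniformly with kvfree().
	 */
	static unsigned long *alloc_bitmap(unsigned int nbits)
	{
		size_t sz = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
		unsigned long *p = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);

		if (!p)
			p = vmalloc(sz);	/* large orders fall back to vmalloc */
		return p;
	}

	static void free_bitmap(unsigned long *p)
	{
		kvfree(p);		/* one call covers both allocation paths */
	}
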
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 94eeb2c7d7e4..30eb1ead0fe6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1311,3 +1311,159 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
return 0;
}
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
+
+/* Cable Module Info */
+#define MODULE_INFO_MAX_READ 48
+
+#define I2C_ADDR_LOW 0x50
+#define I2C_ADDR_HIGH 0x51
+#define I2C_PAGE_SIZE 256
+
+/* Module Info Data */
+struct mlx4_cable_info {
+ u8 i2c_addr;
+ u8 page_num;
+ __be16 dev_mem_address;
+ __be16 reserved1;
+ __be16 size;
+ __be32 reserved2[2];
+ u8 data[MODULE_INFO_MAX_READ];
+};
+
+enum cable_info_err {
+ CABLE_INF_INV_PORT = 0x1,
+ CABLE_INF_OP_NOSUP = 0x2,
+ CABLE_INF_NOT_CONN = 0x3,
+ CABLE_INF_NO_EEPRM = 0x4,
+ CABLE_INF_PAGE_ERR = 0x5,
+ CABLE_INF_INV_ADDR = 0x6,
+ CABLE_INF_I2C_ADDR = 0x7,
+ CABLE_INF_QSFP_VIO = 0x8,
+ CABLE_INF_I2C_BUSY = 0x9,
+};
+
+#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
+
+static inline const char *cable_info_mad_err_str(u16 mad_status)
+{
+ u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
+
+ switch (err) {
+ case CABLE_INF_INV_PORT:
+ return "invalid port selected";
+ case CABLE_INF_OP_NOSUP:
+ return "operation not supported for this port (the port is of type CX4 or internal)";
+ case CABLE_INF_NOT_CONN:
+ return "cable is not connected";
+ case CABLE_INF_NO_EEPRM:
+ return "the connected cable has no EPROM (passive copper cable)";
+ case CABLE_INF_PAGE_ERR:
+ return "page number is greater than 15";
+ case CABLE_INF_INV_ADDR:
+ return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
+ case CABLE_INF_I2C_ADDR:
+ return "invalid I2C slave address";
+ case CABLE_INF_QSFP_VIO:
+ return "at least one cable violates the QSFP specification and ignores the modsel signal";
+ case CABLE_INF_I2C_BUSY:
+ return "I2C bus is constantly busy";
+ }
+ return "Unknown Error";
+}
+
+/**
+ * mlx4_get_module_info - Read cable module eeprom data
+ * @dev: mlx4_dev.
+ * @port: port number.
+ * @offset: byte offset in eeprom to start reading data from.
+ * @size: number of bytes to read.
+ * @data: output buffer to put the requested data into.
+ *
+ * Reads cable module eeprom data and copies the result into the
+ * @data pointer parameter.
+ * Returns the number of bytes read on success or a negative error
+ * code.
+ */
+int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+ u16 offset, u16 size, u8 *data)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+ u16 i2c_addr;
+ int ret;
+
+ if (size > MODULE_INFO_MAX_READ)
+ size = MODULE_INFO_MAX_READ;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+ inmad->method = 0x1; /* Get */
+ inmad->class_version = 0x1;
+ inmad->mgmt_class = 0x1;
+ inmad->base_version = 0x1;
+ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+ if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
+ /* Cross-page reads are not allowed;
+ * read only up to offset 256 in the low page
+ */
+ size -= offset + size - I2C_PAGE_SIZE;
+
+ i2c_addr = I2C_ADDR_LOW;
+ if (offset >= I2C_PAGE_SIZE) {
+ /* Reset offset to high page */
+ i2c_addr = I2C_ADDR_HIGH;
+ offset -= I2C_PAGE_SIZE;
+ }
+
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = cpu_to_be16(offset);
+ cable_info->page_num = 0;
+ cable_info->i2c_addr = i2c_addr;
+ cable_info->size = cpu_to_be16(size);
+
+ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ if (be16_to_cpu(outmad->status)) {
+ /* MAD returned with a bad status */
+ ret = be16_to_cpu(outmad->status);
+ mlx4_warn(dev,
+ "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+ 0xFF60, port, i2c_addr, offset, size,
+ ret, cable_info_mad_err_str(ret));
+
+ if (i2c_addr == I2C_ADDR_HIGH &&
+ MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
+ /* Some SFP cables do not support i2c slave
+ * address 0x51 (high page), abort silently.
+ */
+ ret = 0;
+ else
+ ret = -ret;
+ goto out;
+ }
+ cable_info = (struct mlx4_cable_info *)outmad->data;
+ memcpy(data, cable_info->data, size);
+ ret = size;
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+EXPORT_SYMBOL(mlx4_get_module_info);
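
mlx4_get_module_info() clamps a read so it never crosses the 256-byte I2C page boundary, and rebases offsets at or beyond 256 onto the high I2C address. A standalone model of just that fixup logic (fixup_read() is a hypothetical helper, not the driver's code):

	#include <assert.h>

	#define I2C_ADDR_LOW  0x50
	#define I2C_ADDR_HIGH 0x51
	#define I2C_PAGE_SIZE 256

	static void fixup_read(unsigned *offset, unsigned *size, unsigned *i2c_addr)
	{
		if (*offset < I2C_PAGE_SIZE && *offset + *size > I2C_PAGE_SIZE)
			*size -= *offset + *size - I2C_PAGE_SIZE; /* stop at page end */

		*i2c_addr = I2C_ADDR_LOW;
		if (*offset >= I2C_PAGE_SIZE) {		/* high page: rebase offset */
			*i2c_addr = I2C_ADDR_HIGH;
			*offset -= I2C_PAGE_SIZE;
		}
	}

	int main(void)
	{
		unsigned off = 250, sz = 20, addr;

		fixup_read(&off, &sz, &addr);		/* would cross the boundary */
		assert(off == 250 && sz == 6 && addr == I2C_ADDR_LOW);

		off = 260; sz = 10;
		fixup_read(&off, &sz, &addr);		/* entirely in the high page */
		assert(off == 4 && sz == 10 && addr == I2C_ADDR_HIGH);
		return 0;
	}
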
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 14089d9e1667..2bf437aafc53 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,8 +126,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
- profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ?
- dev->phys_caps.num_phys_eqs :
+ profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? dev->phys_caps.num_phys_eqs :
min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
@@ -216,10 +215,18 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->log_num_cqs = profile[i].log_num;
break;
case MLX4_RES_EQ:
- dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
- MAX_MSIX));
- init_hca->eqc_base = profile[i].start;
- init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ init_hca->log_num_eqs = 0x1f;
+ init_hca->eqc_base = profile[i].start;
+ init_hca->num_sys_eqs = dev_cap->num_sys_eqs;
+ } else {
+ dev->caps.num_eqs = roundup_pow_of_two(
+ min_t(unsigned,
+ dev_cap->max_eqs,
+ MAX_MSIX));
+ init_hca->eqc_base = profile[i].start;
+ init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
+ }
break;
case MLX4_RES_DMPT:
dev->caps.num_mpts = profile[i].num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cd5cf6d957c7..16f617b5749e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2872,6 +2872,23 @@ out_add:
return err;
}
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ u8 get = vhcr->op_modifier;
+
+ if (get != 1)
+ return -EPERM;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ return err;
+}
+
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
int len, struct res_mtt **res)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 368c6c5ea014..a2853057c779 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1363,7 +1363,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_map;
}
- if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
+ if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
dev_err(&dev->pdev->dev, "command queue size overflow\n");
err = -EINVAL;
goto err_map;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ad2c96a02a53..ab684463780b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -225,8 +225,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
- mlx5_core_dbg(dev, "event %s(%d) arrived\n",
- eqe_type_str(eqe->type), eqe->type);
+ mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
+ eqe_type_str(eqe->type), eqe->type, rsn);
mlx5_rsc_event(dev, rsn, eqe->type);
break;
@@ -390,7 +390,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
*/
eq_update_ci(eq, 1);
- mlx5_vfree(in);
+ kvfree(in);
return 0;
err_irq:
@@ -400,7 +400,7 @@ err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
err_in:
- mlx5_vfree(in);
+ kvfree(in);
err_buf:
mlx5_buf_free(dev, &eq->buf);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 71b10b210792..3f4525619a07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -43,6 +43,7 @@
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
+#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
@@ -225,7 +226,7 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
table->msix_arr[i].entry = i;
nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
- MLX5_EQ_VEC_COMP_BASE, nvec);
+ MLX5_EQ_VEC_COMP_BASE + 1, nvec);
if (nvec < 0)
return nvec;
@@ -840,6 +841,8 @@ struct mlx5_core_event_handler {
void *data);
};
+#define MLX5_IB_MOD "mlx5_ib"
+
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -878,6 +881,10 @@ static int init_one(struct pci_dev *pdev,
goto out_init;
}
+ err = request_module_nowait(MLX5_IB_MOD);
+ if (err)
+ pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
return 0;
out_init:
@@ -896,8 +903,12 @@ static void remove_one(struct pci_dev *pdev)
}
static const struct pci_device_id mlx5_core_pci_table[] = {
- { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
+ { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
+ { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
+ { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
+ { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
+ { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d476918ef269..4fdaae9b54d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -349,7 +349,7 @@ out_4k:
for (i--; i >= 0; i--)
free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
- mlx5_vfree(in);
+ kvfree(in);
return err;
}
@@ -400,7 +400,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}
out_free:
- mlx5_vfree(out);
+ kvfree(out);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 313965853e10..72c2d002c3b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -68,9 +68,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
memcpy(data_out, out->data, size_out);
ex2:
- mlx5_vfree(out);
+ kvfree(out);
ex1:
- mlx5_vfree(in);
+ kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 0a6348cefc01..06801d6f595e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -96,6 +96,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
int err;
memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
in.uarn = cpu_to_be32(uarn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 9e7e3f1dce3e..af099057f0e9 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2913,16 +2913,11 @@ again:
flags |= MXGEFW_FLAGS_SMALL;
/* pad frames to at least ETH_ZLEN bytes */
- if (unlikely(skb->len < ETH_ZLEN)) {
- if (skb_padto(skb, ETH_ZLEN)) {
- /* The packet is gone, so we must
- * return 0 */
- ss->stats.tx_dropped += 1;
- return NETDEV_TX_OK;
- }
- /* adjust the len to account for the zero pad
- * so that the nic can know how long it is */
- skb->len = ETH_ZLEN;
+ if (eth_skb_pad(skb)) {
+ /* The packet is gone, so we must
+ * return 0 */
+ ss->stats.tx_dropped += 1;
+ return NETDEV_TX_OK;
}
}
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 9e4ddbba7036..66c2d50d5b8d 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -326,13 +326,9 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
macintosh_config->ident == MAC_MODEL_P588 ||
macintosh_config->ident == MAC_MODEL_P575 ||
macintosh_config->ident == MAC_MODEL_C610) {
- unsigned long flags;
int card_present;
- local_irq_save(flags);
card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
- local_irq_restore(flags);
-
if (!card_present) {
printk("none.\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 30d934d66356..44e8d7d25547 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1837,10 +1837,8 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
out:
- if (mac->iob_pdev)
- pci_dev_put(mac->iob_pdev);
- if (mac->dma_pdev)
- pci_dev_put(mac->dma_pdev);
+ pci_dev_put(mac->iob_pdev);
+ pci_dev_put(mac->dma_pdev);
free_netdev(dev);
out_disable_device:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index a913b3ad2f89..1aa25b13ace1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -376,13 +376,14 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
}
static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *netdev, const unsigned char *addr)
+ struct net_device *netdev,
+ const unsigned char *addr, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = -EOPNOTSUPP;
if (!adapter->fdb_mac_learn)
- return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
+ return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid);
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
qlcnic_sriov_check(adapter)) {
@@ -401,13 +402,13 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
- const unsigned char *addr, u16 flags)
+ const unsigned char *addr, u16 vid, u16 flags)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
if (!adapter->fdb_mac_learn)
- return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
+ return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
!qlcnic_sriov_check(adapter)) {
@@ -460,7 +461,7 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
}
static int qlcnic_get_phys_port_id(struct net_device *netdev,
- struct netdev_phys_port_id *ppid)
+ struct netdev_phys_item_id *ppid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 75b1693ec8bf..9c31e46d1eee 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -507,7 +507,7 @@ rx_status_loop:
netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
rx_tail, status, len);
- new_skb = netdev_alloc_skb_ip_align(dev, buflen);
+ new_skb = napi_alloc_skb(napi, buflen);
if (!new_skb) {
dev->stats.rx_dropped++;
goto rx_next;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 007b38cce69a..6d0b9dfac313 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -112,6 +112,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
#include <asm/irq.h>
#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
@@ -182,10 +183,13 @@ static int debug = -1;
/* Number of Tx descriptor registers. */
#define NUM_TX_DESC 4
-/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
-#define MAX_ETH_FRAME_SIZE 1536
+/* max supported Ethernet frame size -- must be at least (dev->mtu+18+4). */
+#define MAX_ETH_FRAME_SIZE 1792
-/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+/* max supported payload size */
+#define MAX_ETH_DATA_SIZE (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)
+
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+18+4). */
#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
@@ -920,11 +924,19 @@ static int rtl8139_set_features(struct net_device *dev, netdev_features_t featur
return 0;
}
+static int rtl8139_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_open = rtl8139_open,
.ndo_stop = rtl8139_close,
.ndo_get_stats64 = rtl8139_get_stats64,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = rtl8139_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = rtl8139_set_mac_address,
.ndo_start_xmit = rtl8139_start_xmit,
@@ -2025,7 +2037,7 @@ keep_pkt:
/* Malloc up new buffer, compatible with net-2e. */
/* Omit the four octet CRC from the length. */
- skb = netdev_alloc_skb_ip_align(dev, pkt_size);
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
if (likely(skb)) {
#if RX_BUF_IDX == 3
wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
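
With MAX_ETH_FRAME_SIZE raised to 1792, the largest payload rtl8139_change_mtu() accepts is the frame size minus the VLAN-tagged header (18 bytes) and the FCS (4 bytes). A standalone arithmetic check:

	#include <assert.h>

	#define MAX_ETH_FRAME_SIZE 1792
	#define VLAN_ETH_HLEN      18	/* dst MAC + src MAC + VLAN tag + ethertype */
	#define ETH_FCS_LEN        4
	#define MAX_ETH_DATA_SIZE  (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)

	int main(void)
	{
		assert(MAX_ETH_DATA_SIZE == 1770);	/* the new rtl8139 MTU ceiling */
		/* rtl8139_change_mtu() accepts only MTUs in [68, MAX_ETH_DATA_SIZE] */
		assert(68 <= 1500 && 1500 <= MAX_ETH_DATA_SIZE);
		return 0;
	}
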
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 040b13739947..32497f0e537c 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -6,10 +6,10 @@
/* The header prepended to received packets. */
struct rx_header {
- ushort pad; /* Pad. */
- ushort rx_count;
- ushort rx_status; /* Unknown bit assignments :-<. */
- ushort cur_addr; /* Apparently the current buffer address(?) */
+ ushort pad; /* Pad. */
+ ushort rx_count;
+ ushort rx_status; /* Unknown bit assignments :-<. */
+ ushort cur_addr; /* Apparently the current buffer address(?) */
};
#define PAR_DATA 0
@@ -29,22 +29,25 @@ struct rx_header {
#define RdAddr 0xC0
#define HNib 0x10
-enum page0_regs
-{
- /* The first six registers hold the ethernet physical station address. */
- PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
- TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
- TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
- ISR = 10, IMR = 11, /* Interrupt status and mask. */
- CMR1 = 12, /* Command register 1. */
- CMR2 = 13, /* Command register 2. */
- MODSEL = 14, /* Mode select register. */
- MAR = 14, /* Memory address register (?). */
- CMR2_h = 0x1d, };
-
-enum eepage_regs
-{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */
+enum page0_regs {
+ /* The first six registers hold
+ * the ethernet physical station address.
+ */
+ PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
+ TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
+ TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
+ ISR = 10, IMR = 11, /* Interrupt status and mask. */
+ CMR1 = 12, /* Command register 1. */
+ CMR2 = 13, /* Command register 2. */
+ MODSEL = 14, /* Mode select register. */
+ MAR = 14, /* Memory address register (?). */
+ CMR2_h = 0x1d,
+};
+enum eepage_regs {
+ PROM_CMD = 6,
+ PROM_DATA = 7 /* Note that PROM_CMD is in the "high" bits. */
+};
#define ISR_TxOK 0x01
#define ISR_RxOK 0x04
@@ -72,141 +75,146 @@ enum eepage_regs
#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
#define CMR2h_PROMISC 3 /* Promiscuous mode. */
-/* An inline function used below: it differs from inb() by explicitly return an unsigned
- char, saving a truncation. */
+/* An inline function used below: it differs from inb() by explicitly
+ * return an unsigned char, saving a truncation.
+ */
static inline unsigned char inbyte(unsigned short port)
{
- unsigned char _v;
- __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
- return _v;
+ unsigned char _v;
+
+ __asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port));
+ return _v;
}
/* Read register OFFSET.
- This command should always be terminated with read_end(). */
+ * This command should always be terminated with read_end().
+ */
static inline unsigned char read_nibble(short port, unsigned char offset)
{
- unsigned char retval;
- outb(EOC+offset, port + PAR_DATA);
- outb(RdAddr+offset, port + PAR_DATA);
- inbyte(port + PAR_STATUS); /* Settling time delay */
- retval = inbyte(port + PAR_STATUS);
- outb(EOC+offset, port + PAR_DATA);
-
- return retval;
+ unsigned char retval;
+
+ outb(EOC+offset, port + PAR_DATA);
+ outb(RdAddr+offset, port + PAR_DATA);
+ inbyte(port + PAR_STATUS); /* Settling time delay */
+ retval = inbyte(port + PAR_STATUS);
+ outb(EOC+offset, port + PAR_DATA);
+
+ return retval;
}
/* Functions for bulk data read. The interrupt line is always disabled. */
/* Get a byte using read mode 0, reading data from the control lines. */
static inline unsigned char read_byte_mode0(short ioaddr)
{
- unsigned char low_nib;
-
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
static inline unsigned char read_byte_mode2(short ioaddr)
{
- unsigned char low_nib;
-
- outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
- inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
/* Read a byte through the data register. */
static inline unsigned char read_byte_mode4(short ioaddr)
{
- unsigned char low_nib;
+ unsigned char low_nib;
- outb(RdAddr | MAR, ioaddr + PAR_DATA);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
/* Read a byte through the data register, double reading to allow settling. */
static inline unsigned char read_byte_mode6(short ioaddr)
{
- unsigned char low_nib;
-
- outb(RdAddr | MAR, ioaddr + PAR_DATA);
- inbyte(ioaddr + PAR_STATUS);
- low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
- outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
- inbyte(ioaddr + PAR_STATUS);
- return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
}
static inline void
write_reg(short port, unsigned char reg, unsigned char value)
{
- unsigned char outval;
- outb(EOC | reg, port + PAR_DATA);
- outval = WrAddr | reg;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outval &= 0xf0;
- outval |= value;
- outb(outval, port + PAR_DATA);
- outval &= 0x1f;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA);
-
- outb(EOC | outval, port + PAR_DATA);
+ unsigned char outval;
+
+ outb(EOC | reg, port + PAR_DATA);
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval &= 0xf0;
+ outval |= value;
+ outb(outval, port + PAR_DATA);
+ outval &= 0x1f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | outval, port + PAR_DATA);
}
static inline void
write_reg_high(short port, unsigned char reg, unsigned char value)
{
- unsigned char outval = EOC | HNib | reg;
+ unsigned char outval = EOC | HNib | reg;
- outb(outval, port + PAR_DATA);
- outval &= WrAddr | HNib | 0x0f;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+ outb(outval, port + PAR_DATA);
+ outval &= WrAddr | HNib | 0x0f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
- outval = WrAddr | HNib | value;
- outb(outval, port + PAR_DATA);
- outval &= HNib | 0x0f; /* HNib | value */
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA);
+ outval = WrAddr | HNib | value;
+ outb(outval, port + PAR_DATA);
+ outval &= HNib | 0x0f; /* HNib | value */
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
- outb(EOC | HNib | outval, port + PAR_DATA);
+ outb(EOC | HNib | outval, port + PAR_DATA);
}
/* Write a byte out using nibble mode. The low nibble is written first. */
static inline void
write_reg_byte(short port, unsigned char reg, unsigned char value)
{
- unsigned char outval;
- outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
- outval = WrAddr | reg;
- outb(outval, port + PAR_DATA);
- outb(outval, port + PAR_DATA); /* Double write for PS/2. */
-
- outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
- outb(value & 0x0f, port + PAR_DATA);
- value >>= 4;
- outb(value, port + PAR_DATA);
- outb(0x10 | value, port + PAR_DATA);
- outb(0x10 | value, port + PAR_DATA);
-
- outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
+ unsigned char outval;
+
+ outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
+ outb(value & 0x0f, port + PAR_DATA);
+ value >>= 4;
+ outb(value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+
+ outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
}
-/*
- * Bulk data writes to the packet buffer. The interrupt line remains enabled.
+/* Bulk data writes to the packet buffer. The interrupt line remains enabled.
* The first, faster method uses only the dataport (data modes 0, 2 & 4).
* The second (backup) method uses data and control regs (modes 1, 3 & 5).
* It should only be needed when there is skew between the individual data
@@ -214,28 +222,28 @@ write_reg_byte(short port, unsigned char reg, unsigned char value)
*/
static inline void write_byte_mode0(short ioaddr, unsigned char value)
{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
}
static inline void write_byte_mode1(short ioaddr, unsigned char value)
{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
- outb((value>>4) | 0x10, ioaddr + PAR_DATA);
- outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
}
/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
static inline void write_word_mode0(short ioaddr, unsigned short value)
{
- outb(value & 0x0f, ioaddr + PAR_DATA);
- value >>= 4;
- outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
- value >>= 4;
- outb(value & 0x0f, ioaddr + PAR_DATA);
- value >>= 4;
- outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
}
/* EEPROM_Ctrl bits. */
@@ -248,10 +256,10 @@ static inline void write_word_mode0(short ioaddr, unsigned short value)
/* Delay between EEPROM clock transitions. */
#define eeprom_delay(ticks) \
-do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0)
/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
-#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
+#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
#define EE_CMD_SIZE 27 /* The command+address+data size. */
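
The EE_* macros encode a 3-bit opcode above a 6-bit address and left-justify the result by 17 bits so the command can be clocked out MSB-first; opcodes 5, 6 and 7 all carry the always-set leading bit mentioned above. A standalone check of the bit layout:

	#include <assert.h>

	#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
	#define EE_READ(offset)      (((6 << 6) + (offset)) << 17)
	#define EE_ERASE(offset)     (((7 << 6) + (offset)) << 17)

	int main(void)
	{
		/* Opcodes 5 (101b), 6 (110b) and 7 (111b) all have their top
		 * bit set: the "always-set leading bit" of every command.
		 */
		assert(EE_READ(0) == (6 << 23));
		assert(((EE_READ(5) >> 17) >> 6) == 6);		/* opcode recovered */
		assert(((EE_READ(5) >> 17) & 0x3f) == 5);	/* address recovered */
		return 0;
	}
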
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index cf154f74cba1..3dad7e884952 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1377,6 +1377,16 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
return RTL_R8(IBISR0) & 0x02;
}
+static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
+ rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
+ RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
+ RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
+}
+
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
@@ -1417,12 +1427,7 @@ static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
- RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
- RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
+ rtl8168ep_stop_cmac(tp);
ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
@@ -5934,7 +5939,6 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -6027,7 +6031,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -6091,6 +6094,8 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ rtl8168ep_stop_cmac(tp);
+
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
@@ -6109,7 +6114,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -6832,14 +6836,6 @@ err_out:
return -EIO;
}
-static bool rtl_skb_pad(struct sk_buff *skb)
-{
- if (skb_padto(skb, ETH_ZLEN))
- return false;
- skb_put(skb, ETH_ZLEN - skb->len);
- return true;
-}
-
static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
{
return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
@@ -6980,7 +6976,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
u8 ip_protocol;
if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
+ return !(skb_checksum_help(skb) || eth_skb_pad(skb));
if (transport_offset > TCPHO_MAX) {
netif_warn(tp, tx_err, tp->dev,
@@ -7015,7 +7011,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return rtl_skb_pad(skb);
+ return !eth_skb_pad(skb);
}
return true;
@@ -7264,7 +7260,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
data = rtl8169_align(data);
dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
prefetch(data);
- skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
if (skb)
memcpy(skb->data, data, pkt_size);
dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
@@ -8005,6 +8001,12 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
return;
}
+static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
+{
+ rtl8168ep_stop_cmac(tp);
+ rtl_hw_init_8168g(tp);
+}
+
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -8017,12 +8019,13 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
+ rtl_hw_init_8168g(tp);
+ break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- rtl_hw_init_8168g(tp);
+ rtl_hw_init_8168ep(tp);
break;
-
default:
break;
}
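
The rtl8169_tso_csum_v2() change rewrites "skb_checksum_help(skb) == 0 && rtl_skb_pad(skb)" as "!(skb_checksum_help(skb) || eth_skb_pad(skb))"; both forms are true only when the two calls succeed, since eth_skb_pad(), like skb_checksum_help(), returns 0 on success. A standalone De Morgan check with hypothetical stand-ins:

	#include <assert.h>

	/* Hypothetical stand-ins that "succeed" by returning 0, like
	 * skb_checksum_help() and eth_skb_pad().
	 */
	static int op_a(int fail) { return fail ? -1 : 0; }
	static int op_b(int fail) { return fail ? -1 : 0; }

	int main(void)
	{
		for (int a = 0; a <= 1; a++) {
			for (int b = 0; b <= 1; b++) {
				int old_style = (op_a(a) == 0) && (op_b(b) == 0);
				int new_style = !(op_a(a) || op_b(b));

				assert(old_style == new_style);	/* De Morgan */
			}
		}
		return 0;
	}
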
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b5db6b3f939f..c29ba80ae02b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,5 +1,6 @@
/* SuperH Ethernet device driver
*
+ * Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2014 Renesas Solutions Corp.
* Copyright (C) 2013-2014 Cogent Embedded, Inc.
@@ -1136,7 +1137,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
DMA_FROM_DEVICE);
- rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
+ rxdesc->addr = virt_to_phys(skb->data);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
@@ -1387,11 +1388,14 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
int entry = mdp->cur_rx % mdp->num_rx_ring;
int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
+ int limit;
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
desc_status = edmac_to_cpu(mdp, rxdesc->status);
@@ -1400,11 +1404,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (--boguscnt < 0)
break;
- if (*quota <= 0)
- break;
-
- (*quota)--;
-
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
@@ -1471,7 +1470,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
rxdesc->buffer_length, DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
- rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
+ rxdesc->addr = virt_to_phys(skb->data);
}
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
@@ -1495,6 +1494,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
+ *quota -= limit - boguscnt - 1;
+
return *quota <= 0;
}
@@ -2746,6 +2747,7 @@ static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
{ }
@@ -2769,10 +2771,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(res == NULL)) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
if (!ndev)
@@ -2781,8 +2779,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- /* The sh Ether-specific entries in the device structure. */
- ndev->base_addr = res->start;
devno = pdev->id;
if (devno < 0)
devno = 0;
@@ -2806,6 +2802,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
+ ndev->base_addr = res->start;
+
spin_lock_init(&mdp->lock);
mdp->pdev = pdev;
@@ -2887,6 +2885,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
}
}
+ if (mdp->cd->rmiimode)
+ sh_eth_write(ndev, 0x1, RMIIMODE);
+
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
@@ -2973,6 +2974,7 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
+ { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
{ "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
{ }
};
diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig
new file mode 100644
index 000000000000..b9952ef040e4
--- /dev/null
+++ b/drivers/net/ethernet/rocker/Kconfig
@@ -0,0 +1,27 @@
+#
+# Rocker device configuration
+#
+
+config NET_VENDOR_ROCKER
+ bool "Rocker devices"
+ default y
+ ---help---
+ If you have a network device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Rocker devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_ROCKER
+
+config ROCKER
+ tristate "Rocker switch driver (EXPERIMENTAL)"
+ depends on PCI && NET_SWITCHDEV && BRIDGE
+ ---help---
+ This driver supports Rocker switch device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocker.
+
+endif # NET_VENDOR_ROCKER
diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile
new file mode 100644
index 000000000000..f85fb12f36f1
--- /dev/null
+++ b/drivers/net/ethernet/rocker/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Rocker network device drivers.
+#
+
+obj-$(CONFIG_ROCKER) += rocker.o
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
new file mode 100644
index 000000000000..2f398fa4b9e6
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -0,0 +1,4375 @@
+/*
+ * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/hashtable.h>
+#include <linux/crc32.h>
+#include <linux/sort.h>
+#include <linux/random.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <net/rtnetlink.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <generated/utsrelease.h>
+
+#include "rocker.h"
+
+static const char rocker_driver_name[] = "rocker";
+
+static const struct pci_device_id rocker_pci_id_table[] = {
+ {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
+ {0, }
+};
+
+struct rocker_flow_tbl_key {
+ u32 priority;
+ enum rocker_of_dpa_table_id tbl_id;
+ union {
+ struct {
+ u32 in_lport;
+ u32 in_lport_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ } ig_port;
+ struct {
+ u32 in_lport;
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ bool untagged;
+ __be16 new_vlan_id;
+ } vlan;
+ struct {
+ u32 in_lport;
+ u32 in_lport_mask;
+ __be16 eth_type;
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ bool copy_to_cpu;
+ } term_mac;
+ struct {
+ __be16 eth_type;
+ __be32 dst4;
+ __be32 dst4_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 group_id;
+ } ucast_routing;
+ struct {
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ int has_eth_dst;
+ int has_eth_dst_mask;
+ __be16 vlan_id;
+ u32 tunnel_id;
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 group_id;
+ bool copy_to_cpu;
+ } bridge;
+ struct {
+ u32 in_lport;
+ u32 in_lport_mask;
+ u8 eth_src[ETH_ALEN];
+ u8 eth_src_mask[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ __be16 eth_type;
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ u8 ip_proto;
+ u8 ip_proto_mask;
+ u8 ip_tos;
+ u8 ip_tos_mask;
+ u32 group_id;
+ } acl;
+ };
+};
+
+struct rocker_flow_tbl_entry {
+ struct hlist_node entry;
+ u32 ref_count;
+ u64 cookie;
+ struct rocker_flow_tbl_key key;
+ u32 key_crc32; /* key */
+};
+
+struct rocker_group_tbl_entry {
+ struct hlist_node entry;
+ u32 cmd;
+ u32 group_id; /* key */
+ u16 group_count;
+ u32 *group_ids;
+ union {
+ struct {
+ u8 pop_vlan;
+ } l2_interface;
+ struct {
+ u8 eth_src[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ __be16 vlan_id;
+ u32 group_id;
+ } l2_rewrite;
+ struct {
+ u8 eth_src[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ __be16 vlan_id;
+ bool ttl_check;
+ u32 group_id;
+ } l3_unicast;
+ };
+};
+
+struct rocker_fdb_tbl_entry {
+ struct hlist_node entry;
+ u32 key_crc32; /* key */
+ bool learned;
+ struct rocker_fdb_tbl_key {
+ u32 lport;
+ u8 addr[ETH_ALEN];
+ __be16 vlan_id;
+ } key;
+};
+
+struct rocker_internal_vlan_tbl_entry {
+ struct hlist_node entry;
+ int ifindex; /* key */
+ u32 ref_count;
+ __be16 vlan_id;
+};
+
+struct rocker_desc_info {
+ char *data; /* mapped */
+ size_t data_size;
+ size_t tlv_size;
+ struct rocker_desc *desc;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+};
+
+struct rocker_dma_ring_info {
+ size_t size;
+ u32 head;
+ u32 tail;
+ struct rocker_desc *desc; /* mapped */
+ dma_addr_t mapaddr;
+ struct rocker_desc_info *desc_info;
+ unsigned int type;
+};
+
+struct rocker;
+
+enum {
+ ROCKER_CTRL_LINK_LOCAL_MCAST,
+ ROCKER_CTRL_LOCAL_ARP,
+ ROCKER_CTRL_IPV4_MCAST,
+ ROCKER_CTRL_IPV6_MCAST,
+ ROCKER_CTRL_DFLT_BRIDGING,
+ ROCKER_CTRL_MAX,
+};
+
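+/* Internal VLAN ids are allocated from a reserved range starting at
+ * ROCKER_INTERNAL_VLAN_ID_BASE, one per ifindex (see
+ * rocker_internal_vlan_tbl_entry), and stand in for untagged traffic
+ * inside the device.
+ */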
+#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
+#define ROCKER_N_INTERNAL_VLANS 255
+#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
+#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
+
+struct rocker_port {
+ struct net_device *dev;
+ struct net_device *bridge_dev;
+ struct rocker *rocker;
+ unsigned int port_number;
+ u32 lport;
+ __be16 internal_vlan_id;
+ int stp_state;
+ u32 brport_flags;
+ bool ctrls[ROCKER_CTRL_MAX];
+ unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
+ struct napi_struct napi_tx;
+ struct napi_struct napi_rx;
+ struct rocker_dma_ring_info tx_ring;
+ struct rocker_dma_ring_info rx_ring;
+};
+
+struct rocker {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+ struct msix_entry *msix_entries;
+ unsigned int port_count;
+ struct rocker_port **ports;
+ struct {
+ u64 id;
+ } hw;
+ spinlock_t cmd_ring_lock;
+ struct rocker_dma_ring_info cmd_ring;
+ struct rocker_dma_ring_info event_ring;
+ DECLARE_HASHTABLE(flow_tbl, 16);
+ spinlock_t flow_tbl_lock;
+ u64 flow_tbl_next_cookie;
+ DECLARE_HASHTABLE(group_tbl, 16);
+ spinlock_t group_tbl_lock;
+ DECLARE_HASHTABLE(fdb_tbl, 16);
+ spinlock_t fdb_tbl_lock;
+ unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
+ DECLARE_HASHTABLE(internal_vlan_tbl, 8);
+ spinlock_t internal_vlan_tbl_lock;
+};
+
+static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
+static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+/* Rocker priority levels for flow table entries.  A higher-priority
+ * match takes precedence over a lower-priority one.
+ */
+
+enum {
+ ROCKER_PRIORITY_UNKNOWN = 0,
+ ROCKER_PRIORITY_IG_PORT = 1,
+ ROCKER_PRIORITY_VLAN = 1,
+ ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
+ ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
+ ROCKER_PRIORITY_UNICAST_ROUTING = 1,
+ ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
+ ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
+ ROCKER_PRIORITY_BRIDGING_VLAN = 3,
+ ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
+ ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
+ ROCKER_PRIORITY_BRIDGING_TENANT = 3,
+ ROCKER_PRIORITY_ACL_CTRL = 3,
+ ROCKER_PRIORITY_ACL_NORMAL = 2,
+ ROCKER_PRIORITY_ACL_DFLT = 1,
+};
+
+static bool rocker_vlan_id_is_internal(__be16 vlan_id)
+{
+ u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
+ u16 end = start + ROCKER_N_INTERNAL_VLANS - 1;
+ u16 _vlan_id = ntohs(vlan_id);
+
+ return (_vlan_id >= start && _vlan_id <= end);
+}
+
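+/* Map an 802.1Q vid to the VLAN id used internally.  Untagged traffic
+ * (vid 0) is mapped to the port's internal VLAN id, and *pop_vlan is
+ * set to tell the caller that this internally added tag has to be
+ * popped again.
+ */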
+static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+ u16 vid, bool *pop_vlan)
+{
+ __be16 vlan_id;
+
+ if (pop_vlan)
+ *pop_vlan = false;
+ vlan_id = htons(vid);
+ if (!vlan_id) {
+ vlan_id = rocker_port->internal_vlan_id;
+ if (pop_vlan)
+ *pop_vlan = true;
+ }
+
+ return vlan_id;
+}
+
+static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+ __be16 vlan_id)
+{
+ if (rocker_vlan_id_is_internal(vlan_id))
+ return 0;
+
+ return ntohs(vlan_id);
+}
+
+static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+{
+ return !!rocker_port->bridge_dev;
+}
+
+struct rocker_wait {
+ wait_queue_head_t wait;
+ bool done;
+ bool nowait;
+};
+
+static void rocker_wait_reset(struct rocker_wait *wait)
+{
+ wait->done = false;
+ wait->nowait = false;
+}
+
+static void rocker_wait_init(struct rocker_wait *wait)
+{
+ init_waitqueue_head(&wait->wait);
+ rocker_wait_reset(wait);
+}
+
+static struct rocker_wait *rocker_wait_create(gfp_t gfp)
+{
+ struct rocker_wait *wait;
+
+ wait = kmalloc(sizeof(*wait), gfp);
+ if (!wait)
+ return NULL;
+ rocker_wait_init(wait);
+ return wait;
+}
+
+static void rocker_wait_destroy(struct rocker_wait *wait)
+{
+ kfree(wait);
+}
+
+static bool rocker_wait_event_timeout(struct rocker_wait *wait,
+ unsigned long timeout)
+{
+ wait_event_timeout(wait->wait, wait->done, timeout);
+ return wait->done;
+}
+
+static void rocker_wait_wake_up(struct rocker_wait *wait)
+{
+ wait->done = true;
+ wake_up(&wait->wait);
+}
+
+static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+{
+ return rocker->msix_entries[vector].vector;
+}
+
+static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+{
+ return rocker_msix_vector(rocker_port->rocker,
+ ROCKER_MSIX_VEC_TX(rocker_port->port_number));
+}
+
+static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+{
+ return rocker_msix_vector(rocker_port->rocker,
+ ROCKER_MSIX_VEC_RX(rocker_port->port_number));
+}
+
+#define rocker_write32(rocker, reg, val) \
+ writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_read32(rocker, reg) \
+ readl((rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_write64(rocker, reg, val) \
+ writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_read64(rocker, reg) \
+ readq((rocker)->hw_addr + (ROCKER_ ## reg))
+
+/*****************************
+ * HW basic testing functions
+ *****************************/
+
+static int rocker_reg_test(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ u64 test_reg;
+ u64 rnd;
+
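+ /* The test register is expected to read back exactly twice the
+ * value written; keep the top bit clear so that rnd * 2 still
+ * fits in 32 bits.
+ */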
+ rnd = prandom_u32();
+ rnd >>= 1;
+ rocker_write32(rocker, TEST_REG, rnd);
+ test_reg = rocker_read32(rocker, TEST_REG);
+ if (test_reg != rnd * 2) {
+ dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
+ test_reg, rnd * 2);
+ return -EIO;
+ }
+
+ rnd = prandom_u32();
+ rnd <<= 31;
+ rnd |= prandom_u32();
+ rocker_write64(rocker, TEST_REG64, rnd);
+ test_reg = rocker_read64(rocker, TEST_REG64);
+ if (test_reg != rnd * 2) {
+ dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
+ test_reg, rnd * 2);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
+ u32 test_type, dma_addr_t dma_handle,
+ unsigned char *buf, unsigned char *expect,
+ size_t size)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int i;
+
+ rocker_wait_reset(wait);
+ rocker_write32(rocker, TEST_DMA_CTRL, test_type);
+
+ if (!rocker_wait_event_timeout(wait, HZ / 10)) {
+ dev_err(&pdev->dev, "no interrupt received within a timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (buf[i] != expect[i]) {
+ dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
+ buf[i], i, expect[i]);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
+#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
+
+static int rocker_dma_test_offset(struct rocker *rocker,
+ struct rocker_wait *wait, int offset)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ unsigned char *alloc;
+ unsigned char *buf;
+ unsigned char *expect;
+ dma_addr_t dma_handle;
+ int i;
+ int err;
+
+ alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
+ GFP_KERNEL | GFP_DMA);
+ if (!alloc)
+ return -ENOMEM;
+ buf = alloc + offset;
+ expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
+
+ dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(pdev, dma_handle)) {
+ err = -EIO;
+ goto free_alloc;
+ }
+
+ rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
+ rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
+
+ memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+ memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+ prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
+ for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
+ expect[i] = ~buf[i];
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+unmap:
+ pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+free_alloc:
+ kfree(alloc);
+
+ return err;
+}
+
+static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < 8; i++) {
+ err = rocker_dma_test_offset(rocker, wait, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_wait *wait = dev_id;
+
+ rocker_wait_wake_up(wait);
+
+ return IRQ_HANDLED;
+}
+
+static int rocker_basic_hw_test(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ struct rocker_wait wait;
+ int err;
+
+ err = rocker_reg_test(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "reg test failed\n");
+ return err;
+ }
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
+ rocker_test_irq_handler, 0,
+ rocker_driver_name, &wait);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign test irq\n");
+ return err;
+ }
+
+ rocker_wait_init(&wait);
+ rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
+
+ if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
+ dev_err(&pdev->dev, "no interrupt received within a timeout\n");
+ err = -EIO;
+ goto free_irq;
+ }
+
+ err = rocker_dma_test(rocker, &wait);
+ if (err)
+ dev_err(&pdev->dev, "dma test failed\n");
+
+free_irq:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
+ return err;
+}
+
+/******
+ * TLV
+ ******/
+
+#define ROCKER_TLV_ALIGNTO 8U
+#define ROCKER_TLV_ALIGN(len) \
+ (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
+#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
+
+/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
+ * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
+ * | Header | Pad | Payload | Pad |
+ * | (struct rocker_tlv) | ing | | ing |
+ * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
+ * <--------------------------- tlv->len -------------------------->
+ */
+
+static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
+ int *remaining)
+{
+ int totlen = ROCKER_TLV_ALIGN(tlv->len);
+
+ *remaining -= totlen;
+ return (struct rocker_tlv *) ((char *) tlv + totlen);
+}
+
+static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
+{
+ return remaining >= (int) ROCKER_TLV_HDRLEN &&
+ tlv->len >= ROCKER_TLV_HDRLEN &&
+ tlv->len <= remaining;
+}
+
+#define rocker_tlv_for_each(pos, head, len, rem) \
+ for (pos = head, rem = len; \
+ rocker_tlv_ok(pos, rem); \
+ pos = rocker_tlv_next(pos, &(rem)))
+
+#define rocker_tlv_for_each_nested(pos, tlv, rem) \
+ rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
+ rocker_tlv_len(tlv), rem)
+
+static int rocker_tlv_attr_size(int payload)
+{
+ return ROCKER_TLV_HDRLEN + payload;
+}
+
+static int rocker_tlv_total_size(int payload)
+{
+ return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
+}
+
+static int rocker_tlv_padlen(int payload)
+{
+ return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
+}
+
+static int rocker_tlv_type(const struct rocker_tlv *tlv)
+{
+ return tlv->type;
+}
+
+static void *rocker_tlv_data(const struct rocker_tlv *tlv)
+{
+ return (char *) tlv + ROCKER_TLV_HDRLEN;
+}
+
+static int rocker_tlv_len(const struct rocker_tlv *tlv)
+{
+ return tlv->len - ROCKER_TLV_HDRLEN;
+}
+
+static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
+{
+ return *(u8 *) rocker_tlv_data(tlv);
+}
+
+static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
+{
+ return *(u16 *) rocker_tlv_data(tlv);
+}
+
+static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
+{
+ return *(__be16 *) rocker_tlv_data(tlv);
+}
+
+static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
+{
+ return *(u32 *) rocker_tlv_data(tlv);
+}
+
+static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
+{
+ return *(u64 *) rocker_tlv_data(tlv);
+}
+
+static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+ const char *buf, int buf_len)
+{
+ const struct rocker_tlv *tlv;
+ const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
+ int rem;
+
+ memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
+
+ rocker_tlv_for_each(tlv, head, buf_len, rem) {
+ u32 type = rocker_tlv_type(tlv);
+
+ if (type > 0 && type <= maxtype)
+ tb[type] = (struct rocker_tlv *) tlv;
+ }
+}
+
+static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+ const struct rocker_tlv *tlv)
+{
+ rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
+ rocker_tlv_len(tlv));
+}
+
+static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
+ struct rocker_desc_info *desc_info)
+{
+ rocker_tlv_parse(tb, maxtype, desc_info->data,
+ desc_info->desc->tlv_size);
+}
+
+static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
+{
+ return (struct rocker_tlv *) ((char *) desc_info->data +
+ desc_info->tlv_size);
+}
+
+static int rocker_tlv_put(struct rocker_desc_info *desc_info,
+ int attrtype, int attrlen, const void *data)
+{
+ int tail_room = desc_info->data_size - desc_info->tlv_size;
+ int total_size = rocker_tlv_total_size(attrlen);
+ struct rocker_tlv *tlv;
+
+ if (unlikely(tail_room < total_size))
+ return -EMSGSIZE;
+
+ tlv = rocker_tlv_start(desc_info);
+ desc_info->tlv_size += total_size;
+ tlv->type = attrtype;
+ tlv->len = rocker_tlv_attr_size(attrlen);
+ memcpy(rocker_tlv_data(tlv), data, attrlen);
+ memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
+ return 0;
+}
+
+static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
+ int attrtype, u8 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+}
+
+static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
+ int attrtype, u16 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+}
+
+static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
+ int attrtype, __be16 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+}
+
+static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
+ int attrtype, u32 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+}
+
+static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
+ int attrtype, __be32 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+}
+
+static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
+ int attrtype, u64 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+}
+
+static struct rocker_tlv *
+rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
+{
+ struct rocker_tlv *start = rocker_tlv_start(desc_info);
+
+ if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
+ return NULL;
+
+ return start;
+}
+
+static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
+ struct rocker_tlv *start)
+{
+ start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
+}
+
+static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
+ struct rocker_tlv *start)
+{
+ desc_info->tlv_size = (char *) start - desc_info->data;
+}
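+
+/* A minimal, purely illustrative sketch of how the TLV helpers above
+ * compose; SOME_NEST_ATTR and SOME_U32_ATTR are placeholders, not real
+ * attributes:
+ *
+ *	nest = rocker_tlv_nest_start(desc_info, SOME_NEST_ATTR);
+ *	if (!nest)
+ *		return -EMSGSIZE;
+ *	if (rocker_tlv_put_u32(desc_info, SOME_U32_ATTR, val)) {
+ *		rocker_tlv_nest_cancel(desc_info, nest);
+ *		return -EMSGSIZE;
+ *	}
+ *	rocker_tlv_nest_end(desc_info, nest);
+ */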
+
+/******************************************
+ * DMA rings and descriptors manipulations
+ ******************************************/
+
+static u32 __pos_inc(u32 pos, size_t limit)
+{
+ return ++pos == limit ? 0 : pos;
+}
+
+static int rocker_desc_err(struct rocker_desc_info *desc_info)
+{
+ return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN);
+}
+
+static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+{
+ desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
+}
+
+static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+{
+ u32 comp_err = desc_info->desc->comp_err;
+
+ return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
+}
+
+static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+{
+ return (void *) desc_info->desc->cookie;
+}
+
+static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+ void *ptr)
+{
+ desc_info->desc->cookie = (long) ptr;
+}
+
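+/* One descriptor slot is always left unused: the ring counts as full
+ * when advancing head would make it equal to tail, which keeps a full
+ * ring distinguishable from an empty one (head == tail).
+ */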
+static struct rocker_desc_info *
+rocker_desc_head_get(struct rocker_dma_ring_info *info)
+{
+ struct rocker_desc_info *desc_info;
+ u32 head = __pos_inc(info->head, info->size);
+
+ desc_info = &info->desc_info[info->head];
+ if (head == info->tail)
+ return NULL; /* ring full */
+ desc_info->tlv_size = 0;
+ return desc_info;
+}
+
+static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+{
+ desc_info->desc->buf_size = desc_info->data_size;
+ desc_info->desc->tlv_size = desc_info->tlv_size;
+}
+
+static void rocker_desc_head_set(struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ struct rocker_desc_info *desc_info)
+{
+ u32 head = __pos_inc(info->head, info->size);
+
+ BUG_ON(head == info->tail);
+ rocker_desc_commit(desc_info);
+ info->head = head;
+ rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
+}
+
+static struct rocker_desc_info *
+rocker_desc_tail_get(struct rocker_dma_ring_info *info)
+{
+ struct rocker_desc_info *desc_info;
+
+ if (info->tail == info->head)
+ return NULL; /* nothing to be done between head and tail */
+ desc_info = &info->desc_info[info->tail];
+ if (!rocker_desc_gen(desc_info))
+ return NULL; /* gen bit not set, desc is not ready yet */
+ info->tail = __pos_inc(info->tail, info->size);
+ desc_info->tlv_size = desc_info->desc->tlv_size;
+ return desc_info;
+}
+
+static void rocker_dma_ring_credits_set(struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ u32 credits)
+{
+ if (credits)
+ rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
+}
+
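+/* Ring sizes must be a power of two within
+ * [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX]; round the requested size
+ * up and clamp it accordingly.
+ */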
+static unsigned long rocker_dma_ring_size_fix(size_t size)
+{
+ return max(ROCKER_DMA_SIZE_MIN,
+ min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
+}
+
+static int rocker_dma_ring_create(struct rocker *rocker,
+ unsigned int type,
+ size_t size,
+ struct rocker_dma_ring_info *info)
+{
+ int i;
+
+ BUG_ON(size != rocker_dma_ring_size_fix(size));
+ info->size = size;
+ info->type = type;
+ info->head = 0;
+ info->tail = 0;
+ info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
+ GFP_KERNEL);
+ if (!info->desc_info)
+ return -ENOMEM;
+
+ info->desc = pci_alloc_consistent(rocker->pdev,
+ info->size * sizeof(*info->desc),
+ &info->mapaddr);
+ if (!info->desc) {
+ kfree(info->desc_info);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < info->size; i++)
+ info->desc_info[i].desc = &info->desc[i];
+
+ rocker_write32(rocker, DMA_DESC_CTRL(info->type),
+ ROCKER_DMA_DESC_CTRL_RESET);
+ rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
+ rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
+
+ return 0;
+}
+
+static void rocker_dma_ring_destroy(struct rocker *rocker,
+ struct rocker_dma_ring_info *info)
+{
+ rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
+
+ pci_free_consistent(rocker->pdev,
+ info->size * sizeof(struct rocker_desc),
+ info->desc, info->mapaddr);
+ kfree(info->desc_info);
+}
+
+static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+ struct rocker_dma_ring_info *info)
+{
+ int i;
+
+ BUG_ON(info->head || info->tail);
+
+ /* When the hw is the producer on this ring (event, rx), head must
+ * be advanced over each desc once so the hw knows the descs are
+ * ready for it to use.
+ */
+ for (i = 0; i < info->size - 1; i++)
+ rocker_desc_head_set(rocker, info, &info->desc_info[i]);
+ rocker_desc_commit(&info->desc_info[i]);
+}
+
+static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ int direction, size_t buf_size)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int i;
+ int err;
+
+ for (i = 0; i < info->size; i++) {
+ struct rocker_desc_info *desc_info = &info->desc_info[i];
+ struct rocker_desc *desc = &info->desc[i];
+ dma_addr_t dma_handle;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ err = -ENOMEM;
+ goto rollback;
+ }
+
+ dma_handle = pci_map_single(pdev, buf, buf_size, direction);
+ if (pci_dma_mapping_error(pdev, dma_handle)) {
+ kfree(buf);
+ err = -EIO;
+ goto rollback;
+ }
+
+ desc_info->data = buf;
+ desc_info->data_size = buf_size;
+ dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
+
+ desc->buf_addr = dma_handle;
+ desc->buf_size = buf_size;
+ }
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--) {
+ struct rocker_desc_info *desc_info = &info->desc_info[i];
+
+ pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ desc_info->data_size, direction);
+ kfree(desc_info->data);
+ }
+ return err;
+}
+
+static void rocker_dma_ring_bufs_free(struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ int direction)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int i;
+
+ for (i = 0; i < info->size; i++) {
+ struct rocker_desc_info *desc_info = &info->desc_info[i];
+ struct rocker_desc *desc = &info->desc[i];
+
+ desc->buf_addr = 0;
+ desc->buf_size = 0;
+ pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ desc_info->data_size, direction);
+ kfree(desc_info->data);
+ }
+}
+
+static int rocker_dma_rings_init(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int err;
+
+ err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
+ ROCKER_DMA_CMD_DEFAULT_SIZE,
+ &rocker->cmd_ring);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create command dma ring\n");
+ return err;
+ }
+
+ spin_lock_init(&rocker->cmd_ring_lock);
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
+ goto err_dma_cmd_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
+ ROCKER_DMA_EVENT_DEFAULT_SIZE,
+ &rocker->event_ring);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create event dma ring\n");
+ goto err_dma_event_ring_create;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
+ PCI_DMA_FROMDEVICE, PAGE_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
+ goto err_dma_event_ring_bufs_alloc;
+ }
+ rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
+ return 0;
+
+err_dma_event_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+err_dma_event_ring_create:
+ rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL);
+err_dma_cmd_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+ return err;
+}
+
+static void rocker_dma_rings_fini(struct rocker *rocker)
+{
+ rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
+ PCI_DMA_FROMDEVICE);
+ rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+ rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL);
+ rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+}
+
+static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ struct sk_buff *skb, size_t buf_len)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+
+ dma_handle = pci_map_single(pdev, skb->data, buf_len,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, dma_handle))
+ return -EIO;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
+ goto tlv_put_failure;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
+ goto tlv_put_failure;
+ return 0;
+
+tlv_put_failure:
+ pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
+ desc_info->tlv_size = 0;
+ return -EMSGSIZE;
+}
+
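+/* Rx buffers must hold a maximally sized frame: MTU bytes of payload
+ * plus the Ethernet header, FCS and a possible VLAN tag.
+ */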
+static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+{
+ return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+}
+
+static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info)
+{
+ struct net_device *dev = rocker_port->dev;
+ struct sk_buff *skb;
+ size_t buf_len = rocker_port_rx_buf_len(rocker_port);
+ int err;
+
+ /* Ensure that hw will see tlv_size zero in case of an error.
+ * That tells hw to use another descriptor.
+ */
+ rocker_desc_cookie_ptr_set(desc_info, NULL);
+ desc_info->tlv_size = 0;
+
+ skb = netdev_alloc_skb_ip_align(dev, buf_len);
+ if (!skb)
+ return -ENOMEM;
+ err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
+ skb, buf_len);
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return err;
+ }
+ rocker_desc_cookie_ptr_set(desc_info, skb);
+ return 0;
+}
+
+static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
+ struct rocker_tlv **attrs)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+ size_t len;
+
+ if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
+ !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
+ return;
+ dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
+ len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
+ pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
+}
+
+static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
+ struct rocker_desc_info *desc_info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
+
+ if (!skb)
+ return;
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
+ rocker_dma_rx_ring_skb_unmap(rocker, attrs);
+ dev_kfree_skb_any(skb);
+}
+
+static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
+ struct rocker_port *rocker_port)
+{
+ struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ int i;
+ int err;
+
+ for (i = 0; i < rx_ring->size; i++) {
+ err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+ &rx_ring->desc_info[i]);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--)
+ rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
+ return err;
+}
+
+static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
+ struct rocker_port *rocker_port)
+{
+ struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ int i;
+
+ for (i = 0; i < rx_ring->size; i++)
+ rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
+}
+
+static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ int err;
+
+ err = rocker_dma_ring_create(rocker,
+ ROCKER_DMA_TX(rocker_port->port_number),
+ ROCKER_DMA_TX_DEFAULT_SIZE,
+ &rocker_port->tx_ring);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
+ return err;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE,
+ ROCKER_DMA_TX_DESC_SIZE);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
+ goto err_dma_tx_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_ring_create(rocker,
+ ROCKER_DMA_RX(rocker_port->port_number),
+ ROCKER_DMA_RX_DEFAULT_SIZE,
+ &rocker_port->rx_ring);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
+ goto err_dma_rx_ring_create;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL,
+ ROCKER_DMA_RX_DESC_SIZE);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
+ goto err_dma_rx_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
+ goto err_dma_rx_ring_skbs_alloc;
+ }
+ rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
+
+ return 0;
+
+err_dma_rx_ring_skbs_alloc:
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL);
+err_dma_rx_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+err_dma_rx_ring_create:
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE);
+err_dma_tx_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+ return err;
+}
+
+static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+
+ rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL);
+ rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE);
+ rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+}
+
+static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+{
+ u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
+
+ if (enable)
+ val |= 1ULL << rocker_port->lport;
+ else
+ val &= ~(1ULL << rocker_port->lport);
+ rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
+}
+
+/********************************
+ * Interrupt handler and helpers
+ ********************************/
+
+static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
+{
+ struct rocker *rocker = dev_id;
+ struct rocker_desc_info *desc_info;
+ struct rocker_wait *wait;
+ u32 credits = 0;
+
+ spin_lock(&rocker->cmd_ring_lock);
+ while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
+ wait = rocker_desc_cookie_ptr_get(desc_info);
+ if (wait->nowait) {
+ rocker_desc_gen_clear(desc_info);
+ rocker_wait_destroy(wait);
+ } else {
+ rocker_wait_wake_up(wait);
+ }
+ credits++;
+ }
+ spin_unlock(&rocker->cmd_ring_lock);
+ rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
+
+ return IRQ_HANDLED;
+}
+
+static void rocker_port_link_up(struct rocker_port *rocker_port)
+{
+ netif_carrier_on(rocker_port->dev);
+ netdev_info(rocker_port->dev, "Link is up\n");
+}
+
+static void rocker_port_link_down(struct rocker_port *rocker_port)
+{
+ netif_carrier_off(rocker_port->dev);
+ netdev_info(rocker_port->dev, "Link is down\n");
+}
+
+static int rocker_event_link_change(struct rocker *rocker,
+ const struct rocker_tlv *info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+ unsigned int port_number;
+ bool link_up;
+ struct rocker_port *rocker_port;
+
+ rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
+ if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] ||
+ !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
+ return -EIO;
+ port_number =
+ rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1;
+ link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
+
+ if (port_number >= rocker->port_count)
+ return -EINVAL;
+
+ rocker_port = rocker->ports[port_number];
+ if (netif_carrier_ok(rocker_port->dev) != link_up) {
+ if (link_up)
+ rocker_port_link_up(rocker_port);
+ else
+ rocker_port_link_down(rocker_port);
+ }
+
+ return 0;
+}
+
+#define ROCKER_OP_FLAG_REMOVE BIT(0)
+#define ROCKER_OP_FLAG_NOWAIT BIT(1)
+#define ROCKER_OP_FLAG_LEARNED BIT(2)
+#define ROCKER_OP_FLAG_REFRESH BIT(3)
+
+static int rocker_port_fdb(struct rocker_port *rocker_port,
+ const unsigned char *addr,
+ __be16 vlan_id, int flags);
+
+static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+ const struct rocker_tlv *info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+ unsigned int port_number;
+ struct rocker_port *rocker_port;
+ unsigned char *addr;
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
+ __be16 vlan_id;
+
+ rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
+ if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] ||
+ !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
+ !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
+ return -EIO;
+ port_number =
+ rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1;
+ addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
+ vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
+
+ if (port_number >= rocker->port_count)
+ return -EINVAL;
+
+ rocker_port = rocker->ports[port_number];
+
+ if (rocker_port->stp_state != BR_STATE_LEARNING &&
+ rocker_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
+ return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_event_process(struct rocker *rocker,
+ struct rocker_desc_info *desc_info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+ struct rocker_tlv *info;
+ u16 type;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
+ !attrs[ROCKER_TLV_EVENT_INFO])
+ return -EIO;
+
+ type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
+ info = attrs[ROCKER_TLV_EVENT_INFO];
+
+ switch (type) {
+ case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
+ return rocker_event_link_change(rocker, info);
+ case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
+ return rocker_event_mac_vlan_seen(rocker, info);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
+{
+ struct rocker *rocker = dev_id;
+ struct pci_dev *pdev = rocker->pdev;
+ struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
+ err = rocker_desc_err(desc_info);
+ if (err) {
+ dev_err(&pdev->dev, "event desc received with err %d\n",
+ err);
+ } else {
+ err = rocker_event_process(rocker, desc_info);
+ if (err)
+ dev_err(&pdev->dev, "event processing failed with err %d\n",
+ err);
+ }
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
+ credits++;
+ }
+ rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_port *rocker_port = dev_id;
+
+ napi_schedule(&rocker_port->napi_tx);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_port *rocker_port = dev_id;
+
+ napi_schedule(&rocker_port->napi_rx);
+ return IRQ_HANDLED;
+}
+
+/********************
+ * Command interface
+ ********************/
+
+typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv);
+
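+/* Execute one command against the device: 'prepare' fills the
+ * descriptor with request TLVs and the descriptor is posted on the cmd
+ * ring.  With 'nowait' the call returns immediately and the cmd IRQ
+ * handler does the cleanup; otherwise we sleep (with a short timeout)
+ * for completion and hand the response TLVs to the optional 'process'
+ * callback.
+ */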
+static int rocker_cmd_exec(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ rocker_cmd_cb_t prepare, void *prepare_priv,
+ rocker_cmd_cb_t process, void *process_priv,
+ bool nowait)
+{
+ struct rocker_desc_info *desc_info;
+ struct rocker_wait *wait;
+ unsigned long flags;
+ int err;
+
+ wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
+ if (!wait)
+ return -ENOMEM;
+ wait->nowait = nowait;
+
+ spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+ desc_info = rocker_desc_head_get(&rocker->cmd_ring);
+ if (!desc_info) {
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ err = -EAGAIN;
+ goto out;
+ }
+ err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+ if (err) {
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+ goto out;
+ }
+ rocker_desc_cookie_ptr_set(desc_info, wait);
+ rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+
+ if (nowait)
+ return 0;
+
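+ /* Do not free 'wait' on timeout: the hw may still complete the
+ * descriptor later and the cmd IRQ handler would then touch
+ * freed memory.
+ */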
+ if (!rocker_wait_event_timeout(wait, HZ / 10))
+ return -EIO;
+
+ err = rocker_desc_err(desc_info);
+ if (err)
+ goto out;
+
+ if (process)
+ err = process(rocker, rocker_port, desc_info, process_priv);
+
+ rocker_desc_gen_clear(desc_info);
+out:
+ rocker_wait_destroy(wait);
+ return err;
+}
+
+static int
+rocker_cmd_get_port_settings_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct ethtool_cmd *ecmd = priv;
+ struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ u32 speed;
+ u8 duplex;
+ u8 autoneg;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
+ !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
+ !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
+ return -EIO;
+
+ speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
+ duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
+ autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->supported = SUPPORTED_TP;
+ ecmd->phy_address = 0xff;
+ ecmd->port = PORT_TP;
+ ethtool_cmd_speed_set(ecmd, speed);
+ ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ unsigned char *macaddr = priv;
+ struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ struct rocker_tlv *attr;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
+ if (!attr)
+ return -EIO;
+
+ if (rocker_tlv_len(attr) != ETH_ALEN)
+ return -EINVAL;
+
+ ether_addr_copy(macaddr, rocker_tlv_data(attr));
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct ethtool_cmd *ecmd = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
+ ethtool_cmd_speed(ecmd)))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
+ ecmd->duplex))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
+ ecmd->autoneg))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ unsigned char *macaddr = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
+ ETH_ALEN, macaddr))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_learning_prep(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
+ rocker_port->lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
+ !!(rocker_port->brport_flags & BR_LEARNING)))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_cmd *ecmd)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_ethtool_proc,
+ ecmd, false);
+}
+
+static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
+ unsigned char *macaddr)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_macaddr_proc,
+ macaddr, false);
+}
+
+static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_cmd *ecmd)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_set_port_settings_ethtool_prep,
+ ecmd, NULL, NULL, false);
+}
+
+static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
+ unsigned char *macaddr)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_set_port_settings_macaddr_prep,
+ macaddr, NULL, NULL, false);
+}
+
+static int rocker_port_set_learning(struct rocker_port *rocker_port)
+{
+ return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ rocker_cmd_set_port_learning_prep,
+ NULL, NULL, NULL, false);
+}
+
+static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+ entry->key.ig_port.in_lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+ entry->key.ig_port.in_lport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.ig_port.goto_tbl))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+ entry->key.vlan.in_lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.vlan.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.vlan.vlan_id_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.vlan.goto_tbl))
+ return -EMSGSIZE;
+ if (entry->key.vlan.untagged &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
+ entry->key.vlan.new_vlan_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+ entry->key.term_mac.in_lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+ entry->key.term_mac.in_lport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.term_mac.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.term_mac.eth_dst))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.term_mac.eth_dst_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.term_mac.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.term_mac.vlan_id_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.term_mac.goto_tbl))
+ return -EMSGSIZE;
+ if (entry->key.term_mac.copy_to_cpu &&
+ rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
+ entry->key.term_mac.copy_to_cpu))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.ucast_routing.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
+ entry->key.ucast_routing.dst4))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
+ entry->key.ucast_routing.dst4_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.ucast_routing.goto_tbl))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.ucast_routing.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (entry->key.bridge.has_eth_dst &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.bridge.eth_dst))
+ return -EMSGSIZE;
+ if (entry->key.bridge.has_eth_dst_mask &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.bridge.eth_dst_mask))
+ return -EMSGSIZE;
+ if (entry->key.bridge.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.bridge.vlan_id))
+ return -EMSGSIZE;
+ if (entry->key.bridge.tunnel_id &&
+ rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
+ entry->key.bridge.tunnel_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.bridge.goto_tbl))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.bridge.group_id))
+ return -EMSGSIZE;
+ if (entry->key.bridge.copy_to_cpu &&
+ rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
+ entry->key.bridge.copy_to_cpu))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+ struct rocker_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
+ entry->key.acl.in_lport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
+ entry->key.acl.in_lport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->key.acl.eth_src))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
+ ETH_ALEN, entry->key.acl.eth_src_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.acl.eth_dst))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.acl.eth_dst_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.acl.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.acl.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.acl.vlan_id_mask))
+ return -EMSGSIZE;
+
+ switch (ntohs(entry->key.acl.eth_type)) {
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
+ entry->key.acl.ip_proto))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
+ entry->key.acl.ip_proto_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
+ entry->key.acl.ip_tos & 0x3f))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
+ entry->key.acl.ip_tos_mask & 0x3f))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
+ (entry->key.acl.ip_tos & 0xc0) >> 6))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_ECN_MASK,
+ (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
+ return -EMSGSIZE;
+ break;
+ }
+
+ if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
+ rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.acl.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_flow_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+ int err = 0;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
+ entry->key.tbl_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
+ entry->key.priority))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
+ entry->cookie))
+ return -EMSGSIZE;
+
+ switch (entry->key.tbl_id) {
+ case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
+ err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_VLAN:
+ err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
+ err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
+ err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
+ err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
+ err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
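+/* Flow deletion is keyed purely by the 64-bit cookie assigned when the
+ * flow was added; none of the match fields need to be resent.
+ */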
+static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct rocker_flow_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
+ entry->cookie))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
+ struct rocker_group_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT,
+ ROCKER_GROUP_PORT_GET(entry->group_id)))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
+ entry->l2_interface.pop_vlan))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
+ struct rocker_group_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
+ entry->l2_rewrite.group_id))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->l2_rewrite.eth_src))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->l2_rewrite.eth_dst))
+ return -EMSGSIZE;
+ if (entry->l2_rewrite.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->l2_rewrite.vlan_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
+ struct rocker_group_tbl_entry *entry)
+{
+ int i;
+ struct rocker_tlv *group_ids;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
+ entry->group_count))
+ return -EMSGSIZE;
+
+ group_ids = rocker_tlv_nest_start(desc_info,
+ ROCKER_TLV_OF_DPA_GROUP_IDS);
+ if (!group_ids)
+ return -EMSGSIZE;
+
+ for (i = 0; i < entry->group_count; i++) {
+ /* Note TLV array is 1-based */
+ if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
+ return -EMSGSIZE;
+ }
+
+ rocker_tlv_nest_end(desc_info, group_ids);
+
+ return 0;
+}
+
+static int
+rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
+ struct rocker_group_tbl_entry *entry)
+{
+ if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->l3_unicast.eth_src))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->l3_unicast.eth_dst))
+ return -EMSGSIZE;
+ if (entry->l3_unicast.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->l3_unicast.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
+ entry->l3_unicast.ttl_check))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
+ entry->l3_unicast.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rocker_cmd_group_tbl_add(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_group_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+ int err = 0;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->group_id))
+ return -EMSGSIZE;
+
+ switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
+ err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
+ err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+ err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
+ err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
+ break;
+ default:
+ err = -ENOTSUPP;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+static int rocker_cmd_group_tbl_del(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct rocker_group_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->group_id))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+/*****************************************
+ * Flow, group, FDB, internal VLAN tables
+ *****************************************/
+
+static int rocker_init_tbls(struct rocker *rocker)
+{
+ hash_init(rocker->flow_tbl);
+ spin_lock_init(&rocker->flow_tbl_lock);
+
+ hash_init(rocker->group_tbl);
+ spin_lock_init(&rocker->group_tbl_lock);
+
+ hash_init(rocker->fdb_tbl);
+ spin_lock_init(&rocker->fdb_tbl_lock);
+
+ hash_init(rocker->internal_vlan_tbl);
+ spin_lock_init(&rocker->internal_vlan_tbl_lock);
+
+ return 0;
+}
+
+static void rocker_free_tbls(struct rocker *rocker)
+{
+ unsigned long flags;
+ struct rocker_flow_tbl_entry *flow_entry;
+ struct rocker_group_tbl_entry *group_entry;
+ struct rocker_fdb_tbl_entry *fdb_entry;
+ struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
+ struct hlist_node *tmp;
+ int bkt;
+
+ spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+ hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
+ hash_del(&flow_entry->entry);
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+ spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+ hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
+ hash_del(&group_entry->entry);
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
+ hash_del(&fdb_entry->entry);
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
+
+ spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
+ hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
+ tmp, internal_vlan_entry, entry)
+ hash_del(&internal_vlan_entry->entry);
+ spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
+}
+
+static struct rocker_flow_tbl_entry *
+rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+{
+ struct rocker_flow_tbl_entry *found;
+
+ hash_for_each_possible(rocker->flow_tbl, found,
+ entry, match->key_crc32) {
+ if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+ return found;
+ }
+
+ return NULL;
+}
+
+static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
+ struct rocker_flow_tbl_entry *match,
+ bool nowait)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_flow_tbl_entry *found;
+ unsigned long flags;
+ bool add_to_hw = false;
+ int err = 0;
+
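+ /* Hash the full flow key; the CRC32 doubles as the hash-table bucket key */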
+ match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+
+ spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+
+ found = rocker_flow_tbl_find(rocker, match);
+
+ if (found) {
+ kfree(match);
+ } else {
+ found = match;
+ found->cookie = rocker->flow_tbl_next_cookie++;
+ hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
+ add_to_hw = true;
+ }
+
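+ /* Each caller takes a reference; the entry is pushed to HW only on first add */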
+ found->ref_count++;
+
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+ if (add_to_hw) {
+ err = rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_flow_tbl_add,
+ found, NULL, NULL, nowait);
+ if (err) {
+ spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+ hash_del(&found->entry);
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+ kfree(found);
+ }
+ }
+
+ return err;
+}
+
+static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
+ struct rocker_flow_tbl_entry *match,
+ bool nowait)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_flow_tbl_entry *found;
+ unsigned long flags;
+ bool del_from_hw = false;
+ int err = 0;
+
+ match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+
+ spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
+
+ found = rocker_flow_tbl_find(rocker, match);
+
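+ /* Drop one reference; remove from HW only when the last user is gone */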
+ if (found) {
+ found->ref_count--;
+ if (found->ref_count == 0) {
+ hash_del(&found->entry);
+ del_from_hw = true;
+ }
+ }
+
+ spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
+
+ kfree(match);
+
+ if (del_from_hw) {
+ err = rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_flow_tbl_del,
+ found, NULL, NULL, nowait);
+ kfree(found);
+ }
+
+ return err;
+}
+
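+/* NOWAIT operations may run in atomic context, so their allocations must not sleep */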
+static gfp_t rocker_op_flags_gfp(int flags)
+{
+ return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
+ int flags, struct rocker_flow_tbl_entry *entry)
+{
+ bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
+
+ if (flags & ROCKER_OP_FLAG_REMOVE)
+ return rocker_flow_tbl_del(rocker_port, entry, nowait);
+ else
+ return rocker_flow_tbl_add(rocker_port, entry, nowait);
+}
+
+static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
+ int flags, u32 in_lport, u32 in_lport_mask,
+ enum rocker_of_dpa_table_id goto_tbl)
+{
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.priority = ROCKER_PRIORITY_IG_PORT;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
+ entry->key.ig_port.in_lport = in_lport;
+ entry->key.ig_port.in_lport_mask = in_lport_mask;
+ entry->key.ig_port.goto_tbl = goto_tbl;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
+ int flags, u32 in_lport,
+ __be16 vlan_id, __be16 vlan_id_mask,
+ enum rocker_of_dpa_table_id goto_tbl,
+ bool untagged, __be16 new_vlan_id)
+{
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.priority = ROCKER_PRIORITY_VLAN;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
+ entry->key.vlan.in_lport = in_lport;
+ entry->key.vlan.vlan_id = vlan_id;
+ entry->key.vlan.vlan_id_mask = vlan_id_mask;
+ entry->key.vlan.goto_tbl = goto_tbl;
+
+ entry->key.vlan.untagged = untagged;
+ entry->key.vlan.new_vlan_id = new_vlan_id;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
+ u32 in_lport, u32 in_lport_mask,
+ __be16 eth_type, const u8 *eth_dst,
+ const u8 *eth_dst_mask, __be16 vlan_id,
+ __be16 vlan_id_mask, bool copy_to_cpu,
+ int flags)
+{
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ if (is_multicast_ether_addr(eth_dst)) {
+ entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
+ entry->key.term_mac.goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
+ } else {
+ entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
+ entry->key.term_mac.goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+ }
+
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+ entry->key.term_mac.in_lport = in_lport;
+ entry->key.term_mac.in_lport_mask = in_lport_mask;
+ entry->key.term_mac.eth_type = eth_type;
+ ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
+ ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
+ entry->key.term_mac.vlan_id = vlan_id;
+ entry->key.term_mac.vlan_id_mask = vlan_id_mask;
+ entry->key.term_mac.copy_to_cpu = copy_to_cpu;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
+ int flags,
+ const u8 *eth_dst, const u8 *eth_dst_mask,
+ __be16 vlan_id, u32 tunnel_id,
+ enum rocker_of_dpa_table_id goto_tbl,
+ u32 group_id, bool copy_to_cpu)
+{
+ struct rocker_flow_tbl_entry *entry;
+ u32 priority;
+ bool vlan_bridging = !!vlan_id;
+ bool dflt = !eth_dst || eth_dst_mask;
+ bool wild = false;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
+
+ if (eth_dst) {
+ entry->key.bridge.has_eth_dst = 1;
+ ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
+ }
+ if (eth_dst_mask) {
+ entry->key.bridge.has_eth_dst_mask = 1;
+ ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
+ if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
+ wild = true;
+ }
+
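+ /* Priority depends on VLAN vs. tenant bridging and on whether the
+ * match is an exact entry or a (possibly wildcarded) default entry.
+ */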
+ priority = ROCKER_PRIORITY_UNKNOWN;
+ if (vlan_bridging && dflt && wild)
+ priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
+ else if (vlan_bridging && dflt && !wild)
+ priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
+ else if (vlan_bridging && !dflt)
+ priority = ROCKER_PRIORITY_BRIDGING_VLAN;
+ else if (!vlan_bridging && dflt && wild)
+ priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
+ else if (!vlan_bridging && dflt && !wild)
+ priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
+ else if (!vlan_bridging && !dflt)
+ priority = ROCKER_PRIORITY_BRIDGING_TENANT;
+
+ entry->key.priority = priority;
+ entry->key.bridge.vlan_id = vlan_id;
+ entry->key.bridge.tunnel_id = tunnel_id;
+ entry->key.bridge.goto_tbl = goto_tbl;
+ entry->key.bridge.group_id = group_id;
+ entry->key.bridge.copy_to_cpu = copy_to_cpu;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
+ int flags, u32 in_lport,
+ u32 in_lport_mask,
+ const u8 *eth_src, const u8 *eth_src_mask,
+ const u8 *eth_dst, const u8 *eth_dst_mask,
+ __be16 eth_type,
+ __be16 vlan_id, __be16 vlan_id_mask,
+ u8 ip_proto, u8 ip_proto_mask,
+ u8 ip_tos, u8 ip_tos_mask,
+ u32 group_id)
+{
+ u32 priority;
+ struct rocker_flow_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ priority = ROCKER_PRIORITY_ACL_NORMAL;
+ if (eth_dst && eth_dst_mask) {
+ if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
+ priority = ROCKER_PRIORITY_ACL_DFLT;
+ else if (is_link_local_ether_addr(eth_dst))
+ priority = ROCKER_PRIORITY_ACL_CTRL;
+ }
+
+ entry->key.priority = priority;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ entry->key.acl.in_lport = in_lport;
+ entry->key.acl.in_lport_mask = in_lport_mask;
+
+ if (eth_src)
+ ether_addr_copy(entry->key.acl.eth_src, eth_src);
+ if (eth_src_mask)
+ ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
+ if (eth_dst)
+ ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
+ if (eth_dst_mask)
+ ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
+
+ entry->key.acl.eth_type = eth_type;
+ entry->key.acl.vlan_id = vlan_id;
+ entry->key.acl.vlan_id_mask = vlan_id_mask;
+ entry->key.acl.ip_proto = ip_proto;
+ entry->key.acl.ip_proto_mask = ip_proto_mask;
+ entry->key.acl.ip_tos = ip_tos;
+ entry->key.acl.ip_tos_mask = ip_tos_mask;
+ entry->key.acl.group_id = group_id;
+
+ return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
+static struct rocker_group_tbl_entry *
+rocker_group_tbl_find(struct rocker *rocker,
+ struct rocker_group_tbl_entry *match)
+{
+ struct rocker_group_tbl_entry *found;
+
+ hash_for_each_possible(rocker->group_tbl, found,
+ entry, match->group_id) {
+ if (found->group_id == match->group_id)
+ return found;
+ }
+
+ return NULL;
+}
+
+static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
+{
+ switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+ kfree(entry->group_ids);
+ break;
+ default:
+ break;
+ }
+ kfree(entry);
+}
+
+static int rocker_group_tbl_add(struct rocker_port *rocker_port,
+ struct rocker_group_tbl_entry *match,
+ bool nowait)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_group_tbl_entry *found;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+
+ found = rocker_group_tbl_find(rocker, match);
+
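+ /* An existing entry is replaced and sent to HW as a MOD, otherwise as an ADD */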
+ if (found) {
+ hash_del(&found->entry);
+ rocker_group_tbl_entry_free(found);
+ found = match;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
+ } else {
+ found = match;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
+ }
+
+ hash_add(rocker->group_tbl, &found->entry, found->group_id);
+
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+ if (found->cmd)
+ err = rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_group_tbl_add,
+ found, NULL, NULL, nowait);
+
+ return err;
+}
+
+static int rocker_group_tbl_del(struct rocker_port *rocker_port,
+ struct rocker_group_tbl_entry *match,
+ bool nowait)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_group_tbl_entry *found;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&rocker->group_tbl_lock, flags);
+
+ found = rocker_group_tbl_find(rocker, match);
+
+ if (found) {
+ hash_del(&found->entry);
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
+ }
+
+ spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
+
+ rocker_group_tbl_entry_free(match);
+
+ if (found) {
+ err = rocker_cmd_exec(rocker, rocker_port,
+ rocker_cmd_group_tbl_del,
+ found, NULL, NULL, nowait);
+ rocker_group_tbl_entry_free(found);
+ }
+
+ return err;
+}
+
+static int rocker_group_tbl_do(struct rocker_port *rocker_port,
+ int flags, struct rocker_group_tbl_entry *entry)
+{
+ bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
+
+ if (flags & ROCKER_OP_FLAG_REMOVE)
+ return rocker_group_tbl_del(rocker_port, entry, nowait);
+ else
+ return rocker_group_tbl_add(rocker_port, entry, nowait);
+}
+
+static int rocker_group_l2_interface(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id,
+ u32 out_lport, int pop_vlan)
+{
+ struct rocker_group_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+ entry->l2_interface.pop_vlan = pop_vlan;
+
+ return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
+ int flags, u8 group_count,
+ u32 *group_ids, u32 group_id)
+{
+ struct rocker_group_tbl_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->group_id = group_id;
+ entry->group_count = group_count;
+
+ entry->group_ids = kcalloc(group_count, sizeof(u32),
+ rocker_op_flags_gfp(flags));
+ if (!entry->group_ids) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
+
+ return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_group_l2_flood(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id,
+ u8 group_count, u32 *group_ids,
+ u32 group_id)
+{
+ return rocker_group_l2_fan_out(rocker_port, flags,
+ group_count, group_ids,
+ group_id);
+}
+
+static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id)
+{
+ struct rocker_port *p;
+ struct rocker *rocker = rocker_port->rocker;
+ u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+ u32 group_ids[rocker->port_count];
+ u8 group_count = 0;
+ int err;
+ int i;
+
+ /* Adjust the flood group for this VLAN. The flood group
+ * references an L2 interface group for each port in this
+ * VLAN.
+ */
+
+ for (i = 0; i < rocker->port_count; i++) {
+ p = rocker->ports[i];
+ if (!rocker_port_is_bridged(p))
+ continue;
+ if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
+ group_ids[group_count++] =
+ ROCKER_GROUP_L2_INTERFACE(vlan_id,
+ p->lport);
+ }
+ }
+
+ /* If there are no bridged ports in this VLAN, we're done */
+ if (group_count == 0)
+ return 0;
+
+ err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
+ group_count, group_ids,
+ group_id);
+ if (err)
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 flood group\n", err);
+
+ return err;
+}
+
+static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id,
+ bool pop_vlan)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_port *p;
+ bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+ u32 out_lport;
+ int ref = 0;
+ int err;
+ int i;
+
+ /* An L2 interface group for this port in this VLAN, but
+ * only when port STP state is LEARNING|FORWARDING.
+ */
+
+ if (rocker_port->stp_state == BR_STATE_LEARNING ||
+ rocker_port->stp_state == BR_STATE_FORWARDING) {
+ out_lport = rocker_port->lport;
+ err = rocker_group_l2_interface(rocker_port, flags,
+ vlan_id, out_lport,
+ pop_vlan);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 group for lport %d\n",
+ err, out_lport);
+ return err;
+ }
+ }
+
+ /* An L2 interface group for this VLAN to CPU port.
+ * Add when first port joins this VLAN and destroy when
+ * last port leaves this VLAN.
+ */
+
+ for (i = 0; i < rocker->port_count; i++) {
+ p = rocker->ports[i];
+ if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+ ref++;
+ }
+
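+ /* Only act when the first port joins or the last port leaves this VLAN */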
+ if ((!adding || ref != 1) && (adding || ref != 0))
+ return 0;
+
+ out_lport = 0;
+ err = rocker_group_l2_interface(rocker_port, flags,
+ vlan_id, out_lport,
+ pop_vlan);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 group for CPU port\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static struct rocker_ctrl {
+ const u8 *eth_dst;
+ const u8 *eth_dst_mask;
+ __be16 eth_type;
+ bool acl;
+ bool bridge;
+ bool term;
+ bool copy_to_cpu;
+} rocker_ctrls[] = {
+ [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
+ /* pass link local multicast pkts up to CPU for filtering */
+ .eth_dst = ll_mac,
+ .eth_dst_mask = ll_mask,
+ .acl = true,
+ },
+ [ROCKER_CTRL_LOCAL_ARP] = {
+ /* pass local ARP pkts up to CPU */
+ .eth_dst = zero_mac,
+ .eth_dst_mask = zero_mac,
+ .eth_type = htons(ETH_P_ARP),
+ .acl = true,
+ },
+ [ROCKER_CTRL_IPV4_MCAST] = {
+ /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
+ .eth_dst = ipv4_mcast,
+ .eth_dst_mask = ipv4_mask,
+ .eth_type = htons(ETH_P_IP),
+ .term = true,
+ .copy_to_cpu = true,
+ },
+ [ROCKER_CTRL_IPV6_MCAST] = {
+ /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
+ .eth_dst = ipv6_mcast,
+ .eth_dst_mask = ipv6_mask,
+ .eth_type = htons(ETH_P_IPV6),
+ .term = true,
+ .copy_to_cpu = true,
+ },
+ [ROCKER_CTRL_DFLT_BRIDGING] = {
+ /* flood any pkts on vlan */
+ .bridge = true,
+ .copy_to_cpu = true,
+ },
+};
+
+static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
+ int flags, struct rocker_ctrl *ctrl,
+ __be16 vlan_id)
+{
+ u32 in_lport = rocker_port->lport;
+ u32 in_lport_mask = 0xffffffff;
+ u32 out_lport = 0;
+ u8 *eth_src = NULL;
+ u8 *eth_src_mask = NULL;
+ __be16 vlan_id_mask = htons(0xffff);
+ u8 ip_proto = 0;
+ u8 ip_proto_mask = 0;
+ u8 ip_tos = 0;
+ u8 ip_tos_mask = 0;
+ u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+ int err;
+
+ err = rocker_flow_tbl_acl(rocker_port, flags,
+ in_lport, in_lport_mask,
+ eth_src, eth_src_mask,
+ ctrl->eth_dst, ctrl->eth_dst_mask,
+ ctrl->eth_type,
+ vlan_id, vlan_id_mask,
+ ip_proto, ip_proto_mask,
+ ip_tos, ip_tos_mask,
+ group_id);
+
+ if (err)
+ netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
+
+ return err;
+}
+
+static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
+ int flags, struct rocker_ctrl *ctrl,
+ __be16 vlan_id)
+{
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+ u32 tunnel_id = 0;
+ int err;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return 0;
+
+ err = rocker_flow_tbl_bridge(rocker_port, flags,
+ ctrl->eth_dst, ctrl->eth_dst_mask,
+ vlan_id, tunnel_id,
+ goto_tbl, group_id, ctrl->copy_to_cpu);
+
+ if (err)
+ netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
+
+ return err;
+}
+
+static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
+ int flags, struct rocker_ctrl *ctrl,
+ __be16 vlan_id)
+{
+ u32 in_lport_mask = 0xffffffff;
+ __be16 vlan_id_mask = htons(0xffff);
+ int err;
+
+ if (ntohs(vlan_id) == 0)
+ vlan_id = rocker_port->internal_vlan_id;
+
+ err = rocker_flow_tbl_term_mac(rocker_port,
+ rocker_port->lport, in_lport_mask,
+ ctrl->eth_type, ctrl->eth_dst,
+ ctrl->eth_dst_mask, vlan_id,
+ vlan_id_mask, ctrl->copy_to_cpu,
+ flags);
+
+ if (err)
+ netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
+
+ return err;
+}
+
+static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
+ struct rocker_ctrl *ctrl, __be16 vlan_id)
+{
+ if (ctrl->acl)
+ return rocker_port_ctrl_vlan_acl(rocker_port, flags,
+ ctrl, vlan_id);
+ if (ctrl->bridge)
+ return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
+ ctrl, vlan_id);
+
+ if (ctrl->term)
+ return rocker_port_ctrl_vlan_term(rocker_port, flags,
+ ctrl, vlan_id);
+
+ return -EOPNOTSUPP;
+}
+
+static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < ROCKER_CTRL_MAX; i++) {
+ if (rocker_port->ctrls[i]) {
+ err = rocker_port_ctrl_vlan(rocker_port, flags,
+ &rocker_ctrls[i], vlan_id);
+ if (err)
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
+ struct rocker_ctrl *ctrl)
+{
+ u16 vid;
+ int err = 0;
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ if (!test_bit(vid, rocker_port->vlan_bitmap))
+ continue;
+ err = rocker_port_ctrl_vlan(rocker_port, flags,
+ ctrl, htons(vid));
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
+ u16 vid)
+{
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+ u32 in_lport = rocker_port->lport;
+ __be16 vlan_id = htons(vid);
+ __be16 vlan_id_mask = htons(0xffff);
+ __be16 internal_vlan_id;
+ bool untagged;
+ bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+ int err;
+
+ internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
+
+ if (adding && test_and_set_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
+ return 0; /* already added */
+ else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
+ return 0; /* already removed */
+
+ if (adding) {
+ err = rocker_port_ctrl_vlan_add(rocker_port, flags,
+ internal_vlan_id);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port ctrl vlan add\n", err);
+ return err;
+ }
+ }
+
+ err = rocker_port_vlan_l2_groups(rocker_port, flags,
+ internal_vlan_id, untagged);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 groups\n", err);
+ return err;
+ }
+
+ err = rocker_port_vlan_flood_group(rocker_port, flags,
+ internal_vlan_id);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 flood group\n", err);
+ return err;
+ }
+
+ err = rocker_flow_tbl_vlan(rocker_port, flags,
+ in_lport, vlan_id, vlan_id_mask,
+ goto_tbl, untagged, internal_vlan_id);
+ if (err)
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN table\n", err);
+
+ return err;
+}
+
+static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
+{
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 in_lport;
+ u32 in_lport_mask;
+ int err;
+
+ /* Normal Ethernet Frames. Matches pkts from any local physical
+ * ports. Goto VLAN tbl.
+ */
+
+ in_lport = 0;
+ in_lport_mask = 0xffff0000;
+ goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
+
+ err = rocker_flow_tbl_ig_port(rocker_port, flags,
+ in_lport, in_lport_mask,
+ goto_tbl);
+ if (err)
+ netdev_err(rocker_port->dev,
+ "Error (%d) ingress port table entry\n", err);
+
+ return err;
+}
+
+struct rocker_fdb_learn_work {
+ struct work_struct work;
+ struct net_device *dev;
+ int flags;
+ u8 addr[ETH_ALEN];
+ u16 vid;
+};
+
+static void rocker_port_fdb_learn_work(struct work_struct *work)
+{
+ struct rocker_fdb_learn_work *lw =
+ container_of(work, struct rocker_fdb_learn_work, work);
+ bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
+ bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
+
+ if (learned && removing)
+ br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid);
+ else if (learned && !removing)
+ br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid);
+
+ kfree(lw);
+}
+
+static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
+ int flags, const u8 *addr, __be16 vlan_id)
+{
+ struct rocker_fdb_learn_work *lw;
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 out_lport = rocker_port->lport;
+ u32 tunnel_id = 0;
+ u32 group_id = ROCKER_GROUP_NONE;
+ bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
+ bool copy_to_cpu = false;
+ int err;
+
+ if (rocker_port_is_bridged(rocker_port))
+ group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
+
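+ /* Program the bridging flow unless we're just refreshing an existing entry */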
+ if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
+ err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
+ vlan_id, tunnel_id, goto_tbl,
+ group_id, copy_to_cpu);
+ if (err)
+ return err;
+ }
+
+ if (!syncing)
+ return 0;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return 0;
+
+ lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
+ if (!lw)
+ return -ENOMEM;
+
+ INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
+
+ lw->dev = rocker_port->dev;
+ lw->flags = flags;
+ ether_addr_copy(lw->addr, addr);
+ lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
+
+ schedule_work(&lw->work);
+
+ return 0;
+}
+
+static struct rocker_fdb_tbl_entry *
+rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+{
+ struct rocker_fdb_tbl_entry *found;
+
+ hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
+ if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+ return found;
+
+ return NULL;
+}
+
+static int rocker_port_fdb(struct rocker_port *rocker_port,
+ const unsigned char *addr,
+ __be16 vlan_id, int flags)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_fdb_tbl_entry *fdb;
+ struct rocker_fdb_tbl_entry *found;
+ bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
+ unsigned long lock_flags;
+
+ fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
+ if (!fdb)
+ return -ENOMEM;
+
+ fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
+ fdb->key.lport = rocker_port->lport;
+ ether_addr_copy(fdb->key.addr, addr);
+ fdb->key.vlan_id = vlan_id;
+ fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+ found = rocker_fdb_tbl_find(rocker, fdb);
+
+ if (removing && found) {
+ kfree(fdb);
+ hash_del(&found->entry);
+ } else if (!removing && !found) {
+ hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+ }
+
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+ /* Check if adding and already exists, or removing and can't find */
+ if (!found != !removing) {
+ kfree(fdb);
+ if (!found && removing)
+ return 0;
+ /* Refreshing existing to update aging timers */
+ flags |= ROCKER_OP_FLAG_REFRESH;
+ }
+
+ return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
+}
+
+static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_fdb_tbl_entry *found;
+ unsigned long lock_flags;
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+ struct hlist_node *tmp;
+ int bkt;
+ int err = 0;
+
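+ /* Keep learned entries while the port can still learn or forward */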
+ if (rocker_port->stp_state == BR_STATE_LEARNING ||
+ rocker_port->stp_state == BR_STATE_FORWARDING)
+ return 0;
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.lport != rocker_port->lport)
+ continue;
+ if (!found->learned)
+ continue;
+ err = rocker_port_fdb_learn(rocker_port, flags,
+ found->key.addr,
+ found->key.vlan_id);
+ if (err)
+ goto err_out;
+ hash_del(&found->entry);
+ }
+
+err_out:
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+ return err;
+}
+
+static int rocker_port_router_mac(struct rocker_port *rocker_port,
+ int flags, __be16 vlan_id)
+{
+ u32 in_lport_mask = 0xffffffff;
+ __be16 eth_type;
+ const u8 *dst_mac_mask = ff_mac;
+ __be16 vlan_id_mask = htons(0xffff);
+ bool copy_to_cpu = false;
+ int err;
+
+ if (ntohs(vlan_id) == 0)
+ vlan_id = rocker_port->internal_vlan_id;
+
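+ /* Terminate both IPv4 and IPv6 frames destined to the port MAC */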
+ eth_type = htons(ETH_P_IP);
+ err = rocker_flow_tbl_term_mac(rocker_port,
+ rocker_port->lport, in_lport_mask,
+ eth_type, rocker_port->dev->dev_addr,
+ dst_mac_mask, vlan_id, vlan_id_mask,
+ copy_to_cpu, flags);
+ if (err)
+ return err;
+
+ eth_type = htons(ETH_P_IPV6);
+ err = rocker_flow_tbl_term_mac(rocker_port,
+ rocker_port->lport, in_lport_mask,
+ eth_type, rocker_port->dev->dev_addr,
+ dst_mac_mask, vlan_id, vlan_id_mask,
+ copy_to_cpu, flags);
+
+ return err;
+}
+
+static int rocker_port_fwding(struct rocker_port *rocker_port)
+{
+ bool pop_vlan;
+ u32 out_lport;
+ __be16 vlan_id;
+ u16 vid;
+ int flags = ROCKER_OP_FLAG_NOWAIT;
+ int err;
+
+ /* Port will be forwarding-enabled if its STP state is LEARNING
+ * or FORWARDING. Traffic from CPU can still egress, regardless of
+ * port STP state. Use L2 interface group on port VLANs as a way
+ * to toggle port forwarding: if forwarding is disabled, L2
+ * interface group will not exist.
+ */
+
+ if (rocker_port->stp_state != BR_STATE_LEARNING &&
+ rocker_port->stp_state != BR_STATE_FORWARDING)
+ flags |= ROCKER_OP_FLAG_REMOVE;
+
+ out_lport = rocker_port->lport;
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ if (!test_bit(vid, rocker_port->vlan_bitmap))
+ continue;
+ vlan_id = htons(vid);
+ pop_vlan = rocker_vlan_id_is_internal(vlan_id);
+ err = rocker_group_l2_interface(rocker_port, flags,
+ vlan_id, out_lport,
+ pop_vlan);
+ if (err) {
+ netdev_err(rocker_port->dev,
+ "Error (%d) port VLAN l2 group for lport %d\n",
+ err, out_lport);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
+{
+ bool want[ROCKER_CTRL_MAX] = { 0, };
+ int flags;
+ int err;
+ int i;
+
+ if (rocker_port->stp_state == state)
+ return 0;
+
+ rocker_port->stp_state = state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ /* port is completely disabled */
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+ break;
+ case BR_STATE_LEARNING:
+ case BR_STATE_FORWARDING:
+ want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+ want[ROCKER_CTRL_IPV4_MCAST] = true;
+ want[ROCKER_CTRL_IPV6_MCAST] = true;
+ if (rocker_port_is_bridged(rocker_port))
+ want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+ else
+ want[ROCKER_CTRL_LOCAL_ARP] = true;
+ break;
+ }
+
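+ /* Sync installed control-traffic entries with the set wanted for this state */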
+ for (i = 0; i < ROCKER_CTRL_MAX; i++) {
+ if (want[i] != rocker_port->ctrls[i]) {
+ flags = ROCKER_OP_FLAG_NOWAIT |
+ (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+ err = rocker_port_ctrl(rocker_port, flags,
+ &rocker_ctrls[i]);
+ if (err)
+ return err;
+ rocker_port->ctrls[i] = want[i];
+ }
+ }
+
+ err = rocker_port_fdb_flush(rocker_port);
+ if (err)
+ return err;
+
+ return rocker_port_fwding(rocker_port);
+}
+
+static struct rocker_internal_vlan_tbl_entry *
+rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+{
+ struct rocker_internal_vlan_tbl_entry *found;
+
+ hash_for_each_possible(rocker->internal_vlan_tbl, found,
+ entry, ifindex) {
+ if (found->ifindex == ifindex)
+ return found;
+ }
+
+ return NULL;
+}
+
+static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
+ int ifindex)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_internal_vlan_tbl_entry *entry;
+ struct rocker_internal_vlan_tbl_entry *found;
+ unsigned long lock_flags;
+ int i;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return 0;
+
+ entry->ifindex = ifindex;
+
+ spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+ found = rocker_internal_vlan_tbl_find(rocker, ifindex);
+ if (found) {
+ kfree(entry);
+ goto found;
+ }
+
+ found = entry;
+ hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
+
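+ /* Claim the first free internal VLAN ID from the bitmap */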
+ for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
+ if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
+ continue;
+ found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
+ goto found;
+ }
+
+ netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
+
+found:
+ found->ref_count++;
+ spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+ return found->vlan_id;
+}
+
+static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
+ int ifindex)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_internal_vlan_tbl_entry *found;
+ unsigned long lock_flags;
+ unsigned long bit;
+
+ spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
+
+ found = rocker_internal_vlan_tbl_find(rocker, ifindex);
+ if (!found) {
+ netdev_err(rocker_port->dev,
+ "ifindex (%d) not found in internal VLAN tbl\n",
+ ifindex);
+ goto not_found;
+ }
+
+ if (--found->ref_count <= 0) {
+ bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
+ clear_bit(bit, rocker->internal_vlan_bitmap);
+ hash_del(&found->entry);
+ kfree(found);
+ }
+
+not_found:
+ spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
+}
+
+/*****************
+ * Net device ops
+ *****************/
+
+static int rocker_port_open(struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ u8 stp_state = rocker_port_is_bridged(rocker_port) ?
+ BR_STATE_BLOCKING : BR_STATE_FORWARDING;
+ int err;
+
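+ /* Bring up DMA rings and per-port IRQs before enabling the port */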
+ err = rocker_port_dma_rings_init(rocker_port);
+ if (err)
+ return err;
+
+ err = request_irq(rocker_msix_tx_vector(rocker_port),
+ rocker_tx_irq_handler, 0,
+ rocker_driver_name, rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "cannot assign tx irq\n");
+ goto err_request_tx_irq;
+ }
+
+ err = request_irq(rocker_msix_rx_vector(rocker_port),
+ rocker_rx_irq_handler, 0,
+ rocker_driver_name, rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "cannot assign rx irq\n");
+ goto err_request_rx_irq;
+ }
+
+ err = rocker_port_stp_update(rocker_port, stp_state);
+ if (err)
+ goto err_stp_update;
+
+ napi_enable(&rocker_port->napi_tx);
+ napi_enable(&rocker_port->napi_rx);
+ rocker_port_set_enable(rocker_port, true);
+ netif_start_queue(dev);
+ return 0;
+
+err_stp_update:
+ free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
+err_request_rx_irq:
+ free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
+err_request_tx_irq:
+ rocker_port_dma_rings_fini(rocker_port);
+ return err;
+}
+
+static int rocker_port_stop(struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ rocker_port_set_enable(rocker_port, false);
+ napi_disable(&rocker_port->napi_rx);
+ napi_disable(&rocker_port->napi_tx);
+ rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+ free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
+ free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
+ rocker_port_dma_rings_fini(rocker_port);
+
+ return 0;
+}
+
+static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct pci_dev *pdev = rocker->pdev;
+ struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+ struct rocker_tlv *attr;
+ int rem;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_TX_FRAGS])
+ return;
+ rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
+ struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+ dma_addr_t dma_handle;
+ size_t len;
+
+ if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
+ continue;
+ rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
+ attr);
+ if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
+ !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
+ continue;
+ dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
+ len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
+ pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
+ }
+}
+
+static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ char *buf, size_t buf_len)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+ struct rocker_tlv *frag;
+
+ dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
+ if (net_ratelimit())
+ netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
+ return -EIO;
+ }
+ frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
+ if (!frag)
+ goto unmap_frag;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
+ dma_handle))
+ goto nest_cancel;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
+ buf_len))
+ goto nest_cancel;
+ rocker_tlv_nest_end(desc_info, frag);
+ return 0;
+
+nest_cancel:
+ rocker_tlv_nest_cancel(desc_info, frag);
+unmap_frag:
+ pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
+ return -EMSGSIZE;
+}
+
+static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ struct rocker_tlv *frags;
+ int i;
+ int err;
+
+ desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
+ if (unlikely(!desc_info)) {
+ if (net_ratelimit())
+ netdev_err(dev, "tx ring full when queue awake\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ rocker_desc_cookie_ptr_set(desc_info, skb);
+
+ frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
+ if (!frags)
+ goto out;
+ err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
+ skb->data, skb_headlen(skb));
+ if (err)
+ goto nest_cancel;
+ if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
+ goto nest_cancel;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
+ if (err)
+ goto unmap_frags;
+ }
+ rocker_tlv_nest_end(desc_info, frags);
+
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
+
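+ /* Peek at the next descriptor; stop the queue if the ring is now full */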
+ desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
+ if (!desc_info)
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+
+unmap_frags:
+ rocker_tx_desc_frags_unmap(rocker_port, desc_info);
+nest_cancel:
+ rocker_tlv_nest_cancel(desc_info, frags);
+out:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int rocker_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
+ if (err)
+ return err;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err;
+
+ err = rocker_port_vlan(rocker_port, 0, vid);
+ if (err)
+ return err;
+
+ return rocker_port_router_mac(rocker_port, 0, htons(vid));
+}
+
+static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err;
+
+ err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
+ htons(vid));
+ if (err)
+ return err;
+
+ return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
+}
+
+static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid,
+ u16 nlm_flags)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
+ int flags = 0;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
+
+ return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
+ int flags = ROCKER_OP_FLAG_REMOVE;
+
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
+
+ return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+}
+
+static int rocker_fdb_fill_info(struct sk_buff *skb,
+ struct rocker_port *rocker_port,
+ const unsigned char *addr, u16 vid,
+ u32 portid, u32 seq, int type,
+ unsigned int flags)
+{
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = rocker_port->dev->ifindex;
+ ndm->ndm_state = NUD_REACHABLE;
+
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ if (vid && nla_put_u16(skb, NDA_VLAN, vid))
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int rocker_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev,
+ int idx)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_fdb_tbl_entry *found;
+ struct hlist_node *tmp;
+ int bkt;
+ unsigned long lock_flags;
+ const unsigned char *addr;
+ u16 vid;
+ int err;
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.lport != rocker_port->lport)
+ continue;
+ if (idx < cb->args[0])
+ goto skip;
+ addr = found->key.addr;
+ vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
+ err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI);
+ if (err < 0)
+ break;
+skip:
+ ++idx;
+ }
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+ return idx;
+}
+
+static int rocker_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct nlattr *protinfo;
+ struct nlattr *attr;
+ int err;
+
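+ /* Update bridge port flags from IFLA_PROTINFO nested attributes */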
+ protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+ IFLA_PROTINFO);
+ if (protinfo) {
+ attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
+ if (attr) {
+ if (nla_len(attr) < sizeof(u8))
+ return -EINVAL;
+
+ if (nla_get_u8(attr))
+ rocker_port->brport_flags |= BR_LEARNING;
+ else
+ rocker_port->brport_flags &= ~BR_LEARNING;
+ err = rocker_port_set_learning(rocker_port);
+ if (err)
+ return err;
+ }
+ attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
+ if (attr) {
+ if (nla_len(attr) < sizeof(u8))
+ return -EINVAL;
+
+ if (nla_get_u8(attr))
+ rocker_port->brport_flags |= BR_LEARNING_SYNC;
+ else
+ rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
+ }
+ }
+
+ return 0;
+}
+
+static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 filter_mask)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ u16 mode = BRIDGE_MODE_UNDEF;
+ u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
+ rocker_port->brport_flags, mask);
+}
+
+static int rocker_port_switch_parent_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *psid)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+
+ psid->id_len = sizeof(rocker->hw.id);
+ memcpy(&psid->id, &rocker->hw.id, psid->id_len);
+ return 0;
+}
+
+static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_port_stp_update(rocker_port, state);
+}
+
+static const struct net_device_ops rocker_port_netdev_ops = {
+ .ndo_open = rocker_port_open,
+ .ndo_stop = rocker_port_stop,
+ .ndo_start_xmit = rocker_port_xmit,
+ .ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
+ .ndo_fdb_add = rocker_port_fdb_add,
+ .ndo_fdb_del = rocker_port_fdb_del,
+ .ndo_fdb_dump = rocker_port_fdb_dump,
+ .ndo_bridge_setlink = rocker_port_bridge_setlink,
+ .ndo_bridge_getlink = rocker_port_bridge_getlink,
+ .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get,
+ .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update,
+};
+
+/********************
+ * ethtool interface
+ ********************/
+
+static int rocker_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
+}
+
+static int rocker_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
+}
+
+static void rocker_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct ethtool_ops rocker_port_ethtool_ops = {
+ .get_settings = rocker_port_get_settings,
+ .set_settings = rocker_port_set_settings,
+ .get_drvinfo = rocker_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/*****************
+ * NAPI interface
+ *****************/
+
+static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
+{
+ return container_of(napi, struct rocker_port, napi_tx);
+}
+
+static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ /* Cleanup tx descriptors */
+ while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
+ err = rocker_desc_err(desc_info);
+ if (err && net_ratelimit())
+ netdev_err(rocker_port->dev, "tx desc received with err %d\n",
+ err);
+ rocker_tx_desc_frags_unmap(rocker_port, desc_info);
+ dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info));
+ credits++;
+ }
+
+ if (credits && netif_queue_stopped(rocker_port->dev))
+ netif_wake_queue(rocker_port->dev);
+
+ napi_complete(napi);
+ rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
+
+ return 0;
+}
+
+static int rocker_port_rx_proc(struct rocker *rocker,
+ struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info)
+{
+ struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
+ size_t rx_len;
+
+ if (!skb)
+ return -ENOENT;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
+ return -EINVAL;
+
+ rocker_dma_rx_ring_skb_unmap(rocker, attrs);
+
+ rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
+ skb_put(skb, rx_len);
+ skb->protocol = eth_type_trans(skb, rocker_port->dev);
+ netif_receive_skb(skb);
+
+ return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+}
+
+static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
+{
+ return container_of(napi, struct rocker_port, napi_rx);
+}
+
+static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ /* Process rx descriptors */
+ while (credits < budget &&
+ (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
+ err = rocker_desc_err(desc_info);
+ if (err) {
+ if (net_ratelimit())
+ netdev_err(rocker_port->dev, "rx desc received with err %d\n",
+ err);
+ } else {
+ err = rocker_port_rx_proc(rocker, rocker_port,
+ desc_info);
+ if (err && net_ratelimit())
+ netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
+ err);
+ }
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
+ credits++;
+ }
+
+ if (credits < budget)
+ napi_complete(napi);
+
+ rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
+
+ return credits;
+}
+
+/*****************
+ * PCI driver ops
+ *****************/
+
+static void rocker_carrier_init(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
+ bool link_up;
+
+ link_up = link_status & (1 << rocker_port->lport);
+ if (link_up)
+ netif_carrier_on(rocker_port->dev);
+ else
+ netif_carrier_off(rocker_port->dev);
+}
+
+static void rocker_remove_ports(struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+ int i;
+
+ for (i = 0; i < rocker->port_count; i++) {
+ rocker_port = rocker->ports[i];
+ rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+ unregister_netdev(rocker_port->dev);
+ }
+ kfree(rocker->ports);
+}
+
+static void rocker_port_dev_addr_init(struct rocker *rocker,
+ struct rocker_port *rocker_port)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int err;
+
+ err = rocker_cmd_get_port_settings_macaddr(rocker_port,
+ rocker_port->dev->dev_addr);
+ if (err) {
+ dev_warn(&pdev->dev, "failed to get mac address, using random\n");
+ eth_hw_addr_random(rocker_port->dev);
+ }
+}
+
+static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ struct rocker_port *rocker_port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct rocker_port));
+ if (!dev)
+ return -ENOMEM;
+ rocker_port = netdev_priv(dev);
+ rocker_port->dev = dev;
+ rocker_port->rocker = rocker;
+ rocker_port->port_number = port_number;
+ rocker_port->lport = port_number + 1;
+ rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+
+ rocker_port_dev_addr_init(rocker, rocker_port);
+ dev->netdev_ops = &rocker_port_netdev_ops;
+ dev->ethtool_ops = &rocker_port_ethtool_ops;
+ netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
+ NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
+ NAPI_POLL_WEIGHT);
+ rocker_carrier_init(rocker_port);
+
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "register_netdev failed\n");
+ goto err_register_netdev;
+ }
+ rocker->ports[port_number] = rocker_port;
+
+ rocker_port_set_learning(rocker_port);
+
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+ err = rocker_port_ig_tbl(rocker_port, 0);
+ if (err) {
+ dev_err(&pdev->dev, "install ig port table failed\n");
+ goto err_port_ig_tbl;
+ }
+
+ return 0;
+
+err_port_ig_tbl:
+ unregister_netdev(dev);
+err_register_netdev:
+ free_netdev(dev);
+ return err;
+}
+
+static int rocker_probe_ports(struct rocker *rocker)
+{
+ int i;
+ size_t alloc_size;
+ int err;
+
+ alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
+ rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ if (!rocker->ports)
+ return -ENOMEM;
+ for (i = 0; i < rocker->port_count; i++) {
+ err = rocker_probe_port(rocker, i);
+ if (err)
+ goto remove_ports;
+ }
+ return 0;
+
+remove_ports:
+ rocker_remove_ports(rocker);
+ return err;
+}
+
+static int rocker_msix_init(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int msix_entries;
+ int i;
+ int err;
+
+ msix_entries = pci_msix_vec_count(pdev);
+ if (msix_entries < 0)
+ return msix_entries;
+
+ if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
+ return -EINVAL;
+
+ rocker->msix_entries = kmalloc_array(msix_entries,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!rocker->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < msix_entries; i++)
+ rocker->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
+ if (err < 0)
+ goto err_enable_msix;
+
+ return 0;
+
+err_enable_msix:
+ kfree(rocker->msix_entries);
+ return err;
+}
+
+static void rocker_msix_fini(struct rocker *rocker)
+{
+ pci_disable_msix(rocker->pdev);
+ kfree(rocker->msix_entries);
+}
+
+static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct rocker *rocker;
+ int err;
+
+ rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
+ if (!rocker)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, rocker_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ }
+
+ if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
+ dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
+ goto err_pci_resource_len_check;
+ }
+
+ rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!rocker->hw_addr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+ pci_set_master(pdev);
+
+ rocker->pdev = pdev;
+ pci_set_drvdata(pdev, rocker);
+
+ rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
+
+ err = rocker_msix_init(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_msix_init;
+ }
+
+ err = rocker_basic_hw_test(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "basic hw test failed\n");
+ goto err_basic_hw_test;
+ }
+
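+ /* Reset the device to a clean state before DMA ring setup */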
+ rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+
+ err = rocker_dma_rings_init(rocker);
+ if (err)
+ goto err_dma_rings_init;
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
+ rocker_cmd_irq_handler, 0,
+ rocker_driver_name, rocker);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign cmd irq\n");
+ goto err_request_cmd_irq;
+ }
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
+ rocker_event_irq_handler, 0,
+ rocker_driver_name, rocker);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign event irq\n");
+ goto err_request_event_irq;
+ }
+
+ rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
+
+ err = rocker_init_tbls(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "cannot init rocker tables\n");
+ goto err_init_tbls;
+ }
+
+ err = rocker_probe_ports(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe ports\n");
+ goto err_probe_ports;
+ }
+
+ dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
+
+ return 0;
+
+err_probe_ports:
+ rocker_free_tbls(rocker);
+err_init_tbls:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+err_request_event_irq:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
+err_request_cmd_irq:
+ rocker_dma_rings_fini(rocker);
+err_dma_rings_init:
+err_basic_hw_test:
+ rocker_msix_fini(rocker);
+err_msix_init:
+ iounmap(rocker->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+ pci_disable_device(pdev);
+err_pci_enable_device:
+ kfree(rocker);
+ return err;
+}
+
+static void rocker_remove(struct pci_dev *pdev)
+{
+ struct rocker *rocker = pci_get_drvdata(pdev);
+
+ rocker_free_tbls(rocker);
+ rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+ rocker_remove_ports(rocker);
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
+ rocker_dma_rings_fini(rocker);
+ rocker_msix_fini(rocker);
+ iounmap(rocker->hw_addr);
+ pci_release_regions(rocker->pdev);
+ pci_disable_device(rocker->pdev);
+ kfree(rocker);
+}
+
+static struct pci_driver rocker_pci_driver = {
+ .name = rocker_driver_name,
+ .id_table = rocker_pci_id_table,
+ .probe = rocker_probe,
+ .remove = rocker_remove,
+};
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static bool rocker_port_dev_check(struct net_device *dev)
+{
+ return dev->netdev_ops == &rocker_port_netdev_ops;
+}
+
+static int rocker_port_bridge_join(struct rocker_port *rocker_port,
+ struct net_device *bridge)
+{
+ int err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->dev->ifindex);
+
+ rocker_port->bridge_dev = bridge;
+
+ /* Use bridge internal VLAN ID for untagged pkts */
+ err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+ if (err)
+ return err;
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port,
+ bridge->ifindex);
+ err = rocker_port_vlan(rocker_port, 0, 0);
+
+ return err;
+}
+
+static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
+{
+ int err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->bridge_dev->ifindex);
+
+ rocker_port->bridge_dev = NULL;
+
+ /* Use port internal VLAN ID for untagged pkts */
+ err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+ if (err)
+ return err;
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port,
+ rocker_port->dev->ifindex);
+ err = rocker_port_vlan(rocker_port, 0, 0);
+
+ return err;
+}
+
+static int rocker_port_master_changed(struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct net_device *master = netdev_master_upper_dev_get(dev);
+ int err = 0;
+
+ if (master && master->rtnl_link_ops &&
+ !strcmp(master->rtnl_link_ops->kind, "bridge"))
+ err = rocker_port_bridge_join(rocker_port, master);
+ else
+ err = rocker_port_bridge_leave(rocker_port);
+
+ return err;
+}
+
+static int rocker_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev;
+ int err;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ dev = netdev_notifier_info_to_dev(ptr);
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+ err = rocker_port_master_changed(dev);
+ if (err)
+ netdev_warn(dev,
+ "failed to reflect master change (err %d)\n",
+ err);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netdevice_nb __read_mostly = {
+ .notifier_call = rocker_netdevice_event,
+};
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init rocker_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&rocker_netdevice_nb);
+ err = pci_register_driver(&rocker_pci_driver);
+ if (err)
+ goto err_pci_register_driver;
+ return 0;
+
+err_pci_register_driver:
+ unregister_netdevice_notifier(&rocker_netdevice_nb);
+ return err;
+}
+
+static void __exit rocker_module_exit(void)
+{
+ unregister_netdevice_notifier(&rocker_netdevice_nb);
+ pci_unregister_driver(&rocker_pci_driver);
+}
+
+module_init(rocker_module_init);
+module_exit(rocker_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
+MODULE_DESCRIPTION("Rocker switch device driver");
+MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
new file mode 100644
index 000000000000..8d2865ba634c
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -0,0 +1,428 @@
+/*
+ * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver
+ * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ROCKER_H
+#define _ROCKER_H
+
+#include <linux/types.h>
+
+#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006
+
+#define ROCKER_PCI_BAR0_SIZE 0x2000
+
+/* MSI-X vectors */
+enum {
+ ROCKER_MSIX_VEC_CMD,
+ ROCKER_MSIX_VEC_EVENT,
+ ROCKER_MSIX_VEC_TEST,
+ ROCKER_MSIX_VEC_RESERVED0,
+ __ROCKER_MSIX_VEC_TX,
+ __ROCKER_MSIX_VEC_RX,
+#define ROCKER_MSIX_VEC_TX(port) \
+ (__ROCKER_MSIX_VEC_TX + ((port) * 2))
+#define ROCKER_MSIX_VEC_RX(port) \
+ (__ROCKER_MSIX_VEC_RX + ((port) * 2))
+#define ROCKER_MSIX_VEC_COUNT(portcnt) \
+ (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1)
+};
+
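Vector 0 carries command completions and vector 1 asynchronous events; each
port then claims an interleaved TX/RX pair starting at vector 4. A worked
expansion of the macros above for a two-port switch:

	/* Derived from the enum and macros above (portcnt == 2):
	 *
	 *   ROCKER_MSIX_VEC_CMD       == 0
	 *   ROCKER_MSIX_VEC_EVENT     == 1
	 *   ROCKER_MSIX_VEC_TEST      == 2
	 *   ROCKER_MSIX_VEC_RESERVED0 == 3
	 *   ROCKER_MSIX_VEC_TX(0) == 4, ROCKER_MSIX_VEC_RX(0) == 5
	 *   ROCKER_MSIX_VEC_TX(1) == 6, ROCKER_MSIX_VEC_RX(1) == 7
	 *   ROCKER_MSIX_VEC_COUNT(2) == ROCKER_MSIX_VEC_RX(1) + 1 == 8
	 */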
+/* Rocker bogus registers */
+#define ROCKER_BOGUS_REG0 0x0000
+#define ROCKER_BOGUS_REG1 0x0004
+#define ROCKER_BOGUS_REG2 0x0008
+#define ROCKER_BOGUS_REG3 0x000c
+
+/* Rocker test registers */
+#define ROCKER_TEST_REG 0x0010
+#define ROCKER_TEST_REG64 0x0018 /* 8-byte */
+#define ROCKER_TEST_IRQ 0x0020
+#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */
+#define ROCKER_TEST_DMA_SIZE 0x0030
+#define ROCKER_TEST_DMA_CTRL 0x0034
+
+/* Rocker test register ctrl */
+#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0)
+#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1)
+#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2)
+
+/* Rocker DMA ring register offsets */
+#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */
+#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32)
+#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32)
+#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32)
+#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32)
+#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32)
+#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32)
+
+/* Rocker dma ctrl register bits */
+#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0)
+
+/* Rocker DMA ring types */
+enum rocker_dma_type {
+ ROCKER_DMA_CMD,
+ ROCKER_DMA_EVENT,
+ __ROCKER_DMA_TX,
+ __ROCKER_DMA_RX,
+#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2)
+#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2)
+};
+
+/* Rocker DMA ring size limits and default sizes */
+#define ROCKER_DMA_SIZE_MIN 2ul
+#define ROCKER_DMA_SIZE_MAX 65536ul
+#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul
+#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul
+#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul
+#define ROCKER_DMA_TX_DESC_SIZE 256
+#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul
+#define ROCKER_DMA_RX_DESC_SIZE 256
+
+/* Rocker DMA descriptor struct */
+struct rocker_desc {
+ u64 buf_addr;
+ u64 cookie;
+ u16 buf_size;
+ u16 tlv_size;
+ u16 resv[5];
+ u16 comp_err;
+};
+
+#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15)
+
+/* Rocker DMA TLV struct */
+struct rocker_tlv {
+ u32 type;
+ u16 len;
+};
+
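Each descriptor's payload is a flat run of these headers, netlink-style. A
minimal sketch of walking one, assuming (as with netlink) that len covers
header plus payload and entries are padded to 8-byte boundaries; the macro
and function names here are illustrative, not the driver's real helpers:

	#define EX_TLV_ALIGN(len)	(((len) + 7) & ~7)

	static void ex_for_each_tlv(const void *buf, u16 tlv_size)
	{
		const struct rocker_tlv *tlv = buf;

		while (tlv_size >= sizeof(*tlv) &&
		       tlv->len >= sizeof(*tlv) && tlv->len <= tlv_size) {
			/* tlv->type names a ROCKER_TLV_* attribute; the
			 * payload follows the aligned header.
			 */
			u16 step = EX_TLV_ALIGN(tlv->len);

			if (step > tlv_size)
				break;
			tlv_size -= step;
			tlv = (const void *)((const u8 *)tlv + step);
		}
	}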
+/* TLVs */
+enum {
+ ROCKER_TLV_CMD_UNSPEC,
+ ROCKER_TLV_CMD_TYPE, /* u16 */
+ ROCKER_TLV_CMD_INFO, /* nest */
+
+ __ROCKER_TLV_CMD_MAX,
+ ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_CMD_TYPE_UNSPEC,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
+
+ __ROCKER_TLV_CMD_TYPE_MAX,
+ ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC,
+ ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
+
+ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
+ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
+};
+
+enum rocker_port_mode {
+ ROCKER_PORT_MODE_OF_DPA,
+};
+
+enum {
+ ROCKER_TLV_EVENT_UNSPEC,
+ ROCKER_TLV_EVENT_TYPE, /* u16 */
+ ROCKER_TLV_EVENT_INFO, /* nest */
+
+ __ROCKER_TLV_EVENT_MAX,
+ ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_TYPE_UNSPEC,
+ ROCKER_TLV_EVENT_TYPE_LINK_CHANGED,
+ ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN,
+
+ __ROCKER_TLV_EVENT_TYPE_MAX,
+ ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC,
+ ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */
+ ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */
+
+ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX,
+ ROCKER_TLV_EVENT_LINK_CHANGED_MAX =
+ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC,
+ ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */
+ ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */
+ ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */
+
+ __ROCKER_TLV_EVENT_MAC_VLAN_MAX,
+ ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_RX_UNSPEC,
+ ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */
+ ROCKER_TLV_RX_CSUM, /* u16 */
+ ROCKER_TLV_RX_FRAG_ADDR, /* u64 */
+ ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */
+ ROCKER_TLV_RX_FRAG_LEN, /* u16 */
+
+ __ROCKER_TLV_RX_MAX,
+ ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
+};
+
+#define ROCKER_RX_FLAGS_IPV4 (1 << 0)
+#define ROCKER_RX_FLAGS_IPV6 (1 << 1)
+#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3)
+#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4)
+#define ROCKER_RX_FLAGS_TCP (1 << 5)
+#define ROCKER_RX_FLAGS_UDP (1 << 6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7)
+
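On receive, these bits let the driver tell the stack that the hardware has
already verified a packet's checksum. A hedged sketch of the conventional
translation onto the skb (illustrative, not code from this patch):

	static void ex_rx_csum_to_skb(struct sk_buff *skb, u16 rx_flags)
	{
		/* Default: no checksum information from hardware. */
		skb->ip_summed = CHECKSUM_NONE;

		if ((rx_flags & ROCKER_RX_FLAGS_CSUM_CALC) &&
		    (rx_flags & ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}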
+enum {
+ ROCKER_TLV_TX_UNSPEC,
+ ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */
+ ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */
+ ROCKER_TLV_TX_TSO_MSS, /* u16 */
+ ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */
+ ROCKER_TLV_TX_FRAGS, /* array */
+
+ __ROCKER_TLV_TX_MAX,
+ ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1,
+};
+
+#define ROCKER_TX_OFFLOAD_NONE 0
+#define ROCKER_TX_OFFLOAD_IP_CSUM 1
+#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2
+#define ROCKER_TX_OFFLOAD_L3_CSUM 3
+#define ROCKER_TX_OFFLOAD_TSO 4
+
+#define ROCKER_TX_FRAGS_MAX 16
+
+enum {
+ ROCKER_TLV_TX_FRAG_UNSPEC,
+ ROCKER_TLV_TX_FRAG, /* nest */
+
+ __ROCKER_TLV_TX_FRAG_MAX,
+ ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_TX_FRAG_ATTR_UNSPEC,
+ ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */
+ ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */
+
+ __ROCKER_TLV_TX_FRAG_ATTR_MAX,
+ ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1,
+};
+
+/* cmd info nested for OF-DPA msgs */
+enum {
+ ROCKER_TLV_OF_DPA_UNSPEC,
+ ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */
+ ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */
+ ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */
+ ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */
+ ROCKER_TLV_OF_DPA_COOKIE, /* u64 */
+ ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */
+ ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */
+ ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */
+ ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */
+ ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */
+ ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */
+ ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */
+ ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */
+ ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */
+ ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */
+ ROCKER_TLV_OF_DPA_DST_MAC, /* binary */
+ ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */
+ ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */
+ ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */
+ ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */
+ ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */
+ ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */
+ ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */
+
+ __ROCKER_TLV_OF_DPA_MAX,
+ ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1,
+};
+
+/* OF-DPA table IDs */
+
+enum rocker_of_dpa_table_id {
+ ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0,
+ ROCKER_OF_DPA_TABLE_ID_VLAN = 10,
+ ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20,
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30,
+ ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40,
+ ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50,
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60,
+};
+
+/* OF-DPA flow stats */
+enum {
+ ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC,
+ ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */
+ ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */
+ ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */
+
+ __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX,
+ ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1,
+};
+
+/* OF-DPA group types */
+enum rocker_of_dpa_group_type {
+ ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY,
+};
+
+/* OF-DPA group L2 overlay types */
+enum rocker_of_dpa_overlay_type {
+ ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0,
+ ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST,
+ ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST,
+ ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST,
+};
+
+/* OF-DPA group ID encoding */
+#define ROCKER_GROUP_TYPE_SHIFT 28
+#define ROCKER_GROUP_TYPE_MASK 0xf0000000
+#define ROCKER_GROUP_VLAN_SHIFT 16
+#define ROCKER_GROUP_VLAN_MASK 0x0fff0000
+#define ROCKER_GROUP_PORT_SHIFT 0
+#define ROCKER_GROUP_PORT_MASK 0x0000ffff
+#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12
+#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000
+#define ROCKER_GROUP_SUBTYPE_SHIFT 10
+#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00
+#define ROCKER_GROUP_INDEX_SHIFT 0
+#define ROCKER_GROUP_INDEX_MASK 0x0000ffff
+#define ROCKER_GROUP_INDEX_LONG_SHIFT 0
+#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff
+
+#define ROCKER_GROUP_TYPE_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT)
+#define ROCKER_GROUP_TYPE_SET(type) \
+ (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK)
+#define ROCKER_GROUP_VLAN_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_VLAN_MASK) >> ROCKER_GROUP_VLAN_SHIFT)
+#define ROCKER_GROUP_VLAN_SET(vlan_id) \
+ (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK)
+#define ROCKER_GROUP_PORT_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT)
+#define ROCKER_GROUP_PORT_SET(port) \
+ (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK)
+#define ROCKER_GROUP_INDEX_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT)
+#define ROCKER_GROUP_INDEX_SET(index) \
+ (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK)
+#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \
+ ROCKER_GROUP_INDEX_LONG_SHIFT)
+#define ROCKER_GROUP_INDEX_LONG_SET(index) \
+ (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \
+ ROCKER_GROUP_INDEX_LONG_MASK)
+
+#define ROCKER_GROUP_NONE 0
+#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port))
+#define ROCKER_GROUP_L2_REWRITE(index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\
+ ROCKER_GROUP_INDEX_LONG_SET(index))
+#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
+#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
+#define ROCKER_GROUP_L3_UNICAST(index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\
+ ROCKER_GROUP_INDEX_LONG_SET(index))
+
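A worked example of the encoding: an L2 interface group for VLAN 100 on
port 3 composes and decomposes as follows, derived from the masks above:

	/*
	 *   ROCKER_GROUP_L2_INTERFACE(htons(100), 3)
	 *     type 0 (L2 interface) in bits 31:28 -> 0x00000000
	 *     VLAN 100 in bits 27:16              -> 0x00640000
	 *     port 3 in bits 15:0                 -> 0x00000003
	 *     group_id                            == 0x00640003
	 *
	 *   ROCKER_GROUP_VLAN_GET(0x00640003) == 100
	 *   ROCKER_GROUP_PORT_GET(0x00640003) == 3
	 */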
+/* Rocker general purpose registers */
+#define ROCKER_CONTROL 0x0300
+#define ROCKER_PORT_PHYS_COUNT 0x0304
+#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */
+#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */
+#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */
+
+/* Rocker control bits */
+#define ROCKER_CONTROL_RESET (1 << 0)
+
+#endif
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b147d469a799..7fd6e275d1c2 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -90,9 +90,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
/* Get memory resource */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto err_out;
-
addr = devm_ioremap_resource(dev, res);
if (IS_ERR(addr))
return PTR_ERR(addr);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index a77f05ce8325..fbb6cfa0f5f1 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3689,6 +3689,11 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .sriov_init = efx_ef10_sriov_init,
+ .sriov_fini = efx_ef10_sriov_fini,
+ .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
+ .sriov_wanted = efx_ef10_sriov_wanted,
+ .sriov_reset = efx_ef10_sriov_reset,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b2cc590dd1dd..238482495e81 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1314,7 +1314,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
/* If RSS is requested for the PF *and* VFs then we can't write RSS
* table entries that are inaccessible to VFs
*/
- if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
count > efx_vf_size(efx)) {
netif_warn(efx, probe, efx->net_dev,
"Reducing number of RSS channels from %u to %u for "
@@ -1426,7 +1426,9 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* RSS might be usable on VFs even if it is disabled on the PF */
- efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
+
+ efx->rss_spread = ((efx->n_rx_channels > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
efx->n_rx_channels : efx_vf_size(efx));
return 0;
@@ -1614,7 +1616,7 @@ static int efx_probe_nic(struct efx_nic *efx)
goto fail2;
if (efx->n_channels > 1)
- get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
+ netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] =
ethtool_rxfh_indir_default(i, efx->rss_spread);
@@ -2166,7 +2168,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
}
ether_addr_copy(net_dev->dev_addr, new_addr);
- efx_sriov_mac_address_changed(efx);
+ efx->type->sriov_mac_address_changed(efx);
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
@@ -2210,10 +2212,10 @@ static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_SFC_SRIOV
- .ndo_set_vf_mac = efx_sriov_set_vf_mac,
- .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
- .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
- .ndo_get_vf_config = efx_sriov_get_vf_config,
+ .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_siena_sriov_get_vf_config,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
@@ -2433,7 +2435,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (rc)
goto fail;
efx_restore_filters(efx);
- efx_sriov_reset(efx);
+ efx->type->sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
@@ -2826,7 +2828,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_disable_interrupts(efx);
rtnl_unlock();
- efx_sriov_fini(efx);
+ efx->type->sriov_fini(efx);
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
@@ -3023,7 +3025,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail4;
- rc = efx_sriov_init(efx);
+ rc = efx->type->sriov_init(efx);
if (rc)
netif_err(efx, probe, efx->net_dev,
"SR-IOV can't be enabled rc %d\n", rc);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index cad258a78708..4835bc0d0de8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1086,19 +1086,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
0 : ARRAY_SIZE(efx->rx_indir_table));
}
-static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key)
+static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct efx_nic *efx = netdev_priv(net_dev);
- memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (indir)
+ memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
return 0;
}
-static int efx_ethtool_set_rxfh(struct net_device *net_dev,
- const u32 *indir, const u8 *key)
+static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ /* We do not allow changes to unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
efx->type->rx_push_rss_config(efx);
return 0;
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 157037546d30..f166c8ef38a3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2766,6 +2766,11 @@ const struct efx_nic_type falcon_a1_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
+ .sriov_init = efx_falcon_sriov_init,
+ .sriov_fini = efx_falcon_sriov_fini,
+ .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
+ .sriov_wanted = efx_falcon_sriov_wanted,
+ .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2862,6 +2867,11 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
+ .sriov_init = efx_falcon_sriov_init,
+ .sriov_fini = efx_falcon_sriov_fini,
+ .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
+ .sriov_wanted = efx_falcon_sriov_wanted,
+ .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 6859437b59fb..75975328e020 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -226,6 +226,9 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer,
unsigned int len)
{
+#ifdef CONFIG_SFC_SRIOV
+ struct siena_nic_data *nic_data = efx->nic_data;
+#endif
len = ALIGN(len, EFX_BUF_SIZE);
if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
@@ -237,8 +240,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
- BUG_ON(efx_sriov_enabled(efx) &&
- efx->vf_buftbl_base < efx->next_buffer_table);
+ BUG_ON(efx_siena_sriov_enabled(efx) &&
+ nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif
netif_dbg(efx, probe, efx->net_dev,
@@ -667,7 +670,7 @@ static int efx_farch_do_flush(struct efx_nic *efx)
* the firmware (though we will still have to poll for
* completion). If that fails, fall back to the old scheme.
*/
- if (efx_sriov_enabled(efx)) {
+ if (efx_siena_sriov_enabled(efx)) {
rc = efx_mcdi_flush_rxqs(efx);
if (!rc)
goto wait;
@@ -1195,13 +1198,13 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
- efx_sriov_tx_flush_done(efx, event);
+ efx_siena_sriov_tx_flush_done(efx, event);
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
- efx_sriov_rx_flush_done(efx, event);
+ efx_siena_sriov_rx_flush_done(efx, event);
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
@@ -1240,7 +1243,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@@ -1250,7 +1253,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
@@ -1315,7 +1318,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
efx_farch_handle_driver_event(channel, &event);
break;
case FSE_CZ_EV_CODE_USER_EV:
- efx_sriov_event(channel, &event);
+ efx_siena_sriov_event(channel, &event);
break;
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
@@ -1668,6 +1671,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
unsigned vi_count, buftbl_min;
+#ifdef CONFIG_SFC_SRIOV
+ struct siena_nic_data *nic_data = efx->nic_data;
+#endif
+
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
@@ -1678,10 +1685,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
- if (efx_sriov_wanted(efx)) {
+ if (efx->type->sriov_wanted(efx)) {
unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
- efx->vf_buftbl_base = buftbl_min;
+ nic_data->vf_buftbl_base = buftbl_min;
vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
vi_count = max(vi_count, EFX_VI_BASE);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 5239cf9bdc56..d37928f01949 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1035,7 +1035,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
 /* MAC stats are gathered lazily. We can ignore this. */
break;
case MCDI_EVENT_CODE_FLR:
- efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+ efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
break;
case MCDI_EVENT_CODE_PTP_RX:
case MCDI_EVENT_CODE_PTP_FAULT:
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9ede32064685..325dd94bca46 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -913,13 +913,6 @@ struct vfdi_status;
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
- * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
- * @vfdi_status: Common VFDI status page to be dmad to VF address space.
- * @local_addr_list: List of local addresses. Protected by %local_lock.
- * @local_page_list: List of DMA addressable pages used to broadcast
- * %local_addr_list. Protected by %local_lock.
- * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
- * @peer_work: Work item to broadcast peer addresses to VMs.
* @ptp_data: PTP state data
* @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
@@ -1060,17 +1053,10 @@ struct efx_nic {
wait_queue_head_t flush_wq;
#ifdef CONFIG_SFC_SRIOV
- struct efx_channel *vfdi_channel;
struct efx_vf *vf;
unsigned vf_count;
unsigned vf_init_count;
unsigned vi_scale;
- unsigned vf_buftbl_base;
- struct efx_buffer vfdi_status;
- struct list_head local_addr_list;
- struct list_head local_page_list;
- struct mutex local_lock;
- struct work_struct peer_work;
#endif
struct efx_ptp_data *ptp_data;
@@ -1344,6 +1330,11 @@ struct efx_nic_type {
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
+ int (*sriov_init)(struct efx_nic *efx);
+ void (*sriov_fini)(struct efx_nic *efx);
+ void (*sriov_mac_address_changed)(struct efx_nic *efx);
+ bool (*sriov_wanted)(struct efx_nic *efx);
+ void (*sriov_reset)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index f77cce034ad4..93d10cbbd1cf 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -378,12 +378,30 @@ enum {
/**
* struct siena_nic_data - Siena NIC state
+ * @efx: Pointer back to main interface structure
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
+ * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
+ * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
+ * @local_addr_list: List of local addresses. Protected by %local_lock.
+ * @local_page_list: List of DMA addressable pages used to broadcast
+ * %local_addr_list. Protected by %local_lock.
+ * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
+ * @peer_work: Work item to broadcast peer addresses to VMs.
*/
struct siena_nic_data {
+ struct efx_nic *efx;
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_channel *vfdi_channel;
+ unsigned vf_buftbl_base;
+ struct efx_buffer vfdi_status;
+ struct list_head local_addr_list;
+ struct list_head local_page_list;
+ struct mutex local_lock;
+ struct work_struct peer_work;
+#endif
};
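Because efx->nic_data is an opaque per-NIC-type pointer, the SR-IOV code
that this patch relocates recovers the Siena state with a cast before
touching the fields above. A minimal sketch of the pattern the patch uses
throughout (the function name is illustrative):

	static void ex_touch_sriov_state(struct efx_nic *efx)
	{
		struct siena_nic_data *nic_data = efx->nic_data;

		mutex_lock(&nic_data->local_lock);
		/* ... walk nic_data->local_addr_list and
		 * nic_data->local_page_list under the lock ...
		 */
		mutex_unlock(&nic_data->local_lock);
	}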
enum {
@@ -522,62 +540,88 @@ struct efx_ef10_nic_data {
#ifdef CONFIG_SFC_SRIOV
-static inline bool efx_sriov_wanted(struct efx_nic *efx)
+/* SIENA */
+static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
{
return efx->vf_count != 0;
}
-static inline bool efx_sriov_enabled(struct efx_nic *efx)
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
return efx->vf_init_count != 0;
}
+
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
return 1 << efx->vi_scale;
}
int efx_init_sriov(void);
-void efx_sriov_probe(struct efx_nic *efx);
-int efx_sriov_init(struct efx_nic *efx);
-void efx_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_sriov_reset(struct efx_nic *efx);
-void efx_sriov_fini(struct efx_nic *efx);
+void efx_siena_sriov_probe(struct efx_nic *efx);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
void efx_fini_sriov(void);
+/* EF10 */
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
+
#else
-static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; }
+/* SIENA */
+static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-
static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_sriov_tx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_sriov_rx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_sriov_event(struct efx_channel *channel,
- efx_qword_t *event) {}
-static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {}
-static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_sriov_fini(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
+static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_event(struct efx_channel *channel,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
+ unsigned dmaq) {}
+static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
+static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
static inline void efx_fini_sriov(void) {}
+/* EF10 */
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
+
#endif
-int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
-int efx_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
+/* FALCON */
+static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
+
+int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos);
+int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf);
+int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+ bool spoofchk);
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ae696855f21a..3583f0208a6e 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -251,6 +251,7 @@ static int siena_probe_nic(struct efx_nic *efx)
nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
if (!nic_data)
return -ENOMEM;
+ nic_data->efx = efx;
efx->nic_data = nic_data;
if (efx_farch_fpga_ver(efx) != 0) {
@@ -306,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail5;
- efx_sriov_probe(efx);
+ efx_siena_sriov_probe(efx);
efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -996,6 +997,11 @@ const struct efx_nic_type siena_a0_nic_type = {
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.ptp_set_ts_config = siena_ptp_set_ts_config,
+ .sriov_init = efx_siena_sriov_init,
+ .sriov_fini = efx_siena_sriov_fini,
+ .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
+ .sriov_wanted = efx_siena_sriov_wanted,
+ .sriov_reset = efx_siena_sriov_reset,
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 43d2e64546ed..a8bbbad68a88 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -66,7 +66,7 @@ enum efx_vf_tx_filter_mode {
* @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
* @peer_page_addrs and @peer_page_count from simultaneous
* updates by the VM and consumption by
- * efx_sriov_update_vf_addr()
+ * efx_siena_sriov_update_vf_addr()
* @peer_page_addrs: Pointer to an array of guest pages for local addresses.
 * @peer_page_count: Number of entries in @peer_page_addrs.
* @evq0_addrs: Array of guest pages backing evq0.
@@ -194,8 +194,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index)
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
-static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
- unsigned *vi_scale_out, unsigned *vf_total_out)
+static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
+ unsigned *vi_scale_out, unsigned *vf_total_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
@@ -227,18 +227,20 @@ static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
return 0;
}
-static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
+static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
EFX_POPULATE_OWORD_2(reg,
FRF_CZ_USREV_DIS, enabled ? 0 : 1,
- FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
+ FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel);
efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}
-static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
- unsigned int count)
+static int efx_siena_sriov_memcpy(struct efx_nic *efx,
+ struct efx_memcpy_req *req,
+ unsigned int count)
{
MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
MCDI_DECLARE_STRUCT_PTR(record);
@@ -297,7 +299,7 @@ out:
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
-static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -341,7 +343,7 @@ static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
}
/* The RX filter is managed here on behalf of the VF driver */
-static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -380,22 +382,26 @@ static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
}
}
-static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
{
- efx_sriov_reset_tx_filter(vf);
- efx_sriov_reset_rx_filter(vf);
- queue_work(vfdi_workqueue, &vf->efx->peer_work);
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+
+ efx_siena_sriov_reset_tx_filter(vf);
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
}
/* Push the peer list to this VF. The caller must hold status_lock to interlock
* with VFDI requests, and they must be serialised against manipulation of
* local_page_list, either by acquiring local_lock or by running from
- * efx_sriov_peer_work()
+ * efx_siena_sriov_peer_work()
*/
-static void __efx_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
- struct vfdi_status *status = efx->vfdi_status.addr;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_status *status = nic_data->vfdi_status.addr;
struct efx_memcpy_req copy[4];
struct efx_endpoint_page *epp;
unsigned int pos, count;
@@ -421,7 +427,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
*/
data_offset = offsetof(struct vfdi_status, version);
copy[1].from_rid = efx->pci_dev->devfn;
- copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
+ copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset;
copy[1].to_rid = vf->pci_rid;
copy[1].to_addr = vf->status_addr + data_offset;
copy[1].length = status->length - data_offset;
@@ -429,7 +435,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
/* Copy the peer pages */
pos = 2;
count = 0;
- list_for_each_entry(epp, &efx->local_page_list, link) {
+ list_for_each_entry(epp, &nic_data->local_page_list, link) {
if (count == vf->peer_page_count) {
/* The VF driver will know they need to provide more
* pages because peer_addr_count is too large.
@@ -444,7 +450,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
copy[pos].length = EFX_PAGE_SIZE;
if (++pos == ARRAY_SIZE(copy)) {
- efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+ efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
pos = 0;
}
++count;
@@ -456,7 +462,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
generation_end);
copy[pos].length = sizeof(status->generation_end);
- efx_sriov_memcpy(efx, copy, pos + 1);
+ efx_siena_sriov_memcpy(efx, copy, pos + 1);
/* Notify the guest */
EFX_POPULATE_QWORD_3(event,
@@ -469,8 +475,8 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
&event);
}
-static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
- u64 *addr, unsigned count)
+static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset,
+ u64 *addr, unsigned count)
{
efx_qword_t buf;
unsigned pos;
@@ -539,7 +545,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf)
return VFDI_RC_EINVAL;
}
- efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
@@ -584,7 +590,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
}
if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
++vf->rxq_count;
- efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
EFX_POPULATE_OWORD_6(reg,
@@ -628,7 +634,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf)
if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
++vf->txq_count;
mutex_unlock(&vf->txq_lock);
- efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
@@ -742,8 +748,8 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
vf_offset + index);
}
- efx_sriov_bufs(efx, vf->buftbl_base, NULL,
- EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
+ efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL,
+ EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
efx_vfdi_flush_clear(vf);
vf->evq0_count = 0;
@@ -754,6 +760,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
static int efx_vfdi_insert_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_req *req = vf->buf.addr;
unsigned vf_rxq = req->u.mac_filter.rxq;
unsigned flags;
@@ -776,17 +783,20 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf)
vf->rx_filter_qid = vf_rxq;
vf->rx_filtering = true;
- efx_sriov_reset_rx_filter(vf);
- queue_work(vfdi_workqueue, &efx->peer_work);
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
{
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+
vf->rx_filtering = false;
- efx_sriov_reset_rx_filter(vf);
- queue_work(vfdi_workqueue, &vf->efx->peer_work);
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
return VFDI_RC_SUCCESS;
}
@@ -794,6 +804,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
static int efx_vfdi_set_status_page(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_req *req = vf->buf.addr;
u64 page_count = req->u.set_status_page.peer_page_count;
u64 max_page_count =
@@ -809,7 +820,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
return VFDI_RC_EINVAL;
}
- mutex_lock(&efx->local_lock);
+ mutex_lock(&nic_data->local_lock);
mutex_lock(&vf->status_lock);
vf->status_addr = req->u.set_status_page.dma_addr;
@@ -828,9 +839,9 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
}
}
- __efx_sriov_push_vf_status(vf);
+ __efx_siena_sriov_push_vf_status(vf);
mutex_unlock(&vf->status_lock);
- mutex_unlock(&efx->local_lock);
+ mutex_unlock(&nic_data->local_lock);
return VFDI_RC_SUCCESS;
}
@@ -857,7 +868,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};
-static void efx_sriov_vfdi(struct work_struct *work)
+static void efx_siena_sriov_vfdi(struct work_struct *work)
{
struct efx_vf *vf = container_of(work, struct efx_vf, req);
struct efx_nic *efx = vf->efx;
@@ -872,7 +883,7 @@ static void efx_sriov_vfdi(struct work_struct *work)
copy[0].to_rid = efx->pci_dev->devfn;
copy[0].to_addr = vf->buf.dma_addr;
copy[0].length = EFX_PAGE_SIZE;
- rc = efx_sriov_memcpy(efx, copy, 1);
+ rc = efx_siena_sriov_memcpy(efx, copy, 1);
if (rc) {
/* If we can't get the request, we can't reply to the caller */
if (net_ratelimit())
@@ -916,7 +927,7 @@ static void efx_sriov_vfdi(struct work_struct *work)
copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
copy[1].length = sizeof(req->op);
- (void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+ (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}
@@ -925,7 +936,8 @@ static void efx_sriov_vfdi(struct work_struct *work)
* event ring in guest memory with VFDI reset events, then (re-initialise) the
* event queue to raise an interrupt. The guest driver will then recover.
*/
-static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
+static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+ struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
struct efx_memcpy_req copy_req[4];
@@ -961,7 +973,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
copy_req[k].to_addr = vf->evq0_addrs[pos + k];
copy_req[k].length = EFX_PAGE_SIZE;
}
- rc = efx_sriov_memcpy(efx, copy_req, count);
+ rc = efx_siena_sriov_memcpy(efx, copy_req, count);
if (rc) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
@@ -974,7 +986,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
/* Reinitialise, arm and trigger evq0 */
abs_evq = abs_index(vf, 0);
buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
- efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
+ efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
@@ -992,19 +1004,19 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
mutex_unlock(&vf->status_lock);
}
-static void efx_sriov_reset_vf_work(struct work_struct *work)
+static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
struct efx_vf *vf = container_of(work, struct efx_vf, req);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
- efx_sriov_reset_vf(vf, &buf);
+ efx_siena_sriov_reset_vf(vf, &buf);
efx_nic_free_buffer(efx, &buf);
}
}
-static void efx_sriov_handle_no_channel(struct efx_nic *efx)
+static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx)
{
netif_err(efx, drv, efx->net_dev,
"ERROR: IOV requires MSI-X and 1 additional interrupt"
@@ -1012,35 +1024,38 @@ static void efx_sriov_handle_no_channel(struct efx_nic *efx)
efx->vf_count = 0;
}
-static int efx_sriov_probe_channel(struct efx_channel *channel)
+static int efx_siena_sriov_probe_channel(struct efx_channel *channel)
{
- channel->efx->vfdi_channel = channel;
+ struct siena_nic_data *nic_data = channel->efx->nic_data;
+ nic_data->vfdi_channel = channel;
+
return 0;
}
static void
-efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+efx_siena_sriov_get_channel_name(struct efx_channel *channel,
+ char *buf, size_t len)
{
snprintf(buf, len, "%s-iov", channel->efx->name);
}
-static const struct efx_channel_type efx_sriov_channel_type = {
- .handle_no_channel = efx_sriov_handle_no_channel,
- .pre_probe = efx_sriov_probe_channel,
+static const struct efx_channel_type efx_siena_sriov_channel_type = {
+ .handle_no_channel = efx_siena_sriov_handle_no_channel,
+ .pre_probe = efx_siena_sriov_probe_channel,
.post_remove = efx_channel_dummy_op_void,
- .get_name = efx_sriov_get_channel_name,
+ .get_name = efx_siena_sriov_get_channel_name,
/* no copy operation; channel must not be reallocated */
.keep_eventq = true,
};
-void efx_sriov_probe(struct efx_nic *efx)
+void efx_siena_sriov_probe(struct efx_nic *efx)
{
unsigned count;
if (!max_vfs)
return;
- if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
+ if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
return;
if (count > 0 && count > max_vfs)
count = max_vfs;
@@ -1048,17 +1063,20 @@ void efx_sriov_probe(struct efx_nic *efx)
 /* efx_nic_dimension_resources() will reduce vf_count as appropriate */
efx->vf_count = count;
- efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type;
}
/* Copy the list of individual addresses into the vfdi_status.peers
 * array and auxiliary pages, protected by %local_lock. Drop that lock
* and then broadcast the address list to every VF.
*/
-static void efx_sriov_peer_work(struct work_struct *data)
+static void efx_siena_sriov_peer_work(struct work_struct *data)
{
- struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
- struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
+ struct siena_nic_data *nic_data = container_of(data,
+ struct siena_nic_data,
+ peer_work);
+ struct efx_nic *efx = nic_data->efx;
+ struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
struct efx_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
@@ -1068,11 +1086,11 @@ static void efx_sriov_peer_work(struct work_struct *data)
unsigned int peer_count;
unsigned int pos;
- mutex_lock(&efx->local_lock);
+ mutex_lock(&nic_data->local_lock);
/* Move the existing peer pages off %local_page_list */
INIT_LIST_HEAD(&pages);
- list_splice_tail_init(&efx->local_page_list, &pages);
+ list_splice_tail_init(&nic_data->local_page_list, &pages);
/* Populate the VF addresses starting from entry 1 (entry 0 is
* the PF address)
@@ -1094,7 +1112,7 @@ static void efx_sriov_peer_work(struct work_struct *data)
}
/* Fill the remaining addresses */
- list_for_each_entry(local_addr, &efx->local_addr_list, link) {
+ list_for_each_entry(local_addr, &nic_data->local_addr_list, link) {
ether_addr_copy(peer->mac_addr, local_addr->addr);
peer->tci = 0;
++peer;
@@ -1117,13 +1135,13 @@ static void efx_sriov_peer_work(struct work_struct *data)
list_del(&epp->link);
}
- list_add_tail(&epp->link, &efx->local_page_list);
+ list_add_tail(&epp->link, &nic_data->local_page_list);
peer = (struct vfdi_endpoint *)epp->ptr;
peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
}
}
vfdi_status->peer_count = peer_count;
- mutex_unlock(&efx->local_lock);
+ mutex_unlock(&nic_data->local_lock);
/* Free any now unused endpoint pages */
while (!list_empty(&pages)) {
@@ -1141,25 +1159,26 @@ static void efx_sriov_peer_work(struct work_struct *data)
mutex_lock(&vf->status_lock);
if (vf->status_addr)
- __efx_sriov_push_vf_status(vf);
+ __efx_siena_sriov_push_vf_status(vf);
mutex_unlock(&vf->status_lock);
}
}
-static void efx_sriov_free_local(struct efx_nic *efx)
+static void efx_siena_sriov_free_local(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
struct efx_local_addr *local_addr;
struct efx_endpoint_page *epp;
- while (!list_empty(&efx->local_addr_list)) {
- local_addr = list_first_entry(&efx->local_addr_list,
+ while (!list_empty(&nic_data->local_addr_list)) {
+ local_addr = list_first_entry(&nic_data->local_addr_list,
struct efx_local_addr, link);
list_del(&local_addr->link);
kfree(local_addr);
}
- while (!list_empty(&efx->local_page_list)) {
- epp = list_first_entry(&efx->local_page_list,
+ while (!list_empty(&nic_data->local_page_list)) {
+ epp = list_first_entry(&nic_data->local_page_list,
struct efx_endpoint_page, link);
list_del(&epp->link);
dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
@@ -1168,7 +1187,7 @@ static void efx_sriov_free_local(struct efx_nic *efx)
}
}
-static int efx_sriov_vf_alloc(struct efx_nic *efx)
+static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
struct efx_vf *vf;
@@ -1185,8 +1204,8 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx)
vf->rx_filter_id = -1;
vf->tx_filter_mode = VF_TX_FILTER_AUTO;
vf->tx_filter_id = -1;
- INIT_WORK(&vf->req, efx_sriov_vfdi);
- INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
+ INIT_WORK(&vf->req, efx_siena_sriov_vfdi);
+ INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work);
init_waitqueue_head(&vf->flush_waitq);
mutex_init(&vf->status_lock);
mutex_init(&vf->txq_lock);
@@ -1195,7 +1214,7 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx)
return 0;
}
-static void efx_sriov_vfs_fini(struct efx_nic *efx)
+static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
struct efx_vf *vf;
unsigned int pos;
@@ -1212,9 +1231,10 @@ static void efx_sriov_vfs_fini(struct efx_nic *efx)
}
}
-static int efx_sriov_vfs_init(struct efx_nic *efx)
+static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
struct efx_vf *vf;
@@ -1227,7 +1247,7 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);
- buftbl_base = efx->vf_buftbl_base;
+ buftbl_base = nic_data->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
vf = efx->vf + index;
@@ -1253,13 +1273,14 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
return 0;
fail:
- efx_sriov_vfs_fini(efx);
+ efx_siena_sriov_vfs_fini(efx);
return rc;
}
-int efx_sriov_init(struct efx_nic *efx)
+int efx_siena_sriov_init(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
+ struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_status *vfdi_status;
int rc;
@@ -1271,15 +1292,15 @@ int efx_sriov_init(struct efx_nic *efx)
if (efx->vf_count == 0)
return 0;
- rc = efx_sriov_cmd(efx, true, NULL, NULL);
+ rc = efx_siena_sriov_cmd(efx, true, NULL, NULL);
if (rc)
goto fail_cmd;
- rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
- GFP_KERNEL);
+ rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status,
+ sizeof(*vfdi_status), GFP_KERNEL);
if (rc)
goto fail_status;
- vfdi_status = efx->vfdi_status.addr;
+ vfdi_status = nic_data->vfdi_status.addr;
memset(vfdi_status, 0, sizeof(*vfdi_status));
vfdi_status->version = 1;
vfdi_status->length = sizeof(*vfdi_status);
@@ -1289,16 +1310,16 @@ int efx_sriov_init(struct efx_nic *efx)
vfdi_status->peer_count = 1 + efx->vf_count;
vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
- rc = efx_sriov_vf_alloc(efx);
+ rc = efx_siena_sriov_vf_alloc(efx);
if (rc)
goto fail_alloc;
- mutex_init(&efx->local_lock);
- INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
- INIT_LIST_HEAD(&efx->local_addr_list);
- INIT_LIST_HEAD(&efx->local_page_list);
+ mutex_init(&nic_data->local_lock);
+ INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work);
+ INIT_LIST_HEAD(&nic_data->local_addr_list);
+ INIT_LIST_HEAD(&nic_data->local_page_list);
- rc = efx_sriov_vfs_init(efx);
+ rc = efx_siena_sriov_vfs_init(efx);
if (rc)
goto fail_vfs;
@@ -1307,7 +1328,7 @@ int efx_sriov_init(struct efx_nic *efx)
efx->vf_init_count = efx->vf_count;
rtnl_unlock();
- efx_sriov_usrev(efx, true);
+ efx_siena_sriov_usrev(efx, true);
/* At this point we must be ready to accept VFDI requests */
@@ -1321,34 +1342,35 @@ int efx_sriov_init(struct efx_nic *efx)
return 0;
fail_pci:
- efx_sriov_usrev(efx, false);
+ efx_siena_sriov_usrev(efx, false);
rtnl_lock();
efx->vf_init_count = 0;
rtnl_unlock();
- efx_sriov_vfs_fini(efx);
+ efx_siena_sriov_vfs_fini(efx);
fail_vfs:
- cancel_work_sync(&efx->peer_work);
- efx_sriov_free_local(efx);
+ cancel_work_sync(&nic_data->peer_work);
+ efx_siena_sriov_free_local(efx);
kfree(efx->vf);
fail_alloc:
- efx_nic_free_buffer(efx, &efx->vfdi_status);
+ efx_nic_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
- efx_sriov_cmd(efx, false, NULL, NULL);
+ efx_siena_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
return rc;
}
-void efx_sriov_fini(struct efx_nic *efx)
+void efx_siena_sriov_fini(struct efx_nic *efx)
{
struct efx_vf *vf;
unsigned int pos;
+ struct siena_nic_data *nic_data = efx->nic_data;
if (efx->vf_init_count == 0)
return;
/* Disable all interfaces to reconfiguration */
- BUG_ON(efx->vfdi_channel->enabled);
- efx_sriov_usrev(efx, false);
+ BUG_ON(nic_data->vfdi_channel->enabled);
+ efx_siena_sriov_usrev(efx, false);
rtnl_lock();
efx->vf_init_count = 0;
rtnl_unlock();
@@ -1359,19 +1381,19 @@ void efx_sriov_fini(struct efx_nic *efx)
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
- cancel_work_sync(&efx->peer_work);
+ cancel_work_sync(&nic_data->peer_work);
pci_disable_sriov(efx->pci_dev);
/* Tear down back-end state */
- efx_sriov_vfs_fini(efx);
- efx_sriov_free_local(efx);
+ efx_siena_sriov_vfs_fini(efx);
+ efx_siena_sriov_free_local(efx);
kfree(efx->vf);
- efx_nic_free_buffer(efx, &efx->vfdi_status);
- efx_sriov_cmd(efx, false, NULL, NULL);
+ efx_nic_free_buffer(efx, &nic_data->vfdi_status);
+ efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
-void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_vf *vf;
@@ -1428,7 +1450,7 @@ error:
vf->req_seqno = seq + 1;
}
-void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
struct efx_vf *vf;
@@ -1445,18 +1467,19 @@ void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
vf->evq0_count = 0;
}
-void efx_sriov_mac_address_changed(struct efx_nic *efx)
+void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
{
- struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
if (!efx->vf_init_count)
return;
ether_addr_copy(vfdi_status->peers[0].mac_addr,
efx->net_dev->dev_addr);
- queue_work(vfdi_workqueue, &efx->peer_work);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
}
-void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_vf *vf;
unsigned queue, qid;
@@ -1475,7 +1498,7 @@ void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
wake_up(&vf->flush_waitq);
}
-void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_vf *vf;
unsigned ev_failed, queue, qid;
@@ -1500,7 +1523,7 @@ void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
}
/* Called from napi. Schedule the reset work item */
-void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
struct efx_vf *vf;
unsigned int rel;
@@ -1516,7 +1539,7 @@ void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
}
/* Reset all VFs */
-void efx_sriov_reset(struct efx_nic *efx)
+void efx_siena_sriov_reset(struct efx_nic *efx)
{
unsigned int vf_i;
struct efx_buffer buf;
@@ -1527,15 +1550,15 @@ void efx_sriov_reset(struct efx_nic *efx)
if (efx->vf_init_count == 0)
return;
- efx_sriov_usrev(efx, true);
- (void)efx_sriov_cmd(efx, true, NULL, NULL);
+ efx_siena_sriov_usrev(efx, true);
+ (void)efx_siena_sriov_cmd(efx, true, NULL, NULL);
if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
vf = efx->vf + vf_i;
- efx_sriov_reset_vf(vf, &buf);
+ efx_siena_sriov_reset_vf(vf, &buf);
}
efx_nic_free_buffer(efx, &buf);
@@ -1543,8 +1566,8 @@ void efx_sriov_reset(struct efx_nic *efx)
int efx_init_sriov(void)
{
- /* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
- * efx_sriov_peer_work() spend almost all their time sleeping for
+ /* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and
+ * efx_siena_sriov_peer_work() spend almost all their time sleeping for
* MCDI to complete anyway
*/
vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
@@ -1559,7 +1582,7 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
@@ -1570,14 +1593,14 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
mutex_lock(&vf->status_lock);
ether_addr_copy(vf->addr.mac_addr, mac);
- __efx_sriov_update_vf_addr(vf);
+ __efx_siena_sriov_update_vf_addr(vf);
mutex_unlock(&vf->status_lock);
return 0;
}
-int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
- u16 vlan, u8 qos)
+int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+ u16 vlan, u8 qos)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
@@ -1590,14 +1613,14 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
vf->addr.tci = htons(tci);
- __efx_sriov_update_vf_addr(vf);
+ __efx_siena_sriov_update_vf_addr(vf);
mutex_unlock(&vf->status_lock);
return 0;
}
-int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
- bool spoofchk)
+int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
@@ -1620,8 +1643,8 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
return rc;
}
-int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
- struct ifla_vf_info *ivi)
+int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
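The hunks above mechanically swap efx->X for nic_data->X: the Siena-only SR-IOV state leaves the shared struct efx_nic and moves into the per-NIC private data hung off efx->nic_data. A minimal sketch of the resulting access pattern, assuming the field names shown in the patch (the full struct layout lives in Siena-specific headers not shown here and is illustrative only):

	struct siena_nic_data {
		struct efx_buffer vfdi_status;
		struct list_head local_addr_list;
		struct list_head local_page_list;
		struct mutex local_lock;	/* serialises the two lists */
		struct work_struct peer_work;
		unsigned int vf_buftbl_base;
		/* ... other Siena-only state ... */
	};

	static void siena_example(struct efx_nic *efx)
	{
		/* every former efx->local_* access now goes one hop further */
		struct siena_nic_data *nic_data = efx->nic_data;

		mutex_lock(&nic_data->local_lock);
		/* operate on nic_data->local_addr_list / local_page_list */
		mutex_unlock(&nic_data->local_lock);
	}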
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 77ed74561e5f..f9c87624a0af 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -59,6 +59,8 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
+
#include "smsc911x.h"
#define SMSC_CHIPNAME "smsc911x"
@@ -2338,6 +2340,9 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
free_netdev(dev);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
@@ -2491,6 +2496,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (pdata->config.shift)
pdata->ops = &shifted_smsc911x_ops;
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
retval = smsc911x_init(dev);
if (retval < 0)
goto out_disable_resources;
@@ -2572,6 +2580,8 @@ out_unregister_netdev_5:
out_free_irq:
free_irq(dev->irq, dev);
out_disable_resources:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
(void)smsc911x_disable_resources(pdev);
out_enable_resources_fail:
smsc911x_free_resources(pdev);
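The smsc911x change is the standard runtime-PM bracket: enable runtime PM and take a synchronous reference before the first register access in probe, then drop both again in the remove and probe-error paths. A hedged sketch of the pattern, where example_hw_init() is a hypothetical stand-in for the register-touching init:

	#include <linux/pm_runtime.h>

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);
		pm_runtime_get_sync(&pdev->dev);	/* power up before MMIO */

		ret = example_hw_init(pdev);		/* hypothetical */
		if (ret) {
			pm_runtime_put(&pdev->dev);
			pm_runtime_disable(&pdev->dev);
		}
		return ret;
	}

On SoCs where the controller sits inside a power domain this keeps the domain up for the lifetime of the interface; without the get/put pair the device may be powered down underneath the driver.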
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index b02d4a3ffa37..7d3af190be55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -14,62 +14,20 @@ config STMMAC_ETH
if STMMAC_ETH
config STMMAC_PLATFORM
- bool "STMMAC Platform bus support"
+ tristate "STMMAC Platform bus support"
depends on STMMAC_ETH
default y
---help---
- This selects the platform specific bus support for
- the stmmac device driver. This is the driver used
- on many embedded STM platforms based on ARM and SuperH
- processors.
+ This selects the platform specific bus support for the stmmac driver.
+ This is the driver used on several SoCs:
+ STi, Allwinner, Amlogic Meson, Altera SOCFPGA.
+
If you have a controller with this interface, say Y or M here.
If unsure, say N.
-config DWMAC_MESON
- bool "Amlogic Meson dwmac support"
- depends on STMMAC_PLATFORM && ARCH_MESON
- help
- Support for Ethernet controller on Amlogic Meson SoCs.
-
- This selects the Amlogic Meson SoC glue layer support for
- the stmmac device driver. This driver is used for Meson6 and
- Meson8 SoCs.
-
-config DWMAC_SOCFPGA
- bool "SOCFPGA dwmac support"
- depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST)
- help
- Support for ethernet controller on Altera SOCFPGA
-
- This selects the Altera SOCFPGA SoC glue layer support
- for the stmmac device driver. This driver is used for
- arria5 and cyclone5 FPGA SoCs.
-
-config DWMAC_SUNXI
- bool "Allwinner GMAC support"
- depends on STMMAC_PLATFORM && ARCH_SUNXI
- default y
- ---help---
- Support for Allwinner A20/A31 GMAC ethernet controllers.
-
- This selects Allwinner SoC glue layer support for the
- stmmac device driver. This driver is used for A20/A31
- GMAC ethernet controller.
-
-config DWMAC_STI
- bool "STi GMAC support"
- depends on STMMAC_PLATFORM && ARCH_STI
- default y
- ---help---
- Support for ethernet controller on STi SOCs.
-
- This selects STi SoC glue layer support for the stmmac
- device driver. This driver is used on for the STi series
- SOCs GMAC ethernet controller.
-
config STMMAC_PCI
- bool "STMMAC PCI bus support"
+ tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
---help---
This is to select the Synopsys DWMAC available on PCI devices,
@@ -79,22 +37,4 @@ config STMMAC_PCI
D1215994A VIRTEX FPGA board.
If unsure, say N.
-
-config STMMAC_DEBUG_FS
- bool "Enable monitoring via sysFS "
- default n
- depends on STMMAC_ETH && DEBUG_FS
- ---help---
- The stmmac entry in /sys reports DMA TX/RX rings
- or (if supported) the HW cap register.
-
-config STMMAC_DA
- bool "STMMAC DMA arbitration scheme"
- default n
- ---help---
- Selecting this option, rx has priority over Tx (only for Giga
- Ethernet device).
- By default, the DMA arbitration scheme is based on Round-robin
- (rx:tx priority is 1:1).
-
endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 0533d0ba783d..ac4d5629d905 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,11 +1,12 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
-stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
-stmmac-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
-stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
-stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
-stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
- chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
- dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
+
+obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
+stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
+ dwmac-sti.o dwmac-socfpga.o
+
+obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 593e6c4144a7..cd77289c3cfe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -44,6 +44,7 @@
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
+/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
unsigned long tx_underflow ____cacheline_aligned;
@@ -220,6 +221,7 @@ enum dma_irq_status {
handle_tx = 0x8,
};
+/* EEE and LPI defines */
#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
@@ -229,6 +231,7 @@ enum dma_irq_status {
#define CORE_PCS_LINK_STATUS (1 << 6)
#define CORE_RGMII_IRQ (1 << 7)
+/* Physical Coding Sublayer */
struct rgmii_adv {
unsigned int pause;
unsigned int duplex;
@@ -294,6 +297,7 @@ struct dma_features {
#define JUMBO_LEN 9000
+/* Descriptors helpers */
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
@@ -341,6 +345,10 @@ struct stmmac_desc_ops {
int (*get_rx_timestamp_status) (void *desc, u32 ats);
};
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+/* Specific DMA helpers */
struct stmmac_dma_ops {
/* DMA core initialization */
int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
@@ -370,6 +378,7 @@ struct stmmac_dma_ops {
struct mac_device_info;
+/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, int mtu);
@@ -400,6 +409,7 @@ struct stmmac_ops {
void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv);
};
+/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
void (*config_sub_second_increment) (void __iomem *ioaddr);
@@ -410,6 +420,8 @@ struct stmmac_hwtimestamp {
u64(*get_systime) (void __iomem *ioaddr);
};
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+
struct mac_link {
int port;
int duplex;
@@ -421,6 +433,7 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
+/* Helpers to manage the descriptors for chain and ring modes */
struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
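The extern declarations added above let the core pick a descriptor flavour at runtime while each implementation stays in its own object file. Roughly how the core consumes them — a sketch; stmmac_selec_desc_mode() carries the authoritative logic:

	/* enhanced/alternate descriptors when the platform or the HW
	 * capability register advertises them, normal descriptors otherwise */
	if (priv->plat->enh_desc)
		priv->hw->desc = &enh_desc_ops;
	else
		priv->hw->desc = &ndesc_ops;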
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index d225a603e604..cca028d632f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -18,6 +18,8 @@
#include <linux/platform_device.h>
#include <linux/stmmac.h>
+#include "stmmac_platform.h"
+
#define ETHMAC_SPEED_100 BIT(1)
struct meson_dwmac {
@@ -56,7 +58,7 @@ static void *meson6_dwmac_setup(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dwmac->reg))
- return dwmac->reg;
+ return ERR_CAST(dwmac->reg);
return dwmac;
}
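The meson fix matters because meson6_dwmac_setup() returns plain void * while devm_ioremap_resource() hands back a void __iomem * that may carry an ERR_PTR-encoded errno. ERR_CAST() propagates that error value while explicitly dropping the __iomem annotation, keeping sparse quiet. The general shape, hedged (struct example_priv is illustrative):

	#include <linux/err.h>

	struct example_priv {
		void __iomem *reg;
	};

	static void *example_setup(struct platform_device *pdev)
	{
		struct example_priv *priv;
		struct resource *res;
		void __iomem *reg;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		reg = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(reg))
			return ERR_CAST(reg);	/* same errno, plain pointer type */

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return ERR_PTR(-ENOMEM);
		priv->reg = reg;
		return priv;
	}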
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 3aad413e74b4..e97074cd5800 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -23,7 +23,9 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/stmmac.h>
+
#include "stmmac.h"
+#include "stmmac_platform.h"
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index ccfe7e510418..0e137751e76e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -1,4 +1,4 @@
-/**
+/*
* dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
*
* Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
@@ -22,6 +22,8 @@
#include <linux/of.h>
#include <linux/of_net.h>
+#include "stmmac_platform.h"
+
#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
#define DWMAC_25MHZ 25000000
@@ -35,9 +37,8 @@
#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
-/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) */
-
-/**
+/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families)
+ *
* Below table summarizes the clock requirement and clock sources for
* supported phy interface modes with link speeds.
* ________________________________________________
@@ -76,9 +77,7 @@
#define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
#define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
-/* STiD127 register definitions */
-
-/**
+/* STiD127 register definitions
*-----------------------
* src |BIT(6)| BIT(7)|
*-----------------------
@@ -104,13 +103,13 @@
#define EN_MASK GENMASK(1, 1)
#define EN BIT(1)
-/**
+/*
* 3 bits [4:2]
* 000-GMII/MII
* 001-RGMII
* 010-SGMII
* 100-RMII
-*/
+ */
#define MII_PHY_SEL_MASK GENMASK(4, 2)
#define ETH_PHY_SEL_RMII BIT(4)
#define ETH_PHY_SEL_SGMII BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 771cd15fca18..c5ea9ab75b03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -1,4 +1,4 @@
-/**
+/*
* dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer
*
* Copyright (C) 2013 Chen-Yu Tsai
@@ -22,6 +22,8 @@
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
+#include "stmmac_platform.h"
+
struct sunxi_priv_data {
int interface;
int clk_enabled;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 0c2058a69fd2..59d92e811750 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -70,10 +70,6 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
if (mb)
value |= DMA_BUS_MODE_MB;
-#ifdef CONFIG_STMMAC_DA
- value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
-#endif
-
if (atds)
value |= DMA_BUS_MODE_ATDS;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c3c40650b309..c0a391983372 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -122,9 +122,7 @@ int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
+
int stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct net_device *ndev);
@@ -136,77 +134,4 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
-#ifdef CONFIG_STMMAC_PLATFORM
-#ifdef CONFIG_DWMAC_MESON
-extern const struct stmmac_of_data meson6_dwmac_data;
-#endif
-#ifdef CONFIG_DWMAC_SUNXI
-extern const struct stmmac_of_data sun7i_gmac_data;
-#endif
-#ifdef CONFIG_DWMAC_STI
-extern const struct stmmac_of_data stih4xx_dwmac_data;
-extern const struct stmmac_of_data stid127_dwmac_data;
-#endif
-#ifdef CONFIG_DWMAC_SOCFPGA
-extern const struct stmmac_of_data socfpga_gmac_data;
-#endif
-extern struct platform_driver stmmac_pltfr_driver;
-static inline int stmmac_register_platform(void)
-{
- int err;
-
- err = platform_driver_register(&stmmac_pltfr_driver);
- if (err)
- pr_err("stmmac: failed to register the platform driver\n");
-
- return err;
-}
-
-static inline void stmmac_unregister_platform(void)
-{
- platform_driver_unregister(&stmmac_pltfr_driver);
-}
-#else
-static inline int stmmac_register_platform(void)
-{
- pr_debug("stmmac: do not register the platf driver\n");
-
- return 0;
-}
-
-static inline void stmmac_unregister_platform(void)
-{
-}
-#endif /* CONFIG_STMMAC_PLATFORM */
-
-#ifdef CONFIG_STMMAC_PCI
-extern struct pci_driver stmmac_pci_driver;
-static inline int stmmac_register_pci(void)
-{
- int err;
-
- err = pci_register_driver(&stmmac_pci_driver);
- if (err)
- pr_err("stmmac: failed to register the PCI driver\n");
-
- return err;
-}
-
-static inline void stmmac_unregister_pci(void)
-{
- pci_unregister_driver(&stmmac_pci_driver);
-}
-#else
-static inline int stmmac_register_pci(void)
-{
- pr_debug("stmmac: do not register the PCI driver\n");
-
- return 0;
-}
-
-static inline void stmmac_unregister_pci(void)
-{
-}
-#endif /* CONFIG_STMMAC_PCI */
-
#endif /* __STMMAC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 18c46bb0f3bf..118a427d1942 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -44,10 +44,10 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#endif /* CONFIG_STMMAC_DEBUG_FS */
+#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
@@ -116,7 +116,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(void);
#endif
@@ -125,8 +125,8 @@ static void stmmac_exit_fs(void);
/**
* stmmac_verify_args - verify the driver parameters.
- * Description: it verifies if some wrong parameter is passed to the driver.
- * Note that wrong parameters are replaced with the default values.
+ * Description: it checks the driver parameters and sets defaults in case
+ * of errors.
*/
static void stmmac_verify_args(void)
{
@@ -191,14 +191,8 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
static void print_pkt(unsigned char *buf, int len)
{
- int j;
- pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
- for (j = 0; j < len; j++) {
- if ((j % 16) == 0)
- pr_debug("\n %03x:", j);
- pr_debug(" %02x", buf[j]);
- }
- pr_debug("\n");
+ pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
/* minimum number of free TX descriptors required to wake up TX process */
@@ -210,7 +204,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
}
/**
- * stmmac_hw_fix_mac_speed: callback for speed selection
+ * stmmac_hw_fix_mac_speed - callback for speed selection
* @priv: driver private structure
* Description: on some platforms (e.g. ST), some HW system configuration
* registers have to be set according to the link speed negotiated.
@@ -224,9 +218,10 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
}
/**
- * stmmac_enable_eee_mode: Check and enter in LPI mode
+ * stmmac_enable_eee_mode - check and enter in LPI mode
* @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode for EEE.
+ * Description: this function verifies and enters LPI mode for EEE.
*/
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
@@ -237,7 +232,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_disable_eee_mode: disable/exit from EEE
+ * stmmac_disable_eee_mode - disable and exit from LPI mode
* @priv: driver private structure
* Description: this function is to exit and disable EEE in case
* the LPI state is true. This is called by the xmit.
@@ -250,7 +245,7 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_eee_ctrl_timer: EEE TX SW timer.
+ * stmmac_eee_ctrl_timer - EEE TX SW timer.
* @arg : data hook
* Description:
* if there is no data transfer and if we are not in LPI state,
@@ -265,13 +260,12 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
}
/**
- * stmmac_eee_init: init EEE
+ * stmmac_eee_init - init EEE
* @priv: driver private structure
* Description:
- * If the EEE support has been enabled while configuring the driver,
- * if the GMAC actually supports the EEE (from the HW cap reg) and the
- * phy can also manage EEE, so enable the LPI state and start the timer
- * to verify if the tx path can enter in LPI state.
+ * if the GMAC supports the EEE (from the HW cap reg) and the phy device
+ * can also manage EEE, this function enables the LPI state and starts the
+ * related timer.
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
@@ -338,7 +332,7 @@ out:
return ret;
}
-/* stmmac_get_tx_hwtstamp: get HW TX timestamps
+/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @entry : descriptor index to be used.
* @skb : the socket buffer
@@ -380,7 +374,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
return;
}
-/* stmmac_get_rx_hwtstamp: get HW RX timestamps
+/* stmmac_get_rx_hwtstamp - get HW RX timestamps
* @priv: driver private structure
* @entry : descriptor index to be used.
* @skb : the socket buffer
@@ -636,11 +630,11 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
}
/**
- * stmmac_init_ptp: init PTP
+ * stmmac_init_ptp - init PTP
* @priv: driver private structure
- * Description: this is to verify if the HW supports the PTPv1 or v2.
+ * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
* This is done by looking at the HW cap. register.
- * Also it registers the ptp driver.
+ * This function also registers the ptp driver.
*/
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
@@ -682,9 +676,13 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
}
/**
- * stmmac_adjust_link
+ * stmmac_adjust_link - adjusts the link parameters
* @dev: net device structure
- * Description: it adjusts the link parameters.
+ * Description: this is the helper called by the physical abstraction layer
+ * drivers to communicate the phy link status. According to the speed and
+ * duplex this driver can invoke registered glue-logic as well.
+ * It also invokes the eee initialization because the link can switch
+ * between networks that differ in EEE capability.
*/
static void stmmac_adjust_link(struct net_device *dev)
{
@@ -774,7 +772,7 @@ static void stmmac_adjust_link(struct net_device *dev)
}
/**
- * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
+ * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
* @priv: driver private structure
* Description: this is to verify if the HW supports the PCS.
* Physical Coding Sublayer (PCS) interface that can be used when the MAC is
@@ -863,7 +861,7 @@ static int stmmac_init_phy(struct net_device *dev)
}
/**
- * stmmac_display_ring: display ring
+ * stmmac_display_ring - display ring
* @head: pointer to the head of the ring passed.
* @size: size of the ring.
* @extend_desc: to verify if extended descriptors are used.
@@ -931,7 +929,7 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
}
/**
- * stmmac_clear_descriptors: clear descriptors
+ * stmmac_clear_descriptors - clear descriptors
* @priv: driver private structure
* Description: this function is called to clear the tx and rx descriptors
* in case of both basic and extended descriptors are used.
@@ -963,6 +961,15 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
(i == txsize - 1));
}
+/**
+ * stmmac_init_rx_buffers - init the RX descriptor buffer.
+ * @priv: driver private structure
+ * @p: descriptor pointer
+ * @i: descriptor index
+ * @flags: gfp flag.
+ * Description: this function is called to allocate a receive buffer, perform
+ * the DMA mapping and init the descriptor.
+ */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
int i, gfp_t flags)
{
@@ -1007,7 +1014,8 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
- * Description: this function initializes the DMA RX/TX descriptors
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
@@ -1144,6 +1152,14 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
}
}
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extended or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocates the RX socket buffers in order to
+ * allow the zero-copy mechanism.
+ */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
unsigned int txsize = priv->dma_tx_size;
@@ -1255,8 +1271,8 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
/**
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv: driver private structure
- * Description: it sets the DMA operation mode: tx/rx DMA thresholds
- * or Store-And-Forward capability.
+ * Description: it is used for configuring the DMA operation mode register in
+ * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
@@ -1277,9 +1293,9 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_tx_clean:
+ * stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
- * Description: it reclaims resources after transmission completes.
+ * Description: it reclaims the transmit resources after transmission completes.
*/
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
@@ -1378,10 +1394,10 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
}
/**
- * stmmac_tx_err: irq tx error mng function
+ * stmmac_tx_err - to manage the tx error
* @priv: driver private structure
* Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
+ * in case of transmission errors.
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
@@ -1409,12 +1425,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
}
/**
- * stmmac_dma_interrupt: DMA ISR
+ * stmmac_dma_interrupt - DMA ISR
* @priv: driver private structure
* Description: this is the DMA ISR. It is called by the main ISR.
- * It calls the dwmac dma routine to understand which type of interrupt
- * happened. In case of there is a Normal interrupt and either TX or RX
- * interrupt happened so the NAPI is scheduled.
+ * It calls the dwmac dma routine and schedules the poll method in case
+ * some work can be done.
*/
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
@@ -1457,6 +1472,12 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
pr_info(" No MAC Management Counters available\n");
}
+/**
+ * stmmac_get_synopsys_id - return the SYINID.
+ * @priv: driver private structure
+ * Description: this simple function decodes and returns the SYINID
+ * from the HW core register.
+ */
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
{
u32 hwid = priv->hw->synopsys_uid;
@@ -1475,11 +1496,11 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
}
/**
- * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors
+ * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
* @priv: driver private structure
* Description: select the Enhanced/Alternate or Normal descriptors.
- * In case of Enhanced/Alternate, it looks at the extended descriptors are
- * supported by the HW cap. register.
+ * In case of Enhanced/Alternate, it checks if the extended descriptors are
+ * supported by the HW capability register.
*/
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
@@ -1501,7 +1522,7 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
+ * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
* @priv: driver private structure
* Description:
* new GMAC chip generations have a new register to indicate the
@@ -1559,7 +1580,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
}
/**
- * stmmac_check_ether_addr: check if the MAC addr is valid
+ * stmmac_check_ether_addr - check if the MAC addr is valid
* @priv: driver private structure
* Description:
* it is to verify if the MAC address is valid, in case of failures it
@@ -1578,7 +1599,7 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
}
/**
- * stmmac_init_dma_engine: DMA init.
+ * stmmac_init_dma_engine - DMA init.
* @priv: driver private structure
* Description:
* It inits the DMA invoking the specific MAC/GMAC callback.
@@ -1607,7 +1628,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
/**
- * stmmac_tx_timer: mitigation sw timer for tx.
+ * stmmac_tx_timer - mitigation sw timer for tx.
* @data: data pointer
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
@@ -1620,7 +1641,7 @@ static void stmmac_tx_timer(unsigned long data)
}
/**
- * stmmac_init_tx_coalesce: init tx mitigation options.
+ * stmmac_init_tx_coalesce - init tx mitigation options.
* @priv: driver private structure
* Description:
* This inits the transmit coalesce parameters: i.e. timer rate,
@@ -1639,10 +1660,13 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
}
/**
- * stmmac_hw_setup: setup mac in a usable state.
+ * stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
* Description:
- * This function sets up the ip in a usable state.
+ * this is the main function to set up the HW in a usable state: the DMA
+ * engine is reset, the core registers are configured (e.g. AXI,
+ * checksum features, timers) and the DMA is ready to start receiving and
+ * transmitting.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
@@ -1688,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev)
if (ret && ret != -EOPNOTSUPP)
pr_warn("%s: failed PTP initialisation\n", __func__);
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);
if (ret < 0)
pr_warn("%s: failed debugFS registration\n", __func__);
@@ -1870,7 +1894,7 @@ static int stmmac_release(struct net_device *dev)
netif_carrier_off(dev);
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs();
#endif
@@ -1880,7 +1904,7 @@ static int stmmac_release(struct net_device *dev)
}
/**
- * stmmac_xmit: Tx entry point of the driver
+ * stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
* Description : this is the tx entry point of the driver.
@@ -2055,7 +2079,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
/**
- * stmmac_rx_refill: refill used skb preallocated buffers
+ * stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
* Description : this is to reallocate the skb for the reception process
* that is based on zero-copy.
@@ -2106,7 +2130,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
/**
- * stmmac_rx_refill: refill used skb preallocated buffers
+ * stmmac_rx - manage the receive process
* @priv: driver private structure
* @limit: napi budget.
* Description : this is the function called by the napi poll method.
@@ -2375,8 +2399,11 @@ static int stmmac_set_features(struct net_device *netdev,
* @irq: interrupt number.
* @dev_id: to pass the net device pointer.
* Description: this is the main driver interrupt service routine.
- * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
- * interrupts.
+ * It can call:
+ * o DMA service routine (to manage incoming frame reception and transmission
+ * status)
+ * o Core interrupts to manage: remote wake-up, management counter, LPI
+ * interrupts.
*/
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
@@ -2457,7 +2484,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
-#ifdef CONFIG_STMMAC_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;
static struct dentry *stmmac_rings_status;
static struct dentry *stmmac_dma_cap;
@@ -2642,7 +2669,7 @@ static void stmmac_exit_fs(void)
debugfs_remove(stmmac_dma_cap);
debugfs_remove(stmmac_fs_dir);
}
-#endif /* CONFIG_STMMAC_DEBUG_FS */
+#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
@@ -2663,11 +2690,10 @@ static const struct net_device_ops stmmac_netdev_ops = {
/**
* stmmac_hw_init - Init the MAC device
* @priv: driver private structure
- * Description: this function detects which MAC device
- * (GMAC/MAC10-100) has to attached, checks the HW capability
- * (if supported) and sets the driver's features (for example
- * to use the ring or chaine mode or support the normal/enh
- * descriptor structure).
+ * Description: this function configures the MAC device according to
+ * some platform parameters or the HW capability register. It prepares the
+ * driver to use either ring or chain modes and to set up either enhanced or
+ * normal descriptors.
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
@@ -2891,6 +2917,7 @@ error_clk_get:
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
* stmmac_dvr_remove
@@ -2920,8 +2947,15 @@ int stmmac_dvr_remove(struct net_device *ndev)
return 0;
}
+EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
-#ifdef CONFIG_PM
+/**
+ * stmmac_suspend - suspend callback
+ * @ndev: net device pointer
+ * Description: this is the function to suspend the device and it is called
+ * by the platform driver to stop the network queue, release the resources,
+ * program the PMT register (for WoL) and clean up the driver state.
+ */
int stmmac_suspend(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -2963,7 +2997,14 @@ int stmmac_suspend(struct net_device *ndev)
priv->oldduplex = -1;
return 0;
}
+EXPORT_SYMBOL_GPL(stmmac_suspend);
+/**
+ * stmmac_resume - resume callback
+ * @ndev: net device pointer
+ * Description: on resume this function is invoked to set up the DMA and CORE
+ * in a usable state.
+ */
int stmmac_resume(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -3009,37 +3050,7 @@ int stmmac_resume(struct net_device *ndev)
return 0;
}
-#endif /* CONFIG_PM */
-
-/* Driver can be configured w/ and w/ both PCI and Platf drivers
- * depending on the configuration selected.
- */
-static int __init stmmac_init(void)
-{
- int ret;
-
- ret = stmmac_register_platform();
- if (ret)
- goto err;
- ret = stmmac_register_pci();
- if (ret)
- goto err_pci;
- return 0;
-err_pci:
- stmmac_unregister_platform();
-err:
- pr_err("stmmac: driver registration failed\n");
- return ret;
-}
-
-static void __exit stmmac_exit(void)
-{
- stmmac_unregister_platform();
- stmmac_unregister_pci();
-}
-
-module_init(stmmac_init);
-module_exit(stmmac_exit);
+EXPORT_SYMBOL_GPL(stmmac_resume);
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
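With the hand-rolled stmmac_init()/stmmac_exit() gone, the core only exports its entry points (the EXPORT_SYMBOL_GPL additions above) and each bus front end registers itself with the usual one-line helper. A sketch of what the platform side reduces to (names illustrative):

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.remove	= example_remove,
		.driver	= {
			.name	= "example-dwmac",
		},
	};
	/* expands to the module init/exit boilerplate removed above */
	module_platform_driver(example_driver);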
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index e17a970eaf2b..054520d67de4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -26,34 +26,26 @@
#include <linux/pci.h>
#include "stmmac.h"
-static struct plat_stmmacenet_data plat_dat;
-static struct stmmac_mdio_bus_data mdio_data;
-static struct stmmac_dma_cfg dma_cfg;
-
-static void stmmac_default_data(void)
+static void stmmac_default_data(struct plat_stmmacenet_data *plat)
{
- memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
-
- plat_dat.bus_id = 1;
- plat_dat.phy_addr = 0;
- plat_dat.interface = PHY_INTERFACE_MODE_GMII;
- plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
- plat_dat.has_gmac = 1;
- plat_dat.force_sf_dma_mode = 1;
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
- mdio_data.phy_reset = NULL;
- mdio_data.phy_mask = 0;
- plat_dat.mdio_bus_data = &mdio_data;
+ plat->mdio_bus_data->phy_reset = NULL;
+ plat->mdio_bus_data->phy_mask = 0;
- dma_cfg.pbl = 32;
- dma_cfg.burst_len = DMA_AXI_BLEN_256;
- plat_dat.dma_cfg = &dma_cfg;
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
/* Set default value for multicast hash bins */
- plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
/* Set default value for unicast filter entries */
- plat_dat.unicast_filter_entries = 1;
+ plat->unicast_filter_entries = 1;
}
/**
@@ -71,64 +63,61 @@ static void stmmac_default_data(void)
static int stmmac_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int ret = 0;
- void __iomem *addr = NULL;
- struct stmmac_priv *priv = NULL;
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_priv *priv;
int i;
+ int ret;
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
+ GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return -ENOMEM;
/* Enable pci device */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
- pr_err("%s : ERROR: failed to enable %s device\n", __func__,
- pci_name(pdev));
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+ __func__);
return ret;
}
- if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
- pr_err("%s: ERROR: failed to get PCI region\n", __func__);
- ret = -ENODEV;
- goto err_out_req_reg_failed;
- }
/* Get the base address of device */
- for (i = 0; i <= 5; i++) {
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- addr = pci_iomap(pdev, i, 0);
- if (addr == NULL) {
- pr_err("%s: ERROR: cannot map register memory aborting",
- __func__);
- ret = -EIO;
- goto err_out_map_failed;
- }
+ ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
+ if (ret)
+ return ret;
break;
}
+
pci_set_master(pdev);
- stmmac_default_data();
+ stmmac_default_data(plat);
- priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
+ priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
if (IS_ERR(priv)) {
- pr_err("%s: main driver probe failed", __func__);
- ret = PTR_ERR(priv);
- goto err_out;
+ dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__);
+ return PTR_ERR(priv);
}
priv->dev->irq = pdev->irq;
priv->wol_irq = pdev->irq;
pci_set_drvdata(pdev, priv->dev);
- pr_debug("STMMAC platform driver registration completed");
+ dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n");
return 0;
-
-err_out:
- pci_clear_master(pdev);
-err_out_map_failed:
- pci_release_regions(pdev);
-err_out_req_reg_failed:
- pci_disable_device(pdev);
-
- return ret;
}
/**
@@ -141,39 +130,30 @@ err_out_req_reg_failed:
static void stmmac_pci_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
stmmac_dvr_remove(ndev);
-
- pci_iounmap(pdev, priv->ioaddr);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int stmmac_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *ndev = pci_get_drvdata(pdev);
- int ret;
-
- ret = stmmac_suspend(ndev);
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return ret;
+ return stmmac_suspend(ndev);
}
-static int stmmac_pci_resume(struct pci_dev *pdev)
+static int stmmac_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *ndev = pci_get_drvdata(pdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
return stmmac_resume(ndev);
}
#endif
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+
#define STMMAC_VENDOR_ID 0x700
#define STMMAC_DEVICE_ID 0x1108
@@ -185,17 +165,18 @@ static const struct pci_device_id stmmac_id_table[] = {
MODULE_DEVICE_TABLE(pci, stmmac_id_table);
-struct pci_driver stmmac_pci_driver = {
+static struct pci_driver stmmac_pci_driver = {
.name = STMMAC_RESOURCE_NAME,
.id_table = stmmac_id_table,
.probe = stmmac_pci_probe,
.remove = stmmac_pci_remove,
-#ifdef CONFIG_PM
- .suspend = stmmac_pci_suspend,
- .resume = stmmac_pci_resume,
-#endif
+ .driver = {
+ .pm = &stmmac_pm_ops,
+ },
};
+module_pci_driver(stmmac_pci_driver);
+
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 58a1a0a423d4..4032b170fe24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -27,25 +27,19 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+
#include "stmmac.h"
+#include "stmmac_platform.h"
static const struct of_device_id stmmac_dt_ids[] = {
-#ifdef CONFIG_DWMAC_MESON
+ /* SoC specific glue layers should come before generic bindings */
{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
-#endif
-#ifdef CONFIG_DWMAC_SUNXI
{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
-#endif
-#ifdef CONFIG_DWMAC_STI
{ .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
{ .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
{ .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
{ .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
-#endif
-#ifdef CONFIG_DWMAC_SOCFPGA
{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
-#endif
- /* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
@@ -57,7 +51,11 @@ MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
#ifdef CONFIG_OF
-/* This function validates the number of Multicast filtering bins specified
+/**
+ * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
+ * @mcast_bins: Multicast filtering bins
+ * Description:
+ * this function validates the number of Multicast filtering bins specified
* by the configuration through the device tree. The Synopsys GMAC supports
* 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
* number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
@@ -83,7 +81,11 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
return x;
}
-/* This function validates the number of Unicast address entries supported
+/**
+ * dwmac1000_validate_ucast_entries - validate the Unicast address entries
+ * @ucast_entries: number of Unicast address entries
+ * Description:
+ * This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
supports 1, 32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
@@ -109,6 +111,15 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
return x;
}
+/**
+ * stmmac_probe_config_dt - parse device-tree driver parameters
+ * @pdev: platform_device structure
+ * @plat: driver data platform structure
+ * @mac: MAC address to use
+ * Description:
+ * this function reads the driver parameters from device-tree and
+ * sets some private fields that will be used by the main driver at runtime.
+ */
static int stmmac_probe_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
const char **mac)
@@ -242,11 +253,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
#endif /* CONFIG_OF */
/**
- * stmmac_pltfr_probe
+ * stmmac_pltfr_probe - platform driver probe.
* @pdev: platform device pointer
- * Description: platform_device probe function. It allocates
- * the necessary resources and invokes the main to init
- * the net device, register the mdio bus etc.
+ * Description: platform_device probe function. It allocates
+ * the necessary platform resources, invokes the custom helper (if required)
+ * and invokes the main probe function.
*/
static int stmmac_pltfr_probe(struct platform_device *pdev)
{
@@ -369,7 +380,14 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
return ret;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+/**
+ * stmmac_pltfr_suspend
+ * @dev: device pointer
+ * Description: this function is invoked when suspending the driver; it
+ * directly calls the main suspend function and then, if required on some
+ * platforms, it can call an exit helper.
+ */
static int stmmac_pltfr_suspend(struct device *dev)
{
int ret;
@@ -384,6 +402,13 @@ static int stmmac_pltfr_suspend(struct device *dev)
return ret;
}
+/**
+ * stmmac_pltfr_resume
+ * @dev: device pointer
+ * Description: this function is invoked when resuming the driver; before
+ * calling the main resume function, on some platforms, it can call its own
+ * init helper if required.
+ */
static int stmmac_pltfr_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -395,13 +420,12 @@ static int stmmac_pltfr_resume(struct device *dev)
return stmmac_resume(ndev);
}
-
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
- stmmac_pltfr_suspend, stmmac_pltfr_resume);
+ stmmac_pltfr_suspend, stmmac_pltfr_resume);
-struct platform_driver stmmac_pltfr_driver = {
+static struct platform_driver stmmac_pltfr_driver = {
.probe = stmmac_pltfr_probe,
.remove = stmmac_pltfr_remove,
.driver = {
@@ -409,9 +433,11 @@ struct platform_driver stmmac_pltfr_driver = {
.owner = THIS_MODULE,
.pm = &stmmac_pltfr_pm_ops,
.of_match_table = of_match_ptr(stmmac_dt_ids),
- },
+ },
};
+module_platform_driver(stmmac_pltfr_driver);
+
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
new file mode 100644
index 000000000000..25dd1f7ace02
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -0,0 +1,28 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __STMMAC_PLATFORM_H__
+#define __STMMAC_PLATFORM_H__
+
+extern const struct stmmac_of_data meson6_dwmac_data;
+extern const struct stmmac_of_data sun7i_gmac_data;
+extern const struct stmmac_of_data stih4xx_dwmac_data;
+extern const struct stmmac_of_data stid127_dwmac_data;
+extern const struct stmmac_of_data socfpga_gmac_data;
+
+#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 02d370e58110..3dc1f68b322d 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -5179,8 +5179,7 @@ static void cas_remove_one(struct pci_dev *pdev)
cp = netdev_priv(dev);
unregister_netdev(dev);
- if (cp->fw_data)
- vfree(cp->fw_data);
+ vfree(cp->fw_data);
mutex_lock(&cp->pm_mutex);
cancel_work_sync(&cp->reset_task);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 904fd1ab5f6e..4aaa3240453a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6651,13 +6651,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->len < ETH_ZLEN) {
- unsigned int pad_bytes = ETH_ZLEN - skb->len;
-
- if (skb_pad(skb, pad_bytes))
- goto out;
- skb_put(skb, pad_bytes);
- }
+ if (eth_skb_pad(skb))
+ goto out;
len = sizeof(struct tx_pkt_hdr) + 15;
if (skb_headroom(skb) < len) {
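eth_skb_pad() wraps the open-coded padding removed above: it zero-pads the frame to the minimum Ethernet length (ETH_ZLEN) and, on allocation failure, frees the skb itself before returning non-zero, so the caller must not touch the skb afterwards. A sketch of the calling convention:

	static netdev_tx_t example_xmit(struct sk_buff *skb,
					struct net_device *dev)
	{
		if (eth_skb_pad(skb)) {
			/* skb already freed by the helper */
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* ... map and queue the now >= 60-byte frame ... */
		return NETDEV_TX_OK;
	}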
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3652afd3ec78..90c86cd3be14 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -15,12 +15,14 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
+#include <linux/highmem.h>
#include <linux/if_vlan.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif
+#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
@@ -40,6 +42,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
+#define VNET_MAX_TXQS 16
+
/* Heuristic for the number of times to exponentially backoff and
* retry sending an LDC trigger when EAGAIN is encountered
*/
@@ -49,6 +53,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
+ { .major = 1, .minor = 8 },
+ { .major = 1, .minor = 7 },
{ .major = 1, .minor = 6 },
{ .major = 1, .minor = 0 },
};
@@ -71,13 +77,19 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg)
return -ECONNRESET;
}
+static int vnet_port_alloc_tx_ring(struct vnet_port *port);
+
static int vnet_send_attr(struct vio_driver_state *vio)
{
struct vnet_port *port = to_vnet_port(vio);
struct net_device *dev = port->vp->dev;
struct vio_net_attr_info pkt;
int framelen = ETH_FRAME_LEN;
- int i;
+ int i, err;
+
+ err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
+ if (err)
+ return err;
memset(&pkt, 0, sizeof(pkt));
pkt.tag.type = VIO_TYPE_CTRL;
@@ -108,8 +120,15 @@ static int vnet_send_attr(struct vio_driver_state *vio)
pkt.mtu = framelen + VLAN_HLEN;
}
- pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
pkt.cflags = 0;
+ if (vio_version_after_eq(vio, 1, 7) && port->tso) {
+ pkt.cflags |= VNET_LSO_IPV4_CAPAB;
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ pkt.ipv4_lso_maxlen = port->tsolen;
+ }
+
+ pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
"ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
@@ -163,6 +182,26 @@ static int handle_attr_info(struct vio_driver_state *vio,
}
port->rmtu = localmtu;
+ /* LSO negotiation */
+ if (vio_version_after_eq(vio, 1, 7))
+ port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
+ else
+ port->tso = false;
+ if (port->tso) {
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
+ if (port->tsolen < VNET_MINTSO) {
+ port->tso = false;
+ port->tsolen = 0;
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ }
+ pkt->ipv4_lso_maxlen = port->tsolen;
+ } else {
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ pkt->ipv4_lso_maxlen = 0;
+ }
+
/* for version >= 1.6, ACK packet mode we support */
if (vio_version_after_eq(vio, 1, 6)) {
pkt->xfer_mode = VIO_NEW_DRING_MODE;
@@ -274,10 +313,42 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
return skb;
}
-static int vnet_rx_one(struct vnet_port *port, unsigned int len,
- struct ldc_trans_cookie *cookies, int ncookies)
+static inline void vnet_fullcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ int offset = skb_transport_offset(skb);
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+ if (iph->protocol != IPPROTO_TCP &&
+ iph->protocol != IPPROTO_UDP)
+ return;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_level = 1;
+ skb->csum = 0;
+ if (iph->protocol == IPPROTO_TCP) {
+ struct tcphdr *ptcp = tcp_hdr(skb);
+
+ ptcp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ struct udphdr *pudp = udp_hdr(skb);
+
+ pudp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_UDP,
+ skb->csum);
+ }
+}
+
+static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
struct net_device *dev = port->vp->dev;
+ unsigned int len = desc->size;
unsigned int copy_len;
struct sk_buff *skb;
int err;
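vnet_fullcsum() above computes the TCP/UDP checksum entirely in software: it zeroes the check field, sums the transport header plus payload with skb_checksum(), then lets csum_tcpudp_magic() add the IPv4 pseudo-header and fold the result. Schematically:

	/* check = ~fold(sum(l4 header + payload)
	 *               + saddr + daddr + proto + l4_len) */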
@@ -299,7 +370,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
skb_put(skb, copy_len);
err = ldc_copy(port->vio.lp, LDC_COPY_IN,
skb->data, copy_len, 0,
- cookies, ncookies);
+ desc->cookies, desc->ncookies);
if (unlikely(err < 0)) {
dev->stats.rx_frame_errors++;
goto out_free_skb;
@@ -309,11 +380,33 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
skb_trim(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ if (vio_version_after_eq(&port->vio, 1, 8)) {
+ struct vio_net_dext *dext = vio_net_ext(desc);
- netif_rx(skb);
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ iph->check = 0;
+ ip_send_check(iph);
+ }
+ }
+ if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
+ skb->ip_summed == CHECKSUM_NONE)
+ vnet_fullcsum(skb);
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_level = 0;
+ if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
+ skb->csum_level = 1;
+ }
+ }
+
+ skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ napi_gro_receive(&port->napi, skb);
return 0;
out_free_skb:
@@ -430,6 +523,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
struct vio_driver_state *vio = &port->vio;
int err;
+ BUG_ON(desc == NULL);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -444,7 +538,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
desc->cookies[0].cookie_addr,
desc->cookies[0].cookie_size);
- err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
+ err = vnet_rx_one(port, desc);
if (err == -ECONNRESET)
return err;
desc->hdr.state = VIO_DESC_DONE;
@@ -456,10 +550,11 @@ static int vnet_walk_rx_one(struct vnet_port *port,
}
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
- u32 start, u32 end)
+ u32 start, u32 end, int *npkts, int budget)
{
struct vio_driver_state *vio = &port->vio;
int ack_start = -1, ack_end = -1;
+ bool send_ack = true;
end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
@@ -471,6 +566,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
return err;
if (err != 0)
break;
+ (*npkts)++;
if (ack_start == -1)
ack_start = start;
ack_end = start;
@@ -482,13 +578,26 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
return err;
ack_start = -1;
}
+ if ((*npkts) >= budget) {
+ send_ack = false;
+ break;
+ }
}
if (unlikely(ack_start == -1))
ack_start = ack_end = prev_idx(start, dr);
- return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
+ if (send_ack) {
+ port->napi_resume = false;
+ return vnet_send_ack(port, dr, ack_start, ack_end,
+ VIO_DRING_STOPPED);
+ } else {
+ port->napi_resume = true;
+ port->napi_stop_idx = ack_end;
+ return 1;
+ }
}
-static int vnet_rx(struct vnet_port *port, void *msgbuf)
+static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
+ int budget)
{
struct vio_dring_data *pkt = msgbuf;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
@@ -505,11 +614,13 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf)
return 0;
}
- dr->rcv_nxt++;
+ if (!port->napi_resume)
+ dr->rcv_nxt++;
/* XXX Validate pkt->start_idx and pkt->end_idx XXX */
- return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
+ return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
+ npkts, budget);
}
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
@@ -535,19 +646,26 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
struct vnet *vp;
u32 end;
struct vio_net_desc *desc;
+ struct netdev_queue *txq;
+
if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
return 0;
end = pkt->end_idx;
- if (unlikely(!idx_is_pending(dr, end)))
+ vp = port->vp;
+ dev = vp->dev;
+ netif_tx_lock(dev);
+ if (unlikely(!idx_is_pending(dr, end))) {
+ netif_tx_unlock(dev);
return 0;
+ }
/* sync for race conditions with vnet_start_xmit() and tell xmit it
* is time to send a trigger.
*/
dr->cons = next_idx(end, dr);
desc = vio_dring_entry(dr, dr->cons);
- if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
+ if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
/* vnet_start_xmit() just populated this dring but missed
* sending the "start" LDC message to the consumer.
* Send a "start" trigger on its behalf.
@@ -559,11 +677,10 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
} else {
port->start_cons = true;
}
+ netif_tx_unlock(dev);
-
- vp = port->vp;
- dev = vp->dev;
- if (unlikely(netif_queue_stopped(dev) &&
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ if (unlikely(netif_tx_queue_stopped(txq) &&
vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
return 1;
@@ -591,58 +708,64 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
return 0;
}
-static void maybe_tx_wakeup(unsigned long param)
+/* Got back a STOPPED LDC message on port. If the queue is stopped,
+ * wake it up so that we'll send out another START message at the
+ * next TX.
+ */
+static void maybe_tx_wakeup(struct vnet_port *port)
{
- struct vnet *vp = (struct vnet *)param;
- struct net_device *dev = vp->dev;
+ struct netdev_queue *txq;
- netif_tx_lock(dev);
- if (likely(netif_queue_stopped(dev))) {
- struct vnet_port *port;
- int wake = 1;
+ txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
+ __netif_tx_lock(txq, smp_processor_id());
+ if (likely(netif_tx_queue_stopped(txq))) {
+ struct vio_dring_state *dr;
- list_for_each_entry(port, &vp->port_list, list) {
- struct vio_dring_state *dr;
-
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (vnet_tx_dring_avail(dr) <
- VNET_TX_WAKEUP_THRESH(dr)) {
- wake = 0;
- break;
- }
- }
- if (wake)
- netif_wake_queue(dev);
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ netif_tx_wake_queue(txq);
}
- netif_tx_unlock(dev);
+ __netif_tx_unlock(txq);
}
-static void vnet_event(void *arg, int event)
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+ struct vio_driver_state *vio = &vnet->vio;
+
+ return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
+static int vnet_event_napi(struct vnet_port *port, int budget)
{
- struct vnet_port *port = arg;
struct vio_driver_state *vio = &port->vio;
- unsigned long flags;
int tx_wakeup, err;
+ int npkts = 0;
+ int event = (port->rx_event & LDC_EVENT_RESET);
- spin_lock_irqsave(&vio->lock, flags);
-
+ldc_ctrl:
if (unlikely(event == LDC_EVENT_RESET ||
event == LDC_EVENT_UP)) {
vio_link_state_change(vio, event);
- spin_unlock_irqrestore(&vio->lock, flags);
if (event == LDC_EVENT_RESET) {
port->rmtu = 0;
+ port->tso = true;
+ port->tsolen = 0;
vio_port_up(vio);
}
- return;
+ port->rx_event = 0;
+ return 0;
}
+ /* We may have multiple LDC events in rx_event. Unroll send_events() */
+ event = (port->rx_event & LDC_EVENT_UP);
+ port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
+ if (event == LDC_EVENT_UP)
+ goto ldc_ctrl;
+ event = port->rx_event;
+ if (!(event & LDC_EVENT_DATA_READY))
+ return 0;
- if (unlikely(event != LDC_EVENT_DATA_READY)) {
- pr_warn("Unexpected LDC event %d\n", event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
- }
+ /* we don't expect any bits other than RESET, UP, DATA_READY */
+ BUG_ON(event != LDC_EVENT_DATA_READY);
tx_wakeup = err = 0;
while (1) {
@@ -651,6 +774,20 @@ static void vnet_event(void *arg, int event)
u64 raw[8];
} msgbuf;
+ if (port->napi_resume) {
+ struct vio_dring_data *pkt =
+ (struct vio_dring_data *)&msgbuf;
+ struct vio_dring_state *dr =
+ &port->vio.drings[VIO_DRIVER_RX_RING];
+
+ pkt->tag.type = VIO_TYPE_DATA;
+ pkt->tag.stype = VIO_SUBTYPE_INFO;
+ pkt->tag.stype_env = VIO_DRING_DATA;
+ pkt->seq = dr->rcv_nxt;
+ pkt->start_idx = next_idx(port->napi_stop_idx, dr);
+ pkt->end_idx = -1;
+ goto napi_resume;
+ }
err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
@@ -667,10 +804,22 @@ static void vnet_event(void *arg, int event)
err = vio_validate_sid(vio, &msgbuf.tag);
if (err < 0)
break;
-
+napi_resume:
if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
- err = vnet_rx(port, &msgbuf);
+ if (!port_is_up(port)) {
+ /* failures like handshake_failure()
+ * may have cleaned up dring, but
+ * NAPI polling may bring us here.
+ */
+ err = -ECONNRESET;
+ break;
+ }
+ err = vnet_rx(port, &msgbuf, &npkts, budget);
+ if (npkts >= budget)
+ break;
+ if (npkts == 0)
+ break;
} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
err = vnet_ack(port, &msgbuf);
if (err > 0)
@@ -691,15 +840,34 @@ static void vnet_event(void *arg, int event)
if (err == -ECONNRESET)
break;
}
- spin_unlock(&vio->lock);
- /* Kick off a tasklet to wake the queue. We cannot call
- * maybe_tx_wakeup directly here because we could deadlock on
- * netif_tx_lock() with dev_watchdog()
- */
if (unlikely(tx_wakeup && err != -ECONNRESET))
- tasklet_schedule(&port->vp->vnet_tx_wakeup);
+ maybe_tx_wakeup(port);
+ return npkts;
+}
+
+static int vnet_poll(struct napi_struct *napi, int budget)
+{
+ struct vnet_port *port = container_of(napi, struct vnet_port, napi);
+ struct vio_driver_state *vio = &port->vio;
+ int processed = vnet_event_napi(port, budget);
+
+ if (processed < budget) {
+ napi_complete(napi);
+ port->rx_event &= ~LDC_EVENT_DATA_READY;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
+ }
+ return processed;
+}
+
+static void vnet_event(void *arg, int event)
+{
+ struct vnet_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+
+ port->rx_event |= event;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
+ napi_schedule(&port->napi);
- local_irq_restore(flags);
}
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
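The vnet_event()/vnet_poll() pair above is the canonical NAPI shape: the event handler masks the interrupt source and schedules the poller, and the poller re-enables the source only once it completes under budget. A generic skeleton of that contract (all foo_* names are hypothetical):

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		struct foo *p = dev_id;

		foo_mask_irq(p);		/* hypothetical: quiesce the source */
		napi_schedule(&p->napi);	/* defer processing to softirq */
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo *p = container_of(napi, struct foo, napi);
		int done = foo_process_rx(p, budget);	/* hypothetical */

		if (done < budget) {
			napi_complete(napi);
			foo_unmask_irq(p);	/* re-arm only when drained */
		}
		return done;
	}

The napi_resume/napi_stop_idx fields added in this patch handle the driver-specific wrinkle that one LDC dring message may describe more packets than a single budget allows, so the next poll must resume mid-descriptor-ring rather than wait for a fresh message.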
@@ -746,26 +914,19 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
return err;
}
-static inline bool port_is_up(struct vnet_port *vnet)
-{
- struct vio_driver_state *vio = &vnet->vio;
-
- return !!(vio->hs_state & VIO_HS_COMPLETE);
-}
-
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
struct hlist_head *hp = &vp->port_hash[hash];
struct vnet_port *port;
- hlist_for_each_entry(port, hp, hash) {
+ hlist_for_each_entry_rcu(port, hp, hash) {
if (!port_is_up(port))
continue;
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
- list_for_each_entry(port, &vp->port_list, list) {
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
if (!port->switch_port)
continue;
if (!port_is_up(port))
@@ -775,18 +936,6 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
return NULL;
}
-struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
-{
- struct vnet_port *ret;
- unsigned long flags;
-
- spin_lock_irqsave(&vp->lock, flags);
- ret = __tx_port_find(vp, skb);
- spin_unlock_irqrestore(&vp->lock, flags);
-
- return ret;
-}
-
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
unsigned *pending)
{
@@ -847,11 +996,10 @@ static void vnet_clean_timer_expire(unsigned long port0)
struct vnet_port *port = (struct vnet_port *)port0;
struct sk_buff *freeskbs;
unsigned pending;
- unsigned long flags;
- spin_lock_irqsave(&port->vio.lock, flags);
+ netif_tx_lock(port->vp->dev);
freeskbs = vnet_clean_tx_ring(port, &pending);
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ netif_tx_unlock(port->vp->dev);
vnet_free_skbs(freeskbs);
@@ -862,11 +1010,54 @@ static void vnet_clean_timer_expire(unsigned long port0)
del_timer(&port->clean_timer);
}
-static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
- int *plen)
+static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
+ struct ldc_trans_cookie *cookies, int ncookies,
+ unsigned int map_perm)
+{
+ int i, nc, err, blen;
+
+ /* header */
+ blen = skb_headlen(skb);
+ if (blen < ETH_ZLEN)
+ blen = ETH_ZLEN;
+ blen += VNET_PACKET_SKIP;
+ blen += 8 - (blen & 7);
+
+ err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies,
+ ncookies, map_perm);
+ if (err < 0)
+ return err;
+ nc = err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ if (nc < ncookies) {
+ vaddr = kmap_atomic(skb_frag_page(f));
+ blen = skb_frag_size(f);
+ blen += 8 - (blen & 7);
+ err = ldc_map_single(lp, vaddr + f->page_offset,
+ blen, cookies + nc, ncookies - nc,
+ map_perm);
+ kunmap_atomic(vaddr);
+ } else {
+ err = -EMSGSIZE;
+ }
+
+ if (err < 0) {
+ ldc_unmap(lp, cookies, nc);
+ return err;
+ }
+ nc += err;
+ }
+ return nc;
+}
+
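One detail worth noting in vnet_skb_map(): "blen += 8 - (blen & 7)" rounds the mapped length up to an 8-byte boundary but adds a full 8 bytes when the length is already aligned:

	len = 61;  len += 8 - (len & 7);	/* -> 64 */
	len = 64;  len += 8 - (len & 7);	/* -> 72, not 64 */

A strict round-up would be ALIGN(len, 8); whether the extra slack on already-aligned lengths is intentional is not stated in the patch, though it is harmless for mapping purposes.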
+static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
struct sk_buff *nskb;
- int len, pad;
+ int i, len, pad, docopy;
len = skb->len;
pad = 0;
@@ -876,51 +1067,223 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
}
len += VNET_PACKET_SKIP;
pad += 8 - (len & 7);
- len += 8 - (len & 7);
+ /* make sure we have enough cookies and alignment in every frag */
+ docopy = skb_shinfo(skb)->nr_frags >= ncookies;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+ docopy |= f->page_offset & 7;
+ }
if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
skb_tailroom(skb) < pad ||
- skb_headroom(skb) < VNET_PACKET_SKIP) {
- nskb = alloc_and_align_skb(skb->dev, skb->len);
+ skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
+ int start = 0, offset;
+ __wsum csum;
+
+ len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
+ nskb = alloc_and_align_skb(skb->dev, len);
+ if (nskb == NULL) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_reserve(nskb, VNET_PACKET_SKIP);
- if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
+
+ nskb->protocol = skb->protocol;
+ offset = skb_mac_header(skb) - skb->data;
+ skb_set_mac_header(nskb, offset);
+ offset = skb_network_header(skb) - skb->data;
+ skb_set_network_header(nskb, offset);
+ offset = skb_transport_header(skb) - skb->data;
+ skb_set_transport_header(nskb, offset);
+
+ offset = 0;
+ nskb->csum_offset = skb->csum_offset;
+ nskb->ip_summed = skb->ip_summed;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ start = skb_checksum_start_offset(skb);
+ if (start) {
+ struct iphdr *iph = ip_hdr(nskb);
+ int offset = start + nskb->csum_offset;
+
+ if (skb_copy_bits(skb, 0, nskb->data, start)) {
+ dev_kfree_skb(nskb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ *(__sum16 *)(skb->data + offset) = 0;
+ csum = skb_copy_and_csum_bits(skb, start,
+ nskb->data + start,
+ skb->len - start, 0);
+ if (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP) {
+ csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - start,
+ iph->protocol, csum);
+ }
+ *(__sum16 *)(nskb->data + offset) = csum;
+
+ nskb->ip_summed = CHECKSUM_NONE;
+ } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
dev_kfree_skb(nskb);
dev_kfree_skb(skb);
return NULL;
}
(void)skb_put(nskb, skb->len);
+ if (skb_is_gso(skb)) {
+ skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
+ skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+ }
dev_kfree_skb(skb);
skb = nskb;
}
-
- *pstart = skb->data - VNET_PACKET_SKIP;
- *plen = len;
return skb;
}
+static u16
+vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port = __tx_port_find(vp, skb);
+
+ if (port == NULL)
+ return 0;
+ return port->q_index;
+}
+
+static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
+{
+ struct net_device *dev = port->vp->dev;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct sk_buff *segs;
+ int maclen, datalen;
+ int status;
+ int gso_size, gso_type, gso_segs;
+ int hlen = skb_transport_header(skb) - skb_mac_header(skb);
+ int proto = IPPROTO_IP;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ proto = ipv6_hdr(skb)->nexthdr;
+
+ if (proto == IPPROTO_TCP)
+ hlen += tcp_hdr(skb)->doff * 4;
+ else if (proto == IPPROTO_UDP)
+ hlen += sizeof(struct udphdr);
+ else {
+ pr_err("vnet_handle_offloads GSO with unknown transport "
+ "protocol %d tproto %d\n", skb->protocol, proto);
+ hlen = 128; /* XXX */
+ }
+ datalen = port->tsolen - hlen;
+
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_type = skb_shinfo(skb)->gso_type;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (port->tso && gso_size < datalen)
+ gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
+
+ if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ netif_tx_stop_queue(txq);
+ if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
+ return NETDEV_TX_BUSY;
+ netif_tx_wake_queue(txq);
+ }
+
+ maclen = skb_network_header(skb) - skb_mac_header(skb);
+ skb_pull(skb, maclen);
+
+ if (port->tso && gso_size < datalen) {
+ /* segment to TSO size */
+ skb_shinfo(skb)->gso_size = datalen;
+ skb_shinfo(skb)->gso_segs = gso_segs;
+
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+
+ /* restore gso_size & gso_segs */
+ skb_shinfo(skb)->gso_size = gso_size;
+ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen,
+ gso_size);
+ } else
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+ if (IS_ERR(segs)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ skb_push(skb, maclen);
+ skb_reset_mac_header(skb);
+
+ status = 0;
+ while (segs) {
+ struct sk_buff *curr = segs;
+
+ segs = segs->next;
+ curr->next = NULL;
+ if (port->tso && curr->len > dev->mtu) {
+ skb_shinfo(curr)->gso_size = gso_size;
+ skb_shinfo(curr)->gso_type = gso_type;
+ skb_shinfo(curr)->gso_segs =
+ DIV_ROUND_UP(curr->len - hlen, gso_size);
+ } else
+ skb_shinfo(curr)->gso_size = 0;
+
+ skb_push(curr, maclen);
+ skb_reset_mac_header(curr);
+ memcpy(skb_mac_header(curr), skb_mac_header(skb),
+ maclen);
+ curr->csum_start = skb_transport_header(curr) - curr->head;
+ if (ip_hdr(curr)->protocol == IPPROTO_TCP)
+ curr->csum_offset = offsetof(struct tcphdr, check);
+ else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
+ curr->csum_offset = offsetof(struct udphdr, check);
+
+ if (!(status & NETDEV_TX_MASK))
+ status = vnet_start_xmit(curr, dev);
+ if (status & NETDEV_TX_MASK)
+ dev_kfree_skb_any(curr);
+ }
+
+ if (!(status & NETDEV_TX_MASK))
+ dev_kfree_skb_any(skb);
+ return status;
+}
+
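vnet_handle_offloads() re-segments in software, via skb_gso_segment() with NETIF_F_TSO masked out of the features, whenever the negotiated TSO limit is smaller than the segments the stack would produce; the temporary gso_size override makes the segmenter cut tsolen-sized chunks. Worked numbers (illustrative only):

	/* port->tsolen = 8192, hlen = 54 (Ethernet + IPv4 + TCP):
	 *   datalen  = 8192 - 54 = 8138
	 *   for a 65000-byte skb:
	 *   gso_segs = DIV_ROUND_UP(65000 - 54, 8138) = 8 segments
	 */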
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
- struct vnet_port *port = tx_port_find(vp, skb);
+ struct vnet_port *port = NULL;
struct vio_dring_state *dr;
struct vio_net_desc *d;
- unsigned long flags;
unsigned int len;
struct sk_buff *freeskbs = NULL;
int i, err, txi;
- void *start = NULL;
- int nlen = 0;
unsigned pending = 0;
+ struct netdev_queue *txq;
- if (unlikely(!port))
+ rcu_read_lock();
+ port = __tx_port_find(vp, skb);
+ if (unlikely(!port)) {
+ rcu_read_unlock();
goto out_dropped;
+ }
- skb = vnet_skb_shape(skb, &start, &nlen);
-
- if (unlikely(!skb))
- goto out_dropped;
+ if (skb_is_gso(skb) && skb->len > port->tsolen) {
+ err = vnet_handle_offloads(port, skb);
+ rcu_read_unlock();
+ return err;
+ }
- if (skb->len > port->rmtu) {
+ if (!skb_is_gso(skb) && skb->len > port->rmtu) {
unsigned long localmtu = port->rmtu - ETH_HLEN;
if (vio_version_after_eq(&port->vio, 1, 3))
@@ -937,6 +1300,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
fl4.saddr = ip_hdr(skb)->saddr;
rt = ip_route_output_key(dev_net(dev), &fl4);
+ rcu_read_unlock();
if (!IS_ERR(rt)) {
skb_dst_set(skb, &rt->dst);
icmp_send(skb, ICMP_DEST_UNREACH,
@@ -951,18 +1315,26 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out_dropped;
}
- spin_lock_irqsave(&port->vio.lock, flags);
+ skb = vnet_skb_shape(skb, 2);
+
+ if (unlikely(!skb))
+ goto out_dropped;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vnet_fullcsum(skb);
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ i = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, i);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
dev->stats.tx_errors++;
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ rcu_read_unlock();
return NETDEV_TX_BUSY;
}
@@ -978,16 +1350,15 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (len < ETH_ZLEN)
len = ETH_ZLEN;
- port->tx_bufs[txi].skb = skb;
- skb = NULL;
-
- err = ldc_map_single(port->vio.lp, start, nlen,
- port->tx_bufs[txi].cookies, VNET_MAXCOOKIES,
- (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
+ err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
+ (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
if (err < 0) {
netdev_info(dev, "tx buffer map error %d\n", err);
- goto out_dropped_unlock;
+ goto out_dropped;
}
+
+ port->tx_bufs[txi].skb = skb;
+ skb = NULL;
port->tx_bufs[txi].ncookies = err;
/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
@@ -1003,6 +1374,21 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
d->ncookies = port->tx_bufs[txi].ncookies;
for (i = 0; i < d->ncookies; i++)
d->cookies[i] = port->tx_bufs[txi].cookies[i];
+ if (vio_version_after_eq(&port->vio, 1, 7)) {
+ struct vio_net_dext *dext = vio_net_ext(d);
+
+ memset(dext, 0, sizeof(*dext));
+ if (skb_is_gso(port->tx_bufs[txi].skb)) {
+ dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
+ ->gso_size;
+ dext->flags |= VNET_PKT_IPV4_LSO;
+ }
+ if (vio_version_after_eq(&port->vio, 1, 8) &&
+ !port->switch_port) {
+ dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
+ dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
+ }
+ }
/* This has to be a non-SMP write barrier because we are writing
* to memory which is shared with the peer LDOM.
@@ -1039,7 +1425,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
dev->stats.tx_carrier_errors++;
- goto out_dropped_unlock;
+ goto out_dropped;
}
ldc_start_done:
@@ -1050,31 +1436,29 @@ ldc_start_done:
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
- netif_wake_queue(dev);
+ netif_tx_wake_queue(txq);
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
+ rcu_read_unlock();
vnet_free_skbs(freeskbs);
- (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
-
return NETDEV_TX_OK;
-out_dropped_unlock:
- spin_unlock_irqrestore(&port->vio.lock, flags);
-
out_dropped:
- if (skb)
- dev_kfree_skb(skb);
- vnet_free_skbs(freeskbs);
if (pending)
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else if (port)
del_timer(&port->clean_timer);
+ if (port)
+ rcu_read_unlock();
+ if (skb)
+ dev_kfree_skb(skb);
+ vnet_free_skbs(freeskbs);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1087,14 +1471,14 @@ static void vnet_tx_timeout(struct net_device *dev)
static int vnet_open(struct net_device *dev)
{
netif_carrier_on(dev);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
static int vnet_close(struct net_device *dev)
{
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
return 0;
@@ -1204,18 +1588,17 @@ static void vnet_set_rx_mode(struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port;
- unsigned long flags;
- spin_lock_irqsave(&vp->lock, flags);
- if (!list_empty(&vp->port_list)) {
- port = list_entry(vp->port_list.next, struct vnet_port, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
if (port->switch_port) {
__update_mc_list(vp, dev);
__send_mc_list(vp, port);
+ break;
}
}
- spin_unlock_irqrestore(&vp->lock, flags);
+ rcu_read_unlock();
}
static int vnet_change_mtu(struct net_device *dev, int new_mtu)
@@ -1295,18 +1678,20 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
}
}
-static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
+static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
struct vio_dring_state *dr;
- unsigned long len;
+ unsigned long len, elen;
int i, err, ncookies;
void *dring;
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- len = (VNET_TX_RING_SIZE *
- (sizeof(struct vio_net_desc) +
- (sizeof(struct ldc_trans_cookie) * 2)));
+ elen = sizeof(struct vio_net_desc) +
+ sizeof(struct ldc_trans_cookie) * 2;
+ if (vio_version_after_eq(&port->vio, 1, 7))
+ elen += sizeof(struct vio_net_dext);
+ len = VNET_TX_RING_SIZE * elen;
ncookies = VIO_MAX_RING_COOKIES;
dring = ldc_alloc_exp_dring(port->vio.lp, len,
@@ -1320,8 +1705,7 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
}
dr->base = dring;
- dr->entry_size = (sizeof(struct vio_net_desc) +
- (sizeof(struct ldc_trans_cookie) * 2));
+ dr->entry_size = elen;
dr->num_entries = VNET_TX_RING_SIZE;
dr->prod = dr->cons = 0;
port->start_cons = true; /* need an initial trigger */
@@ -1342,6 +1726,21 @@ err_out:
return err;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vnet_poll_controller(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (!list_empty(&vp->port_list)) {
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+ napi_schedule(&port->napi);
+ }
+ spin_unlock_irqrestore(&vp->lock, flags);
+}
+#endif
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);
@@ -1354,6 +1753,10 @@ static const struct net_device_ops vnet_ops = {
.ndo_tx_timeout = vnet_tx_timeout,
.ndo_change_mtu = vnet_change_mtu,
.ndo_start_xmit = vnet_start_xmit,
+ .ndo_select_queue = vnet_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vnet_poll_controller,
+#endif
};
static struct vnet *vnet_new(const u64 *local_mac)
@@ -1362,7 +1765,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
struct vnet *vp;
int err, i;
- dev = alloc_etherdev(sizeof(*vp));
+ dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
if (!dev)
return ERR_PTR(-ENOMEM);
dev->needed_headroom = VNET_PACKET_SKIP + 8;
@@ -1374,7 +1777,6 @@ static struct vnet *vnet_new(const u64 *local_mac)
vp = netdev_priv(dev);
spin_lock_init(&vp->lock);
- tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
vp->dev = dev;
INIT_LIST_HEAD(&vp->port_list);
@@ -1387,6 +1789,10 @@ static struct vnet *vnet_new(const u64 *local_mac)
dev->ethtool_ops = &vnet_ethtool_ops;
dev->watchdog_timeo = VNET_TX_TIMEOUT;
+ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->features = dev->hw_features;
+
err = register_netdev(dev);
if (err) {
pr_err("Cannot register net device, aborting\n");
@@ -1434,7 +1840,6 @@ static void vnet_cleanup(void)
vp = list_first_entry(&vnet_list, struct vnet, list);
list_del(&vp->list);
dev = vp->dev;
- tasklet_kill(&vp->vnet_tx_wakeup);
/* vio_unregister_driver() should have cleaned up port_list */
BUG_ON(!list_empty(&vp->port_list));
unregister_netdev(dev);
@@ -1489,6 +1894,25 @@ static void print_version(void)
const char *remote_macaddr_prop = "remote-mac-address";
+static void
+vnet_port_add_txq(struct vnet_port *port)
+{
+ struct vnet *vp = port->vp;
+ int n;
+
+ n = vp->nports++;
+ n = n & (VNET_MAX_TXQS - 1);
+ port->q_index = n;
+ netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
+}
+
+static void
+vnet_port_rm_txq(struct vnet_port *port)
+{
+ port->vp->nports--;
+ netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
+}
+
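vnet_port_add_txq() maps ports onto the TX queues with "n & (VNET_MAX_TXQS - 1)", which equals "n % VNET_MAX_TXQS" only because VNET_MAX_TXQS (16) is a power of two:

	n & (16 - 1)	/* == n % 16 for any n >= 0 */

Note that vnet_port_rm_txq() decrements nports without renumbering surviving ports, so q_index values can become sparse after removals; each index remains valid, just possibly unevenly shared.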
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
@@ -1536,9 +1960,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_port;
- err = vnet_port_alloc_tx_bufs(port);
- if (err)
- goto err_out_free_ldc;
+ netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);
INIT_HLIST_NODE(&port->hash);
INIT_LIST_HEAD(&port->list);
@@ -1547,13 +1969,17 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
switch_port = 1;
port->switch_port = switch_port;
+ port->tso = true;
+ port->tsolen = 0;
spin_lock_irqsave(&vp->lock, flags);
if (switch_port)
- list_add(&port->list, &vp->port_list);
+ list_add_rcu(&port->list, &vp->port_list);
else
- list_add_tail(&port->list, &vp->port_list);
- hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
+ list_add_tail_rcu(&port->list, &vp->port_list);
+ hlist_add_head_rcu(&port->hash,
+ &vp->port_hash[vnet_hashfn(port->raddr)]);
+ vnet_port_add_txq(port);
spin_unlock_irqrestore(&vp->lock, flags);
dev_set_drvdata(&vdev->dev, port);
@@ -1564,15 +1990,13 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
setup_timer(&port->clean_timer, vnet_clean_timer_expire,
(unsigned long)port);
+ napi_enable(&port->napi);
vio_port_up(&port->vio);
mdesc_release(hp);
return 0;
-err_out_free_ldc:
- vio_ldc_free(&port->vio);
-
err_out_free_port:
kfree(port);
@@ -1586,17 +2010,18 @@ static int vnet_port_remove(struct vio_dev *vdev)
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- struct vnet *vp = port->vp;
- unsigned long flags;
del_timer_sync(&port->vio.timer);
- del_timer_sync(&port->clean_timer);
- spin_lock_irqsave(&vp->lock, flags);
- list_del(&port->list);
- hlist_del(&port->hash);
- spin_unlock_irqrestore(&vp->lock, flags);
+ napi_disable(&port->napi);
+
+ list_del_rcu(&port->list);
+ hlist_del_rcu(&port->hash);
+ synchronize_rcu();
+ del_timer_sync(&port->clean_timer);
+ vnet_port_rm_txq(port);
+ netif_napi_del(&port->napi);
vnet_port_free_tx_bufs(port);
vio_ldc_free(&port->vio);
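The locking change running through this file replaces vp->lock in the hot paths with RCU: ports are published with list_add_rcu()/hlist_add_head_rcu() under vp->lock (writer side), looked up lock-free in the transmit path under rcu_read_lock(), and torn down with list_del_rcu() followed by synchronize_rcu() before any port state is freed. The generic shape:

	/* publish (writers still serialize on vp->lock) */
	list_add_rcu(&port->list, &vp->port_list);

	/* lock-free reader, e.g. the xmit path */
	rcu_read_lock();
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		/* port cannot be freed while we are in this section */
	}
	rcu_read_unlock();

	/* removal: unlink, wait out readers, then free */
	list_del_rcu(&port->list);
	synchronize_rcu();
	free_port_state(port);	/* hypothetical stand-in for the teardown above */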
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index c91104542619..01ca78191683 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -20,6 +20,9 @@
#define VNET_TX_RING_SIZE 512
#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
+#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */
+#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */
+
/* VNET packets are sent in buffers with the first 6 bytes skipped
* so that after the ethernet header the IPv4/IPv6 headers are aligned
* properly.
@@ -40,8 +43,9 @@ struct vnet_port {
struct hlist_node hash;
u8 raddr[ETH_ALEN];
- u8 switch_port;
- u8 __pad;
+ unsigned switch_port:1;
+ unsigned tso:1;
+ unsigned __pad:14;
struct vnet *vp;
@@ -56,6 +60,13 @@ struct vnet_port {
struct timer_list clean_timer;
u64 rmtu;
+ u16 tsolen;
+
+ struct napi_struct napi;
+ u32 napi_stop_idx;
+ bool napi_resume;
+ int rx_event;
+ u16 q_index;
};
static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
@@ -97,7 +108,7 @@ struct vnet {
struct list_head list;
u64 local_mac;
- struct tasklet_struct vnet_tx_wakeup;
+ int nports;
};
#endif /* _SUNVNET_H */
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 5d8cb7956113..605dd909bcc3 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_TI
bool "Texas Instruments (TI) devices"
default y
- depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE))
+ depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE )
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
select PHYLIB
---help---
This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO
config TI_DAVINCI_CPDMA
tristate "TI DaVinci CPDMA Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
---help---
This driver supports TI's DaVinci CPDMA dma engine.
@@ -58,7 +58,7 @@ config TI_CPSW_PHY_SEL
config TI_CPSW
tristate "TI CPSW Switch Support"
- depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
select TI_CPSW_PHY_SEL
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0f56b1c0e082..70a930ac4fa9 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -638,14 +638,12 @@ static int w5100_hw_probe(struct platform_device *pdev)
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENXIO;
- mem_size = resource_size(mem);
-
priv->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ mem_size = resource_size(mem);
+
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
if (priv->indirect) {
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index f961f14a0473..7974b7d90fcc 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -558,14 +558,12 @@ static int w5300_hw_probe(struct platform_device *pdev)
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENXIO;
- mem_size = resource_size(mem);
-
priv->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ mem_size = resource_size(mem);
+
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
if (priv->indirect) {
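Both wiznet hunks rely on devm_ioremap_resource() validating its resource argument: it returns ERR_PTR(-EINVAL) for a NULL resource, so the explicit !mem / -ENXIO check was redundant, and resource_size(mem) is only evaluated after the IS_ERR() test guarantees mem is valid. Condensed:

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, mem);	/* handles mem == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);
	size = resource_size(mem);			/* safe: mem is non-NULL here */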
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 629077050fce..9c2d91ea0af4 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -224,8 +224,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
dma_free_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
lp->tx_bd_v, lp->tx_bd_p);
- if (lp->rx_skb)
- kfree(lp->rx_skb);
+ kfree(lp->rx_skb);
}
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 28dbbdc393eb..24858799c204 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1200,8 +1200,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
unregister_netdev(ndev);
- if (lp->phy_node)
- of_node_put(lp->phy_node);
+ of_node_put(lp->phy_node);
lp->phy_node = NULL;
xemaclite_remove_ndev(ndev);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index caed6eee289c..7f975a2c8990 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -414,7 +414,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
* ================
*
* Overview:
- * Retrieves the address range used to access control and status
+ * Retrieves the address ranges used to access control and status
* registers.
*
* Returns:
@@ -422,8 +422,8 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
*
* Arguments:
* bdev - pointer to device information
- * bar_start - pointer to store the start address
- * bar_len - pointer to store the length of the area
+ * bar_start - pointer to store the start addresses
+ * bar_len - pointer to store the lengths of the areas
*
* Assumptions:
* I am sure there are some.
@@ -442,38 +442,47 @@ static void dfx_get_bars(struct device *bdev,
if (dfx_bus_pci) {
int num = dfx_use_mmio ? 0 : 1;
- *bar_start = pci_resource_start(to_pci_dev(bdev), num);
- *bar_len = pci_resource_len(to_pci_dev(bdev), num);
+ bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
+ bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
+ bar_start[2] = bar_start[1] = 0;
+ bar_len[2] = bar_len[1] = 0;
}
if (dfx_bus_eisa) {
unsigned long base_addr = to_eisa_device(bdev)->base_addr;
- resource_size_t bar;
+ resource_size_t bar_lo;
+ resource_size_t bar_hi;
if (dfx_use_mmio) {
- bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
- bar <<= 8;
- bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
- bar <<= 8;
- bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
- bar <<= 16;
- *bar_start = bar;
- bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
- bar <<= 8;
- bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
- bar <<= 8;
- bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
- bar <<= 16;
- *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
+ bar_lo = inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_2);
+ bar_lo <<= 8;
+ bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_1);
+ bar_lo <<= 8;
+ bar_lo |= inb(base_addr + PI_ESIC_K_MEM_ADD_LO_CMP_0);
+ bar_lo <<= 8;
+ bar_start[0] = bar_lo;
+ bar_hi = inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_2);
+ bar_hi <<= 8;
+ bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_1);
+ bar_hi <<= 8;
+ bar_hi |= inb(base_addr + PI_ESIC_K_MEM_ADD_HI_CMP_0);
+ bar_hi <<= 8;
+ bar_len[0] = ((bar_hi - bar_lo) | PI_MEM_ADD_MASK_M) +
+ 1;
} else {
- *bar_start = base_addr;
- *bar_len = PI_ESIC_K_CSR_IO_LEN +
- PI_ESIC_K_BURST_HOLDOFF_LEN;
+ bar_start[0] = base_addr;
+ bar_len[0] = PI_ESIC_K_CSR_IO_LEN;
}
+ bar_start[1] = base_addr + PI_DEFEA_K_BURST_HOLDOFF;
+ bar_len[1] = PI_ESIC_K_BURST_HOLDOFF_LEN;
+ bar_start[2] = base_addr + PI_ESIC_K_ESIC_CSR;
+ bar_len[2] = PI_ESIC_K_ESIC_CSR_LEN;
}
if (dfx_bus_tc) {
- *bar_start = to_tc_dev(bdev)->resource.start +
- PI_TC_K_CSR_OFFSET;
- *bar_len = PI_TC_K_CSR_LEN;
+ bar_start[0] = to_tc_dev(bdev)->resource.start +
+ PI_TC_K_CSR_OFFSET;
+ bar_len[0] = PI_TC_K_CSR_LEN;
+ bar_start[2] = bar_start[1] = 0;
+ bar_len[2] = bar_len[1] = 0;
}
}
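After this change dfx_get_bars() reports up to three decode windows instead of one; a slot summary derived from the code above:

	/* bar_start[]/bar_len[] layout:
	 *   [0] PDQ control/status registers  (MMIO or I/O; all buses)
	 *   [1] Burst Holdoff register         (EISA only)
	 *   [2] ESIC control/status registers  (EISA only)
	 * unused slots are zeroed so later code can test bar_start[n] != 0
	 */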
@@ -518,13 +527,14 @@ static int dfx_register(struct device *bdev)
{
static int version_disp;
int dfx_bus_pci = dev_is_pci(bdev);
+ int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
const char *print_name = dev_name(bdev);
struct net_device *dev;
DFX_board_t *bp; /* board pointer */
- resource_size_t bar_start = 0; /* pointer to port */
- resource_size_t bar_len = 0; /* resource length */
+ resource_size_t bar_start[3]; /* pointers to ports */
resource_size_t bar_len[3]; /* resource lengths */
int alloc_size; /* total buffer size used */
struct resource *region;
int err = 0;
@@ -542,10 +552,13 @@ static int dfx_register(struct device *bdev)
}
/* Enable PCI device. */
- if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
- printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
- print_name);
- goto err_out;
+ if (dfx_bus_pci) {
+ err = pci_enable_device(to_pci_dev(bdev));
+ if (err) {
+ pr_err("%s: Cannot enable PCI device, aborting\n",
+ print_name);
+ goto err_out;
+ }
}
SET_NETDEV_DEV(dev, bdev);
@@ -554,31 +567,62 @@ static int dfx_register(struct device *bdev)
bp->bus_dev = bdev;
dev_set_drvdata(bdev, dev);
- dfx_get_bars(bdev, &bar_start, &bar_len);
+ dfx_get_bars(bdev, bar_start, bar_len);
+ if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
+ pr_err("%s: Cannot use MMIO, no address set, aborting\n",
+ print_name);
+ pr_err("%s: Run ECU and set adapter's MMIO location\n",
+ print_name);
+ pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
+ "\n", print_name);
+ err = -ENXIO;
+ goto err_out;
+ }
if (dfx_use_mmio)
- region = request_mem_region(bar_start, bar_len, print_name);
+ region = request_mem_region(bar_start[0], bar_len[0],
+ print_name);
else
- region = request_region(bar_start, bar_len, print_name);
+ region = request_region(bar_start[0], bar_len[0], print_name);
if (!region) {
- printk(KERN_ERR "%s: Cannot reserve I/O resource "
- "0x%lx @ 0x%lx, aborting\n",
- print_name, (long)bar_len, (long)bar_start);
+ pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
+ "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
+ (long)bar_len[0], (long)bar_start[0]);
err = -EBUSY;
goto err_out_disable;
}
+ if (bar_start[1] != 0) {
+ region = request_region(bar_start[1], bar_len[1], print_name);
+ if (!region) {
+ pr_err("%s: Cannot reserve I/O resource "
+ "0x%lx @ 0x%lx, aborting\n", print_name,
+ (long)bar_len[1], (long)bar_start[1]);
+ err = -EBUSY;
+ goto err_out_csr_region;
+ }
+ }
+ if (bar_start[2] != 0) {
+ region = request_region(bar_start[2], bar_len[2], print_name);
+ if (!region) {
+ pr_err("%s: Cannot reserve I/O resource "
+ "0x%lx @ 0x%lx, aborting\n", print_name,
+ (long)bar_len[2], (long)bar_start[2]);
+ err = -EBUSY;
+ goto err_out_bh_region;
+ }
+ }
/* Set up I/O base address. */
if (dfx_use_mmio) {
- bp->base.mem = ioremap_nocache(bar_start, bar_len);
+ bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]);
if (!bp->base.mem) {
printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
err = -ENOMEM;
- goto err_out_region;
+ goto err_out_esic_region;
}
} else {
- bp->base.port = bar_start;
- dev->base_addr = bar_start;
+ bp->base.port = bar_start[0];
+ dev->base_addr = bar_start[0];
}
/* Initialize new device structure */
@@ -587,7 +631,7 @@ static int dfx_register(struct device *bdev)
if (dfx_bus_pci)
pci_set_master(to_pci_dev(bdev));
- if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
+ if (dfx_driver_init(dev, print_name, bar_start[0]) != DFX_K_SUCCESS) {
err = -ENODEV;
goto err_out_unmap;
}
@@ -615,11 +659,19 @@ err_out_unmap:
if (dfx_use_mmio)
iounmap(bp->base.mem);
-err_out_region:
+err_out_esic_region:
+ if (bar_start[2] != 0)
+ release_region(bar_start[2], bar_len[2]);
+
+err_out_bh_region:
+ if (bar_start[1] != 0)
+ release_region(bar_start[1], bar_len[1]);
+
+err_out_csr_region:
if (dfx_use_mmio)
- release_mem_region(bar_start, bar_len);
+ release_mem_region(bar_start[0], bar_len[0]);
else
- release_region(bar_start, bar_len);
+ release_region(bar_start[0], bar_len[0]);
err_out_disable:
if (dfx_bus_pci)
@@ -711,13 +763,14 @@ static void dfx_bus_init(struct net_device *dev)
}
/*
- * Enable memory decoding (MEMCS0) and/or port decoding
+ * Enable memory decoding (MEMCS1) and/or port decoding
* (IOCS1/IOCS0) as appropriate in Function Control
- * Register. IOCS0 is used for PDQ registers, taking 16
- * 32-bit words, while IOCS1 is used for the Burst Holdoff
- * register, taking a single 32-bit word only. We use the
- * slot-specific I/O range as per the ESIC spec, that is
- * set bits 15:12 in the mask registers to mask them out.
+ * Register. MEMCS1 or IOCS0 is used for PDQ registers,
+ * taking 16 32-bit words, while IOCS1 is used for the
+ * Burst Holdoff register, taking a single 32-bit word
+ * only. We use the slot-specific I/O range as per the
+ * ESIC spec, that is, set bits 15:12 in the mask registers
+ * to mask them out.
*/
/* Set the decode range of the board. */
@@ -742,9 +795,11 @@ static void dfx_bus_init(struct net_device *dev)
outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
/* Enable the decoders. */
- val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
+ val = PI_FUNCTION_CNTRL_M_IOCS1;
if (dfx_use_mmio)
- val |= PI_FUNCTION_CNTRL_M_MEMCS0;
+ val |= PI_FUNCTION_CNTRL_M_MEMCS1;
+ else
+ val |= PI_FUNCTION_CNTRL_M_IOCS0;
outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
/*
@@ -838,6 +893,12 @@ static void dfx_bus_uninit(struct net_device *dev)
val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
+
+ /* Disable the board. */
+ outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL);
+
+ /* Disable memory and port decoders. */
+ outb(0, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
}
if (dfx_bus_pci) {
/* Disable interrupts at PCI bus interface chip (PFI) */
@@ -1061,8 +1122,8 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
board_name = "DEFEA";
if (dfx_bus_pci)
board_name = "DEFPA";
- pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
- print_name, board_name, dfx_use_mmio ? "" : "I/O ",
+ pr_info("%s: %s at %s addr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
+ print_name, board_name, dfx_use_mmio ? "MMIO" : "I/O",
(long long)bar_start, dev->irq, dev->dev_addr);
/*
@@ -3636,8 +3697,8 @@ static void dfx_unregister(struct device *bdev)
int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
- resource_size_t bar_start = 0; /* pointer to port */
- resource_size_t bar_len = 0; /* resource length */
+ resource_size_t bar_start[3]; /* pointers to ports */
+ resource_size_t bar_len[3]; /* resource lengths */
int alloc_size; /* total buffer size used */
unregister_netdev(dev);
@@ -3655,12 +3716,16 @@ static void dfx_unregister(struct device *bdev)
dfx_bus_uninit(dev);
- dfx_get_bars(bdev, &bar_start, &bar_len);
+ dfx_get_bars(bdev, bar_start, bar_len);
+ if (bar_start[2] != 0)
+ release_region(bar_start[2], bar_len[2]);
+ if (bar_start[1] != 0)
+ release_region(bar_start[1], bar_len[1]);
if (dfx_use_mmio) {
iounmap(bp->base.mem);
- release_mem_region(bar_start, bar_len);
+ release_mem_region(bar_start[0], bar_len[0]);
} else
- release_region(bar_start, bar_len);
+ release_region(bar_start[0], bar_len[0]);
if (dfx_bus_pci)
pci_disable_device(to_pci_dev(bdev));
diff --git a/drivers/net/fddi/defxx.h b/drivers/net/fddi/defxx.h
index 9527f0182fd4..9d30fde2ef3c 100644
--- a/drivers/net/fddi/defxx.h
+++ b/drivers/net/fddi/defxx.h
@@ -1481,9 +1481,11 @@ typedef union
#define PI_ESIC_K_CSR_IO_LEN 0x40 /* 64 bytes */
#define PI_ESIC_K_BURST_HOLDOFF_LEN 0x04 /* 4 bytes */
+#define PI_ESIC_K_ESIC_CSR_LEN 0x40 /* 64 bytes */
#define PI_DEFEA_K_CSR_IO 0x000
#define PI_DEFEA_K_BURST_HOLDOFF 0x040
+#define PI_ESIC_K_ESIC_CSR 0xC80
#define PI_ESIC_K_SLOT_ID 0xC80
#define PI_ESIC_K_SLOT_CNTRL 0xC84
@@ -1556,7 +1558,7 @@ typedef union
#define PI_BURST_HOLDOFF_V_RESERVED 1
#define PI_BURST_HOLDOFF_V_MEM_MAP 0
-/* Define the implicit mask of the Memory Address Mask Register. */
+/* Define the implicit mask of the Memory Address Compare registers. */
#define PI_MEM_ADD_MASK_M 0x3ff
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index c3c4051a089d..daca0dee88f3 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -675,8 +675,7 @@ out_free:
kfree(xbuff);
kfree(rbuff);
- if (dev)
- free_netdev(dev);
+ free_netdev(dev);
out:
return err;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 7d76c9523395..dd867e6cabd6 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -440,7 +440,8 @@ static int negotiate_nvsp_ver(struct hv_device *device,
/* NVSPv2 only: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
- init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
+ ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -560,9 +561,7 @@ int netvsc_device_remove(struct hv_device *device)
vmbus_close(device->channel);
/* Release all resources */
- if (net_device->sub_cb_buf)
- vfree(net_device->sub_cb_buf);
-
+ vfree(net_device->sub_cb_buf);
free_netvsc_device(net_device);
return 0;
}
@@ -765,6 +764,9 @@ int netvsc_send(struct hv_device *device,
out_channel = device->channel;
packet->channel = out_channel;
+ if (out_channel->rescind)
+ return -ENODEV;
+
if (packet->page_buf_cnt) {
ret = vmbus_sendpacket_pagebuffer(out_channel,
packet->page_buf,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 78ec33f5100b..15d82eda0baf 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -193,7 +193,9 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
struct flow_keys flow;
int data_len;
- if (!skb_flow_dissect(skb, &flow) || flow.n_proto != htons(ETH_P_IP))
+ if (!skb_flow_dissect(skb, &flow) ||
+ !(flow.n_proto == htons(ETH_P_IP) ||
+ flow.n_proto == htons(ETH_P_IPV6)))
return false;
if (flow.ip_proto == IPPROTO_TCP)
@@ -697,9 +699,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return -ENODEV;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
- limit = NETVSC_MTU;
+ limit = NETVSC_MTU - ETH_HLEN;
- if (mtu < 68 || mtu > limit)
+ /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
+ if (mtu < ETH_DATA_LEN || mtu > limit)
return -EINVAL;
nvdev->start_remove = true;
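The two netvsc hunks are two halves of one fix: NVSPv2's send_ndis_config.mtu is a frame size, so the host must be told mtu + ETH_HLEN, and symmetrically the largest L3 MTU the guest may accept shrinks to NETVSC_MTU - ETH_HLEN. Illustrative arithmetic (assuming NETVSC_MTU is 65536):

	/* host-side frame size for the default MTU: 1500 + 14 = 1514
	 * upper bound accepted from userspace:      65536 - 14 = 65522
	 */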
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2b86f0b6f6d1..ec0c40a8f653 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -728,7 +728,8 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
rssp->flag = 0;
rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
- NDIS_HASH_TCP_IPV4;
+ NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
+ NDIS_HASH_TCP_IPV6;
rssp->indirect_tabsize = 4*ITAB_NUM;
rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
rssp->hashkey_size = HASH_KEYLEN;
@@ -957,6 +958,9 @@ static int rndis_filter_close_device(struct rndis_device *dev)
return 0;
ret = rndis_filter_set_packet_filter(dev, 0);
+ if (ret == -ENODEV)
+ ret = 0;
+
if (ret == 0)
dev->state = RNDIS_DEV_INITIALIZED;
@@ -997,6 +1001,7 @@ int rndis_filter_device_add(struct hv_device *dev,
int t;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+ u32 mtu, size;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -1028,6 +1033,14 @@ int rndis_filter_device_add(struct hv_device *dev,
return ret;
}
+ /* Get the MTU from the host */
+ size = sizeof(u32);
+ ret = rndis_filter_query_device(rndis_device,
+ RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
+ &mtu, &size);
+ if (ret == 0 && size == sizeof(u32))
+ net_device->ndev->mtu = mtu;
+
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
if (ret != 0) {
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 391a916622a9..1a3c3e57aa0b 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -10,16 +10,6 @@ menuconfig IEEE802154_DRIVERS
If you say N, all options in this submenu will be skipped and
disabled.
-config IEEE802154_FAKEHARD
- tristate "Fake LR-WPAN driver with several interconnected devices"
- depends on IEEE802154_DRIVERS
- ---help---
- Say Y here to enable the fake driver that serves as an example
- of HardMAC device driver.
-
- This driver can also be built as a module. To do so say M here.
- The module will be called 'fakehard'.
-
config IEEE802154_FAKELB
depends on IEEE802154_DRIVERS && MAC802154
tristate "IEEE 802.15.4 loopback driver"
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 655cb95e6e24..d77fa4d77e27 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index c9d2a752abd7..1c0135620c62 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
@@ -33,10 +29,10 @@
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
+#include <linux/ieee802154.h>
-#include <net/ieee802154.h>
#include <net/mac802154.h>
-#include <net/wpan-phy.h>
+#include <net/cfg802154.h>
struct at86rf230_local;
/* at86rf2xx chip depend data.
@@ -50,15 +46,11 @@ struct at86rf2xx_chip_data {
u16 t_off_to_tx_on;
u16 t_frame;
u16 t_p_ack;
- /* short interframe spacing time */
- u16 t_sifs;
- /* long interframe spacing time */
- u16 t_lifs;
/* completion timeout for tx in msecs */
u16 t_tx_timeout;
int rssi_base_val;
- int (*set_channel)(struct at86rf230_local *, int, int);
+ int (*set_channel)(struct at86rf230_local *, u8, u8);
int (*get_desense_steps)(struct at86rf230_local *, s32);
};
@@ -74,12 +66,14 @@ struct at86rf230_state_change {
void (*complete)(void *context);
u8 from_state;
u8 to_state;
+
+ bool irq_enable;
};
struct at86rf230_local {
struct spi_device *spi;
- struct ieee802154_dev *dev;
+ struct ieee802154_hw *hw;
struct at86rf2xx_chip_data *data;
struct regmap *regmap;
@@ -89,10 +83,10 @@ struct at86rf230_local {
struct at86rf230_state_change irq;
bool tx_aret;
+ s8 max_frame_retries;
bool is_tx;
/* spinlock for is_tx protection */
spinlock_t lock;
- struct completion tx_complete;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
};
@@ -291,10 +285,11 @@ struct at86rf230_local {
#define AT86RF2XX_NUMREGS 0x3F
-static int
+static void
at86rf230_async_state_change(struct at86rf230_local *lp,
struct at86rf230_state_change *ctx,
- const u8 state, void (*complete)(void *context));
+ const u8 state, void (*complete)(void *context),
+ const bool irq_enable);
static inline int
__at86rf230_write(struct at86rf230_local *lp,
@@ -451,7 +446,8 @@ at86rf230_async_error_recover(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
+ at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false);
+ ieee802154_wake_queue(lp->hw);
}
static void
@@ -461,21 +457,31 @@ at86rf230_async_error(struct at86rf230_local *lp,
dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
- at86rf230_async_error_recover);
+ at86rf230_async_error_recover, false);
}
/* Generic function to get some register value in async mode */
-static int
+static void
at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
struct at86rf230_state_change *ctx,
- void (*complete)(void *context))
+ void (*complete)(void *context),
+ const bool irq_enable)
{
+ int rc;
+
u8 *tx_buf = ctx->buf;
tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
ctx->trx.len = 2;
ctx->msg.complete = complete;
- return spi_async(lp->spi, &ctx->msg);
+ ctx->irq_enable = irq_enable;
+ rc = spi_async(lp->spi, &ctx->msg);
+ if (rc) {
+ if (irq_enable)
+ enable_irq(lp->spi->irq);
+
+ at86rf230_async_error(lp, ctx, rc);
+ }
}
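
This hunk sets the pattern for the rest of the series: helpers that used to return the spi_async() status now return void, re-enable the interrupt line if they had masked it, and recover through the common error path instead of bubbling an error code up to callers that could not do anything useful with it. A minimal sketch of that shape (the my_* names are illustrative, not from the driver):

	static void my_async_read(struct my_local *lp, u8 reg,
				  struct my_ctx *ctx,
				  void (*complete)(void *context),
				  bool irq_enable)
	{
		int rc;

		ctx->buf[0] = reg;			/* register read command */
		ctx->msg.complete = complete;		/* next link in the chain */
		rc = spi_async(lp->spi, &ctx->msg);	/* queue, never block */
		if (rc) {
			if (irq_enable)			/* undo the ISR's mask */
				enable_irq(lp->spi->irq);
			my_async_error(lp, ctx, rc);	/* recover in place */
		}
	}
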
static void
@@ -512,7 +518,8 @@ at86rf230_async_state_assert(void *context)
if (ctx->to_state == STATE_TX_ON) {
at86rf230_async_state_change(lp, ctx,
STATE_FORCE_TX_ON,
- ctx->complete);
+ ctx->complete,
+ ctx->irq_enable);
return;
}
}
@@ -535,7 +542,6 @@ at86rf230_async_state_delay(void *context)
struct at86rf230_local *lp = ctx->lp;
struct at86rf2xx_chip_data *c = lp->data;
bool force = false;
- int rc;
/* The force state changes will show up as normal states in the
* state status subregister. We change the to_state to the
@@ -604,10 +610,9 @@ at86rf230_async_state_delay(void *context)
udelay(1);
change:
- rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
- at86rf230_async_state_assert);
- if (rc)
- dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+ at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+ at86rf230_async_state_assert,
+ ctx->irq_enable);
}
static void
@@ -622,10 +627,9 @@ at86rf230_async_state_change_start(void *context)
/* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
udelay(1);
- rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
- at86rf230_async_state_change_start);
- if (rc)
- dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+ at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+ at86rf230_async_state_change_start,
+ ctx->irq_enable);
return;
}
@@ -647,20 +651,27 @@ at86rf230_async_state_change_start(void *context)
ctx->trx.len = 2;
ctx->msg.complete = at86rf230_async_state_delay;
rc = spi_async(lp->spi, &ctx->msg);
- if (rc)
- dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+ if (rc) {
+ if (ctx->irq_enable)
+ enable_irq(lp->spi->irq);
+
+ at86rf230_async_error(lp, &lp->state, rc);
+ }
}
-static int
+static void
at86rf230_async_state_change(struct at86rf230_local *lp,
struct at86rf230_state_change *ctx,
- const u8 state, void (*complete)(void *context))
+ const u8 state, void (*complete)(void *context),
+ const bool irq_enable)
{
/* Initialization for the state change context */
ctx->to_state = state;
ctx->complete = complete;
- return at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
- at86rf230_async_state_change_start);
+ ctx->irq_enable = irq_enable;
+ at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+ at86rf230_async_state_change_start,
+ irq_enable);
}
static void
@@ -681,17 +692,16 @@ at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state)
{
int rc;
- rc = at86rf230_async_state_change(lp, &lp->state, state,
- at86rf230_sync_state_change_complete);
- if (rc) {
- at86rf230_async_error(lp, &lp->state, rc);
- return rc;
- }
+ at86rf230_async_state_change(lp, &lp->state, state,
+ at86rf230_sync_state_change_complete,
+ false);
rc = wait_for_completion_timeout(&lp->state_complete,
msecs_to_jiffies(100));
- if (!rc)
+ if (!rc) {
+ at86rf230_async_error(lp, &lp->state, -ETIMEDOUT);
return -ETIMEDOUT;
+ }
return 0;
}
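
The synchronous entry points become thin wrappers over the async machinery: start the chain, block on a completion, and feed a timeout into the same recovery path. The generic shape, again with illustrative my_* names:

	static int my_sync_op(struct my_dev *d, u8 state)
	{
		/* my_op_done() is assumed to call complete(&d->done) */
		my_async_op(d, state, my_op_done);
		if (!wait_for_completion_timeout(&d->done,
						 msecs_to_jiffies(100)))
			return -ETIMEDOUT;	/* recovery ran asynchronously */
		return 0;
	}
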
@@ -701,8 +711,14 @@ at86rf230_tx_complete(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
+ struct sk_buff *skb = lp->tx_skb;
+
+ enable_irq(lp->spi->irq);
- complete(&lp->tx_complete);
+ if (lp->max_frame_retries <= 0)
+ ieee802154_xmit_complete(lp->hw, skb, true);
+ else
+ ieee802154_xmit_complete(lp->hw, skb, false);
}
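
With TX now fully asynchronous, completion is reported through ieee802154_xmit_complete() rather than a driver-local completion object. The third argument -- true above when no hardware retries are configured (max_frame_retries <= 0) -- appears to ask mac802154 to insert the interframe space in software, which matches the t_sifs/t_lifs fields and the usleep_range() spacing workaround being deleted further down. Read this as a sketch of the contract, not as API documentation:

	/* hw_spaces_frames is a hypothetical flag for "ARET handles IFS" */
	if (hw_spaces_frames)
		ieee802154_xmit_complete(hw, skb, false);
	else
		ieee802154_xmit_complete(hw, skb, true);  /* stack adds SIFS/LIFS */
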
static void
@@ -710,12 +726,9 @@ at86rf230_tx_on(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
- rc = at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
- at86rf230_tx_complete);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
+ at86rf230_tx_complete, true);
}
static void
@@ -723,12 +736,9 @@ at86rf230_tx_trac_error(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
- rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- at86rf230_tx_on);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+ at86rf230_tx_on, true);
}
static void
@@ -738,17 +748,14 @@ at86rf230_tx_trac_check(void *context)
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = ctx->buf;
const u8 trac = (buf[1] & 0xe0) >> 5;
- int rc;
/* If trac status is different than zero we need to do a state change
* to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
* state to TX_ON.
*/
if (trac) {
- rc = at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
- at86rf230_tx_trac_error);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
+ at86rf230_tx_trac_error, true);
return;
}
@@ -761,51 +768,29 @@ at86rf230_tx_trac_status(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
- rc = at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
- at86rf230_tx_trac_check);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
+ at86rf230_tx_trac_check, true);
}
static void
at86rf230_rx(struct at86rf230_local *lp,
- const u8 *data, u8 len)
+ const u8 *data, const u8 len, const u8 lqi)
{
- u8 lqi;
struct sk_buff *skb;
u8 rx_local_buf[AT86RF2XX_MAX_BUF];
- if (len < 2)
- return;
-
- /* read full frame buffer and invalid lqi value to lowest
- * indicator if frame was is in a corrupted state.
- */
- if (len > IEEE802154_MTU) {
- lqi = 0;
- len = IEEE802154_MTU;
- dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
- } else {
- lqi = data[len];
- }
-
memcpy(rx_local_buf, data, len);
enable_irq(lp->spi->irq);
- skb = alloc_skb(IEEE802154_MTU, GFP_ATOMIC);
+ skb = dev_alloc_skb(IEEE802154_MTU);
if (!skb) {
dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
return;
}
memcpy(skb_put(skb, len), rx_local_buf, len);
-
- /* We do not put CRC into the frame */
- skb_trim(skb, len - 2);
-
- ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+ ieee802154_rx_irqsafe(lp->hw, skb, lqi);
}
static void
@@ -814,20 +799,31 @@ at86rf230_rx_read_frame_complete(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = lp->irq.buf;
- const u8 len = buf[1];
+ u8 len = buf[1];
+
+ if (!ieee802154_is_valid_psdu_len(len)) {
+ dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
+ len = IEEE802154_MTU;
+ }
- at86rf230_rx(lp, buf + 2, len);
+ at86rf230_rx(lp, buf + 2, len, buf[2 + len]);
}
-static int
+static void
at86rf230_rx_read_frame(struct at86rf230_local *lp)
{
+ int rc;
+
u8 *buf = lp->irq.buf;
buf[0] = CMD_FB;
lp->irq.trx.len = AT86RF2XX_MAX_BUF;
lp->irq.msg.complete = at86rf230_rx_read_frame_complete;
- return spi_async(lp->spi, &lp->irq.msg);
+ rc = spi_async(lp->spi, &lp->irq.msg);
+ if (rc) {
+ enable_irq(lp->spi->irq);
+ at86rf230_async_error(lp, &lp->irq, rc);
+ }
}
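
For reference, the frame-buffer layout these two functions imply (an inference from the indices in this patch, not a quote from the datasheet): buf[1] carries the PHR, i.e. the PSDU length, and the transceiver appends an LQI byte right after the PSDU, which is why the LQI argument above is read from buf[2 + len]. As a hypothetical struct view:

	struct at86rf2xx_fb {			/* view of lp->irq.buf */
		u8 cmd;				/* buf[0]: SPI command echo */
		u8 phr;				/* buf[1]: PSDU length, clamped to IEEE802154_MTU */
		u8 psdu[IEEE802154_MTU];	/* buf[2] .. buf[2 + phr - 1] */
		/* LQI byte follows at buf[2 + phr] */
	};
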
static void
@@ -835,7 +831,6 @@ at86rf230_rx_trac_check(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
/* Possible check on trac status here. This could be useful to make
* some stats on why receive failed. Not used at the moment, but it's
@@ -843,34 +838,31 @@ at86rf230_rx_trac_check(void *context)
* The programming guide says to do so.
*/
- rc = at86rf230_rx_read_frame(lp);
- if (rc) {
- enable_irq(lp->spi->irq);
- at86rf230_async_error(lp, ctx, rc);
- }
+ at86rf230_rx_read_frame(lp);
}
-static int
+static void
at86rf230_irq_trx_end(struct at86rf230_local *lp)
{
spin_lock(&lp->lock);
if (lp->is_tx) {
lp->is_tx = 0;
spin_unlock(&lp->lock);
- enable_irq(lp->spi->irq);
if (lp->tx_aret)
- return at86rf230_async_state_change(lp, &lp->irq,
- STATE_FORCE_TX_ON,
- at86rf230_tx_trac_status);
+ at86rf230_async_state_change(lp, &lp->irq,
+ STATE_FORCE_TX_ON,
+ at86rf230_tx_trac_status,
+ true);
else
- return at86rf230_async_state_change(lp, &lp->irq,
- STATE_RX_AACK_ON,
- at86rf230_tx_complete);
+ at86rf230_async_state_change(lp, &lp->irq,
+ STATE_RX_AACK_ON,
+ at86rf230_tx_complete,
+ true);
} else {
spin_unlock(&lp->lock);
- return at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
- at86rf230_rx_trac_check);
+ at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
+ at86rf230_rx_trac_check, true);
}
}
@@ -881,12 +873,9 @@ at86rf230_irq_status(void *context)
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = lp->irq.buf;
const u8 irq = buf[1];
- int rc;
if (irq & IRQ_TRX_END) {
- rc = at86rf230_irq_trx_end(lp);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_irq_trx_end(lp);
} else {
enable_irq(lp->spi->irq);
dev_err(&lp->spi->dev, "not supported irq %02x received\n",
@@ -901,13 +890,14 @@ static irqreturn_t at86rf230_isr(int irq, void *data)
u8 *buf = ctx->buf;
int rc;
- disable_irq_nosync(lp->spi->irq);
+ disable_irq_nosync(irq);
buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
ctx->trx.len = 2;
ctx->msg.complete = at86rf230_irq_status;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
+ enable_irq(irq);
at86rf230_async_error(lp, ctx, rc);
return IRQ_NONE;
}
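
The interrupt discipline after this patch: the line is masked with disable_irq_nosync() on entry and unmasked exactly once per event -- at the tail of the async chain (frame delivered or TX completed) or, as above, when spi_async() fails before the chain even starts. Schematically, with a hypothetical kick_off() helper:

	static irqreturn_t my_isr(int irq, void *data)
	{
		disable_irq_nosync(irq);	/* unmasked by the chain's tail */
		if (kick_off(data)) {		/* queues the first spi_async() */
			enable_irq(irq);	/* chain never started */
			return IRQ_NONE;
		}
		return IRQ_HANDLED;
	}
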
@@ -960,22 +950,18 @@ at86rf230_xmit_tx_on(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- int rc;
- rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
- at86rf230_write_frame);
- if (rc)
- at86rf230_async_error(lp, ctx, rc);
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+ at86rf230_write_frame, false);
}
static int
-at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
struct at86rf230_state_change *ctx = &lp->tx;
void (*tx_complete)(void *context) = at86rf230_write_frame;
- int rc;
lp->tx_skb = skb;
@@ -986,61 +972,39 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (lp->tx_aret)
tx_complete = at86rf230_xmit_tx_on;
- rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
- tx_complete);
- if (rc) {
- at86rf230_async_error(lp, ctx, rc);
- return rc;
- }
- rc = wait_for_completion_interruptible_timeout(&lp->tx_complete,
- msecs_to_jiffies(lp->data->t_tx_timeout));
- if (!rc) {
- at86rf230_async_error(lp, ctx, rc);
- return -ETIMEDOUT;
- }
-
- /* Interfame spacing time, which is phy depend.
- * TODO
- * Move this handling in MAC 802.15.4 layer.
- * This is currently a workaround to avoid fragmenation issues.
- */
- if (skb->len > 18)
- usleep_range(lp->data->t_lifs, lp->data->t_lifs + 10);
- else
- usleep_range(lp->data->t_sifs, lp->data->t_sifs + 10);
+ at86rf230_async_state_change(lp, ctx, STATE_TX_ON, tx_complete, false);
return 0;
}
static int
-at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
{
- might_sleep();
BUG_ON(!level);
*level = 0xbe;
return 0;
}
static int
-at86rf230_start(struct ieee802154_dev *dev)
+at86rf230_start(struct ieee802154_hw *hw)
{
- return at86rf230_sync_state_change(dev->priv, STATE_RX_AACK_ON);
+ return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON);
}
static void
-at86rf230_stop(struct ieee802154_dev *dev)
+at86rf230_stop(struct ieee802154_hw *hw)
{
- at86rf230_sync_state_change(dev->priv, STATE_FORCE_TRX_OFF);
+ at86rf230_sync_state_change(hw->priv, STATE_FORCE_TRX_OFF);
}
static int
-at86rf23x_set_channel(struct at86rf230_local *lp, int page, int channel)
+at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
{
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
static int
-at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
+at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
{
int rc;
@@ -1061,44 +1025,60 @@ at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
if (rc < 0)
return rc;
+	/* This sets the symbol_duration according to the frequency on the 212.
+	 * TODO: move this handling into the channel/page setting in cfg802154.
+	 * We can do that because these timings follow the 802.15.4 standard,
+	 * so the calculation would be more generic there.
+	 *
+	 * This should also be protected from the ifs_timer, i.e. cancel the
+	 * timer and reinit it with the new value. For now, this is okay.
+	 */
+ if (channel == 0) {
+ if (page == 0) {
+ /* SUB:0 and BPSK:0 -> BPSK-20 */
+ lp->hw->phy->symbol_duration = 50;
+ } else {
+ /* SUB:1 and BPSK:0 -> BPSK-40 */
+ lp->hw->phy->symbol_duration = 25;
+ }
+ } else {
+ if (page == 0)
+ /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */
+ lp->hw->phy->symbol_duration = 40;
+ else
+ /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */
+ lp->hw->phy->symbol_duration = 16;
+ }
+
+ lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD *
+ lp->hw->phy->symbol_duration;
+ lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD *
+ lp->hw->phy->symbol_duration;
+
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
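
The derived IFS periods replace the fixed per-chip t_sifs/t_lifs constants removed further down. Assuming IEEE802154_LIFS_PERIOD and IEEE802154_SIFS_PERIOD are the standard's 40 and 12 symbols, the 2.4 GHz O-QPSK case (16 us symbol) works out as:

	/* lifs_period = 40 * 16 = 640 us
	 * sifs_period = 12 * 16 = 192 us   (cf. the deleted t_sifs = 192)
	 */
	phy->lifs_period = IEEE802154_LIFS_PERIOD * phy->symbol_duration;
	phy->sifs_period = IEEE802154_SIFS_PERIOD * phy->symbol_duration;
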
static int
-at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
+at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
int rc;
- might_sleep();
-
- if (page < 0 || page > 31 ||
- !(lp->dev->phy->channels_supported[page] & BIT(channel))) {
- WARN_ON(1);
- return -EINVAL;
- }
-
rc = lp->data->set_channel(lp, page, channel);
- if (rc < 0)
- return rc;
-
/* Wait for PLL */
usleep_range(lp->data->t_channel_switch,
lp->data->t_channel_switch + 10);
- dev->phy->current_channel = channel;
- dev->phy->current_page = page;
-
- return 0;
+ return rc;
}
static int
-at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
+at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
- if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(&lp->spi->dev,
@@ -1107,7 +1087,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
- if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
dev_vdbg(&lp->spi->dev,
@@ -1116,7 +1096,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
- if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
u8 i, addr[8];
memcpy(addr, &filt->ieee_addr, 8);
@@ -1126,7 +1106,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
- if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
dev_vdbg(&lp->spi->dev,
"at86rf230_set_hw_addr_filt called for panc change\n");
if (filt->pan_coord)
@@ -1139,9 +1119,9 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
}
static int
-at86rf230_set_txpower(struct ieee802154_dev *dev, int db)
+at86rf230_set_txpower(struct ieee802154_hw *hw, int db)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
/* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
* bits decrease power in 1dB steps. 0x60 represents extra PA gain of
@@ -1158,17 +1138,17 @@ at86rf230_set_txpower(struct ieee802154_dev *dev, int db)
}
static int
-at86rf230_set_lbt(struct ieee802154_dev *dev, bool on)
+at86rf230_set_lbt(struct ieee802154_hw *hw, bool on)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on);
}
static int
-at86rf230_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+at86rf230_set_cca_mode(struct ieee802154_hw *hw, u8 mode)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
}
@@ -1186,9 +1166,9 @@ at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
}
static int
-at86rf230_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
if (level < lp->data->rssi_base_val || level > 30)
return -EINVAL;
@@ -1198,15 +1178,12 @@ at86rf230_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
}
static int
-at86rf230_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+at86rf230_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be,
u8 retries)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
int rc;
- if (min_be > max_be || max_be > 8 || retries > 5)
- return -EINVAL;
-
rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be);
if (rc)
return rc;
@@ -1219,15 +1196,13 @@ at86rf230_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
}
static int
-at86rf230_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
- struct at86rf230_local *lp = dev->priv;
+ struct at86rf230_local *lp = hw->priv;
int rc = 0;
- if (retries < -1 || retries > 15)
- return -EINVAL;
-
lp->tx_aret = retries >= 0;
+ lp->max_frame_retries = retries;
if (retries >= 0)
rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
@@ -1235,9 +1210,36 @@ at86rf230_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
return rc;
}
-static struct ieee802154_ops at86rf230_ops = {
+static int
+at86rf230_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
+{
+ struct at86rf230_local *lp = hw->priv;
+ int rc;
+
+ if (on) {
+ rc = at86rf230_write_subreg(lp, SR_AACK_DIS_ACK, 1);
+ if (rc < 0)
+ return rc;
+
+ rc = at86rf230_write_subreg(lp, SR_AACK_PROM_MODE, 1);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = at86rf230_write_subreg(lp, SR_AACK_PROM_MODE, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = at86rf230_write_subreg(lp, SR_AACK_DIS_ACK, 0);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static const struct ieee802154_ops at86rf230_ops = {
.owner = THIS_MODULE,
- .xmit = at86rf230_xmit,
+ .xmit_async = at86rf230_xmit,
.ed = at86rf230_ed,
.set_channel = at86rf230_channel,
.start = at86rf230_start,
@@ -1249,6 +1251,7 @@ static struct ieee802154_ops at86rf230_ops = {
.set_cca_ed_level = at86rf230_set_cca_ed_level,
.set_csma_params = at86rf230_set_csma_params,
.set_frame_retries = at86rf230_set_frame_retries,
+ .set_promiscuous_mode = at86rf230_set_promiscuous_mode,
};
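
Both transmit hooks are visible in this diff, and each driver sets exactly one: at86rf230 moves to .xmit_async (must not block; the result is reported via ieee802154_xmit_complete() as above), while cc2520, mrf24j40 and fakelb keep the blocking model under the renamed .xmit_sync. The sync shape is roughly the following, with hypothetical my_* helpers:

	static int my_xmit_sync(struct ieee802154_hw *hw, struct sk_buff *skb)
	{
		struct my_priv *p = hw->priv;
		int rc;

		rc = my_write_txfifo(p, skb->data, skb->len);
		if (rc)
			return rc;
		return my_wait_tx_done(p);	/* may sleep until the TX IRQ */
	}
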
static struct at86rf2xx_chip_data at86rf233_data = {
@@ -1259,8 +1262,6 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_off_to_tx_on = 80,
.t_frame = 4096,
.t_p_ack = 545,
- .t_sifs = 192,
- .t_lifs = 480,
.t_tx_timeout = 2000,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
@@ -1275,8 +1276,6 @@ static struct at86rf2xx_chip_data at86rf231_data = {
.t_off_to_tx_on = 110,
.t_frame = 4096,
.t_p_ack = 545,
- .t_sifs = 192,
- .t_lifs = 480,
.t_tx_timeout = 2000,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
@@ -1291,8 +1290,6 @@ static struct at86rf2xx_chip_data at86rf212_data = {
.t_off_to_tx_on = 200,
.t_frame = 4096,
.t_p_ack = 545,
- .t_sifs = 192,
- .t_lifs = 480,
.t_tx_timeout = 2000,
.rssi_base_val = -100,
.set_channel = at86rf212_set_channel,
@@ -1354,7 +1351,11 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return -EINVAL;
}
- return 0;
+	/* Force the slotted operation bit to 0. Sometimes the atben
+	 * sets this bit, and it is not clear why. We always force it
+	 * to zero while probing.
+	 */
+ return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0);
}
static struct at86rf230_platform_data *
@@ -1409,9 +1410,10 @@ at86rf230_detect_device(struct at86rf230_local *lp)
return -EINVAL;
}
- lp->dev->extra_tx_headroom = 0;
- lp->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
- IEEE802154_HW_TXPOWER | IEEE802154_HW_CSMA;
+ lp->hw->extra_tx_headroom = 0;
+ lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
+ IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
+ IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
switch (part) {
case 2:
@@ -1421,15 +1423,19 @@ at86rf230_detect_device(struct at86rf230_local *lp)
case 3:
chip = "at86rf231";
lp->data = &at86rf231_data;
- lp->dev->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->current_channel = 11;
+ lp->hw->phy->symbol_duration = 16;
break;
case 7:
chip = "at86rf212";
if (version == 1) {
lp->data = &at86rf212_data;
- lp->dev->flags |= IEEE802154_HW_LBT;
- lp->dev->phy->channels_supported[0] = 0x00007FF;
- lp->dev->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->flags |= IEEE802154_HW_LBT;
+ lp->hw->phy->channels_supported[0] = 0x00007FF;
+ lp->hw->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->phy->current_channel = 5;
+ lp->hw->phy->symbol_duration = 25;
} else {
rc = -ENOTSUPP;
}
@@ -1437,7 +1443,9 @@ at86rf230_detect_device(struct at86rf230_local *lp)
case 11:
chip = "at86rf233";
lp->data = &at86rf233_data;
- lp->dev->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->current_channel = 13;
+ lp->hw->phy->symbol_duration = 16;
break;
default:
chip = "unkown";
@@ -1478,7 +1486,7 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp)
static int at86rf230_probe(struct spi_device *spi)
{
struct at86rf230_platform_data *pdata;
- struct ieee802154_dev *dev;
+ struct ieee802154_hw *hw;
struct at86rf230_local *lp;
unsigned int status;
int rc, irq_type;
@@ -1517,14 +1525,16 @@ static int at86rf230_probe(struct spi_device *spi)
usleep_range(120, 240);
}
- dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
- if (!dev)
+ hw = ieee802154_alloc_hw(sizeof(*lp), &at86rf230_ops);
+ if (!hw)
return -ENOMEM;
- lp = dev->priv;
- lp->dev = dev;
+ lp = hw->priv;
+ lp->hw = hw;
lp->spi = spi;
- dev->parent = &spi->dev;
+ hw->parent = &spi->dev;
+ hw->vif_data_size = sizeof(*lp);
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
lp->regmap = devm_regmap_init_spi(spi, &at86rf230_regmap_spi_config);
if (IS_ERR(lp->regmap)) {
@@ -1541,7 +1551,6 @@ static int at86rf230_probe(struct spi_device *spi)
goto free_dev;
spin_lock_init(&lp->lock);
- init_completion(&lp->tx_complete);
init_completion(&lp->state_complete);
spi_set_drvdata(spi, lp);
@@ -1564,14 +1573,14 @@ static int at86rf230_probe(struct spi_device *spi)
if (rc)
goto free_dev;
- rc = ieee802154_register_device(lp->dev);
+ rc = ieee802154_register_hw(lp->hw);
if (rc)
goto free_dev;
return rc;
free_dev:
- ieee802154_free_device(lp->dev);
+ ieee802154_free_hw(lp->hw);
return rc;
}
@@ -1582,8 +1591,8 @@ static int at86rf230_remove(struct spi_device *spi)
/* mask all at86rf230 irq's */
at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
- ieee802154_unregister_device(lp->dev);
- ieee802154_free_device(lp->dev);
+ ieee802154_unregister_hw(lp->hw);
+ ieee802154_free_hw(lp->hw);
dev_dbg(&spi->dev, "unregistered at86rf230\n");
return 0;
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 8a5ac7ab2300..f9df9fa86d5f 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -21,10 +21,10 @@
#include <linux/skbuff.h>
#include <linux/pinctrl/consumer.h>
#include <linux/of_gpio.h>
+#include <linux/ieee802154.h>
#include <net/mac802154.h>
-#include <net/wpan-phy.h>
-#include <net/ieee802154.h>
+#include <net/cfg802154.h>
#define SPI_COMMAND_BUFFER 3
#define HIGH 1
@@ -193,7 +193,7 @@
/* Driver private information */
struct cc2520_private {
struct spi_device *spi; /* SPI device structure */
- struct ieee802154_dev *dev; /* IEEE-802.15.4 device */
+ struct ieee802154_hw *hw; /* IEEE-802.15.4 device */
u8 *buf; /* SPI TX/Rx data buffer */
struct mutex buffer_mutex; /* SPI buffer mutex */
bool is_tx; /* Flag for sync b/w Tx and Rx */
@@ -453,20 +453,20 @@ cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len, u8 *lqi)
return status;
}
-static int cc2520_start(struct ieee802154_dev *dev)
+static int cc2520_start(struct ieee802154_hw *hw)
{
- return cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRXON);
+ return cc2520_cmd_strobe(hw->priv, CC2520_CMD_SRXON);
}
-static void cc2520_stop(struct ieee802154_dev *dev)
+static void cc2520_stop(struct ieee802154_hw *hw)
{
- cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRFOFF);
+ cc2520_cmd_strobe(hw->priv, CC2520_CMD_SRFOFF);
}
static int
-cc2520_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct cc2520_private *priv = dev->priv;
+ struct cc2520_private *priv = hw->priv;
unsigned long flags;
int rc;
u8 status = 0;
@@ -524,7 +524,7 @@ static int cc2520_rx(struct cc2520_private *priv)
if (len < 2 || len > IEEE802154_MTU)
return -EINVAL;
- skb = alloc_skb(len, GFP_KERNEL);
+ skb = dev_alloc_skb(len);
if (!skb)
return -ENOMEM;
@@ -536,7 +536,7 @@ static int cc2520_rx(struct cc2520_private *priv)
skb_trim(skb, skb->len - 2);
- ieee802154_rx_irqsafe(priv->dev, skb, lqi);
+ ieee802154_rx_irqsafe(priv->hw, skb, lqi);
dev_vdbg(&priv->spi->dev, "RXFIFO: %x %x\n", len, lqi);
@@ -544,9 +544,9 @@ static int cc2520_rx(struct cc2520_private *priv)
}
static int
-cc2520_ed(struct ieee802154_dev *dev, u8 *level)
+cc2520_ed(struct ieee802154_hw *hw, u8 *level)
{
- struct cc2520_private *priv = dev->priv;
+ struct cc2520_private *priv = hw->priv;
u8 status = 0xff;
u8 rssi;
int ret;
@@ -569,12 +569,11 @@ cc2520_ed(struct ieee802154_dev *dev, u8 *level)
}
static int
-cc2520_set_channel(struct ieee802154_dev *dev, int page, int channel)
+cc2520_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- struct cc2520_private *priv = dev->priv;
+ struct cc2520_private *priv = hw->priv;
int ret;
- might_sleep();
dev_dbg(&priv->spi->dev, "trying to set channel\n");
BUG_ON(page != 0);
@@ -588,12 +587,12 @@ cc2520_set_channel(struct ieee802154_dev *dev, int page, int channel)
}
static int
-cc2520_filter(struct ieee802154_dev *dev,
+cc2520_filter(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt, unsigned long changed)
{
- struct cc2520_private *priv = dev->priv;
+ struct cc2520_private *priv = hw->priv;
- if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 panid = le16_to_cpu(filt->pan_id);
dev_vdbg(&priv->spi->dev,
@@ -602,7 +601,7 @@ cc2520_filter(struct ieee802154_dev *dev,
sizeof(panid), (u8 *)&panid);
}
- if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
dev_vdbg(&priv->spi->dev,
"cc2520_filter called for IEEE addr\n");
cc2520_write_ram(priv, CC2520RAM_IEEEADDR,
@@ -610,7 +609,7 @@ cc2520_filter(struct ieee802154_dev *dev,
(u8 *)&filt->ieee_addr);
}
- if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(&priv->spi->dev,
@@ -619,7 +618,7 @@ cc2520_filter(struct ieee802154_dev *dev,
sizeof(addr), (u8 *)&addr);
}
- if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
dev_vdbg(&priv->spi->dev,
"cc2520_filter called for panc change\n");
if (filt->pan_coord)
@@ -631,11 +630,11 @@ cc2520_filter(struct ieee802154_dev *dev,
return 0;
}
-static struct ieee802154_ops cc2520_ops = {
+static const struct ieee802154_ops cc2520_ops = {
.owner = THIS_MODULE,
.start = cc2520_start,
.stop = cc2520_stop,
- .xmit = cc2520_tx,
+ .xmit_sync = cc2520_tx,
.ed = cc2520_ed,
.set_channel = cc2520_set_channel,
.set_hw_addr_filt = cc2520_filter,
@@ -645,27 +644,29 @@ static int cc2520_register(struct cc2520_private *priv)
{
int ret = -ENOMEM;
- priv->dev = ieee802154_alloc_device(sizeof(*priv), &cc2520_ops);
- if (!priv->dev)
+ priv->hw = ieee802154_alloc_hw(sizeof(*priv), &cc2520_ops);
+ if (!priv->hw)
goto err_ret;
- priv->dev->priv = priv;
- priv->dev->parent = &priv->spi->dev;
- priv->dev->extra_tx_headroom = 0;
+ priv->hw->priv = priv;
+ priv->hw->parent = &priv->spi->dev;
+ priv->hw->extra_tx_headroom = 0;
+ priv->hw->vif_data_size = sizeof(*priv);
/* We only support 2.4 GHz */
- priv->dev->phy->channels_supported[0] = 0x7FFF800;
- priv->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+ priv->hw->phy->channels_supported[0] = 0x7FFF800;
+ priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
+ IEEE802154_HW_AFILT;
dev_vdbg(&priv->spi->dev, "registered cc2520\n");
- ret = ieee802154_register_device(priv->dev);
+ ret = ieee802154_register_hw(priv->hw);
if (ret)
goto err_free_device;
return 0;
err_free_device:
- ieee802154_free_device(priv->dev);
+ ieee802154_free_hw(priv->hw);
err_ret:
return ret;
}
@@ -857,7 +858,7 @@ static int cc2520_probe(struct spi_device *spi)
pinctrl = devm_pinctrl_get_select_default(&spi->dev);
if (IS_ERR(pinctrl))
dev_warn(&spi->dev,
- "pinctrl pins are not configured");
+ "pinctrl pins are not configured\n");
pdata = cc2520_get_platform_data(spi);
if (!pdata) {
@@ -1002,8 +1003,8 @@ static int cc2520_remove(struct spi_device *spi)
mutex_destroy(&priv->buffer_mutex);
flush_work(&priv->fifop_irqwork);
- ieee802154_unregister_device(priv->dev);
- ieee802154_free_device(priv->dev);
+ ieee802154_unregister_hw(priv->hw);
+ ieee802154_free_hw(priv->hw);
return 0;
}
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
deleted file mode 100644
index 6cbc56ad9ff4..000000000000
--- a/drivers/net/ieee802154/fakehard.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * Sample driver for HardMAC IEEE 802.15.4 devices
- *
- * Copyright (C) 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-#include <net/ieee802154.h>
-#include <net/nl802154.h>
-#include <net/wpan-phy.h>
-
-struct fakehard_priv {
- struct wpan_phy *phy;
-};
-
-static struct wpan_phy *fake_to_phy(const struct net_device *dev)
-{
- struct fakehard_priv *priv = netdev_priv(dev);
- return priv->phy;
-}
-
-/**
- * fake_get_phy - Return a phy corresponding to this device.
- * @dev: The network device for which to return the wan-phy object
- *
- * This function returns a wpan-phy object corresponding to the passed
- * network device. Reference counter for wpan-phy object is incremented,
- * so when the wpan-phy isn't necessary, you should drop the reference
- * via @wpan_phy_put() call.
- */
-static struct wpan_phy *fake_get_phy(const struct net_device *dev)
-{
- struct wpan_phy *phy = fake_to_phy(dev);
- return to_phy(get_device(&phy->dev));
-}
-
-/**
- * fake_get_pan_id - Retrieve the PAN ID of the device.
- * @dev: The network device to retrieve the PAN of.
- *
- * Return the ID of the PAN from the PIB.
- */
-static __le16 fake_get_pan_id(const struct net_device *dev)
-{
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- return cpu_to_le16(0xeba1);
-}
-
-/**
- * fake_get_short_addr - Retrieve the short address of the device.
- * @dev: The network device to retrieve the short address of.
- *
- * Returns the IEEE 802.15.4 short-form address cached for this
- * device. If the device has not yet had a short address assigned
- * then this should return 0xFFFF to indicate a lack of association.
- */
-static __le16 fake_get_short_addr(const struct net_device *dev)
-{
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- return cpu_to_le16(0x1);
-}
-
-/**
- * fake_get_dsn - Retrieve the DSN of the device.
- * @dev: The network device to retrieve the DSN for.
- *
- * Returns the IEEE 802.15.4 DSN for the network device.
- * The DSN is the sequence number which will be added to each
- * packet or MAC command frame by the MAC during transmission.
- *
- * DSN means 'Data Sequence Number'.
- *
- * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
- * document.
- */
-static u8 fake_get_dsn(const struct net_device *dev)
-{
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- return 0x00; /* DSN are implemented in HW, so return just 0 */
-}
-
-/**
- * fake_assoc_req - Make an association request to the HW.
- * @dev: The network device which we are associating to a network.
- * @addr: The coordinator with which we wish to associate.
- * @channel: The channel on which to associate.
- * @cap: The capability information field to use in the association.
- *
- * Start an association with a coordinator. The coordinator's address
- * and PAN ID can be found in @addr.
- *
- * Note: This is in section 7.3.1 and 7.5.3.1 of the IEEE
- * 802.15.4-2006 document.
- */
-static int fake_assoc_req(struct net_device *dev,
- struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap)
-{
- struct wpan_phy *phy = fake_to_phy(dev);
-
- mutex_lock(&phy->pib_lock);
- phy->current_channel = channel;
- phy->current_page = page;
- mutex_unlock(&phy->pib_lock);
-
- /* We simply emulate it here */
- return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev),
- IEEE802154_SUCCESS);
-}
-
-/**
- * fake_assoc_resp - Send an association response to a device.
- * @dev: The network device on which to send the response.
- * @addr: The address of the device to respond to.
- * @short_addr: The assigned short address for the device (if any).
- * @status: The result of the association request.
- *
- * Queue the association response of the coordinator to another
- * device's attempt to associate with the network which we
- * coordinate. This is then added to the indirect-send queue to be
- * transmitted to the end device when it polls for data.
- *
- * Note: This is in section 7.3.2 and 7.5.3.1 of the IEEE
- * 802.15.4-2006 document.
- */
-static int fake_assoc_resp(struct net_device *dev,
- struct ieee802154_addr *addr, __le16 short_addr, u8 status)
-{
- return 0;
-}
-
-/**
- * fake_disassoc_req - Disassociate a device from a network.
- * @dev: The network device on which we're disassociating a device.
- * @addr: The device to disassociate from the network.
- * @reason: The reason to give to the device for being disassociated.
- *
- * This sends a disassociation notification to the device being
- * disassociated from the network.
- *
- * Note: This is in section 7.5.3.2 of the IEEE 802.15.4-2006
- * document, with the reason described in 7.3.3.2.
- */
-static int fake_disassoc_req(struct net_device *dev,
- struct ieee802154_addr *addr, u8 reason)
-{
- return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS);
-}
-
-/**
- * fake_start_req - Start an IEEE 802.15.4 PAN.
- * @dev: The network device on which to start the PAN.
- * @addr: The coordinator address to use when starting the PAN.
- * @channel: The channel on which to start the PAN.
- * @bcn_ord: Beacon order.
- * @sf_ord: Superframe order.
- * @pan_coord: Whether or not we are the PAN coordinator or just
- * requesting a realignment perhaps?
- * @blx: Battery Life Extension feature bitfield.
- * @coord_realign: Something to realign something else.
- *
- * If pan_coord is non-zero then this starts a network with the
- * provided parameters, otherwise it attempts a coordinator
- * realignment of the stated network instead.
- *
- * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
- * document, with 7.3.8 describing coordinator realignment.
- */
-static int fake_start_req(struct net_device *dev,
- struct ieee802154_addr *addr, u8 channel, u8 page,
- u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
- u8 coord_realign)
-{
- struct wpan_phy *phy = fake_to_phy(dev);
-
- mutex_lock(&phy->pib_lock);
- phy->current_channel = channel;
- phy->current_page = page;
- mutex_unlock(&phy->pib_lock);
-
- /* We don't emulate beacons here at all, so START should fail */
- ieee802154_nl_start_confirm(dev, IEEE802154_INVALID_PARAMETER);
- return 0;
-}
-
-/**
- * fake_scan_req - Start a channel scan.
- * @dev: The network device on which to perform a channel scan.
- * @type: The type of scan to perform.
- * @channels: The channel bitmask to scan.
- * @duration: How long to spend on each channel.
- *
- * This starts either a passive (energy) scan or an active (PAN) scan
- * on the channels indicated in the @channels bitmask. The duration of
- * the scan is measured in terms of superframe duration. Specifically,
- * the scan will spend aBaseSuperFrameDuration * ((2^n) + 1) on each
- * channel.
- *
- * Note: This is in section 7.5.2.1 of the IEEE 802.15.4-2006 document.
- */
-static int fake_scan_req(struct net_device *dev, u8 type, u32 channels,
- u8 page, u8 duration)
-{
- u8 edl[27] = {};
- return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type,
- channels, page,
- type == IEEE802154_MAC_SCAN_ED ? edl : NULL);
-}
-
-static struct ieee802154_mlme_ops fake_mlme = {
- .assoc_req = fake_assoc_req,
- .assoc_resp = fake_assoc_resp,
- .disassoc_req = fake_disassoc_req,
- .start_req = fake_start_req,
- .scan_req = fake_scan_req,
-
- .get_phy = fake_get_phy,
-
- .get_pan_id = fake_get_pan_id,
- .get_short_addr = fake_get_short_addr,
- .get_dsn = fake_get_dsn,
-};
-
-static int ieee802154_fake_open(struct net_device *dev)
-{
- netif_start_queue(dev);
- return 0;
-}
-
-static int ieee802154_fake_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- /* FIXME: do hardware work here ... */
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-
-static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
-{
- struct sockaddr_ieee802154 *sa =
- (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
- u16 pan_id, short_addr;
-
- switch (cmd) {
- case SIOCGIFADDR:
- /* FIXME: fixed here, get from device IRL */
- pan_id = le16_to_cpu(fake_get_pan_id(dev));
- short_addr = le16_to_cpu(fake_get_short_addr(dev));
- if (pan_id == IEEE802154_PANID_BROADCAST ||
- short_addr == IEEE802154_ADDR_BROADCAST)
- return -EADDRNOTAVAIL;
-
- sa->family = AF_IEEE802154;
- sa->addr.addr_type = IEEE802154_ADDR_SHORT;
- sa->addr.pan_id = pan_id;
- sa->addr.short_addr = short_addr;
- return 0;
- }
- return -ENOIOCTLCMD;
-}
-
-static int ieee802154_fake_mac_addr(struct net_device *dev, void *p)
-{
- return -EBUSY; /* HW address is built into the device */
-}
-
-static const struct net_device_ops fake_ops = {
- .ndo_open = ieee802154_fake_open,
- .ndo_stop = ieee802154_fake_close,
- .ndo_start_xmit = ieee802154_fake_xmit,
- .ndo_do_ioctl = ieee802154_fake_ioctl,
- .ndo_set_mac_address = ieee802154_fake_mac_addr,
-};
-
-static void ieee802154_fake_destruct(struct net_device *dev)
-{
- struct wpan_phy *phy = fake_to_phy(dev);
-
- wpan_phy_unregister(phy);
- free_netdev(dev);
- wpan_phy_free(phy);
-}
-
-static void ieee802154_fake_setup(struct net_device *dev)
-{
- dev->addr_len = IEEE802154_ADDR_LEN;
- memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->features = NETIF_F_HW_CSUM;
- dev->needed_tailroom = 2; /* FCS */
- dev->mtu = 127;
- dev->tx_queue_len = 10;
- dev->type = ARPHRD_IEEE802154;
- dev->flags = IFF_NOARP | IFF_BROADCAST;
- dev->watchdog_timeo = 0;
- dev->destructor = ieee802154_fake_destruct;
-}
-
-
-static int ieee802154fake_probe(struct platform_device *pdev)
-{
- struct net_device *dev;
- struct fakehard_priv *priv;
- struct wpan_phy *phy = wpan_phy_alloc(0);
- int err;
-
- if (!phy)
- return -ENOMEM;
-
- dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d",
- NET_NAME_UNKNOWN, ieee802154_fake_setup);
- if (!dev) {
- wpan_phy_free(phy);
- return -ENOMEM;
- }
-
- memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
- dev->addr_len);
-
- /*
- * For now we'd like to emulate 2.4 GHz-only device,
- * both O-QPSK and CSS
- */
- /* 2.4 GHz O-QPSK 802.15.4-2003 */
- phy->channels_supported[0] |= 0x7FFF800;
- /* 2.4 GHz CSS 802.15.4a-2007 */
- phy->channels_supported[3] |= 0x3fff;
-
- phy->transmit_power = 0xbf;
-
- dev->netdev_ops = &fake_ops;
- dev->ml_priv = &fake_mlme;
-
- priv = netdev_priv(dev);
- priv->phy = phy;
-
- wpan_phy_set_dev(phy, &pdev->dev);
- SET_NETDEV_DEV(dev, &phy->dev);
-
- platform_set_drvdata(pdev, dev);
-
- err = wpan_phy_register(phy);
- if (err)
- goto err_phy_reg;
-
- err = register_netdev(dev);
- if (err)
- goto err_netdev_reg;
-
- dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
- return 0;
-
-err_netdev_reg:
- wpan_phy_unregister(phy);
-err_phy_reg:
- free_netdev(dev);
- wpan_phy_free(phy);
- return err;
-}
-
-static int ieee802154fake_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- unregister_netdev(dev);
- return 0;
-}
-
-static struct platform_device *ieee802154fake_dev;
-
-static struct platform_driver ieee802154fake_driver = {
- .probe = ieee802154fake_probe,
- .remove = ieee802154fake_remove,
- .driver = {
- .name = "ieee802154hardmac",
- .owner = THIS_MODULE,
- },
-};
-
-static __init int fake_init(void)
-{
- ieee802154fake_dev = platform_device_register_simple(
- "ieee802154hardmac", -1, NULL, 0);
- return platform_driver_register(&ieee802154fake_driver);
-}
-
-static __exit void fake_exit(void)
-{
- platform_driver_unregister(&ieee802154fake_driver);
- platform_device_unregister(ieee802154fake_dev);
-}
-
-module_init(fake_init);
-module_exit(fake_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 27d83207d24c..96947d724189 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
@@ -29,12 +25,12 @@
#include <linux/device.h>
#include <linux/spinlock.h>
#include <net/mac802154.h>
-#include <net/wpan-phy.h>
+#include <net/cfg802154.h>
static int numlbs = 1;
struct fakelb_dev_priv {
- struct ieee802154_dev *dev;
+ struct ieee802154_hw *hw;
struct list_head list;
struct fakelb_priv *fake;
@@ -49,9 +45,8 @@ struct fakelb_priv {
};
static int
-fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
+fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
- might_sleep();
BUG_ON(!level);
*level = 0xbe;
@@ -59,14 +54,10 @@ fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
}
static int
-fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel)
+fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
pr_debug("set channel to %d\n", channel);
- might_sleep();
- dev->phy->current_page = page;
- dev->phy->current_channel = channel;
-
return 0;
}
@@ -78,19 +69,17 @@ fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
spin_lock(&priv->lock);
if (priv->working) {
newskb = pskb_copy(skb, GFP_ATOMIC);
- ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc);
+ ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc);
}
spin_unlock(&priv->lock);
}
static int
-fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct fakelb_dev_priv *priv = dev->priv;
+ struct fakelb_dev_priv *priv = hw->priv;
struct fakelb_priv *fake = priv->fake;
- might_sleep();
-
read_lock_bh(&fake->lock);
if (priv->list.next == priv->list.prev) {
/* we are the only one device */
@@ -99,8 +88,8 @@ fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
struct fakelb_dev_priv *dp;
list_for_each_entry(dp, &priv->fake->list, list) {
if (dp != priv &&
- (dp->dev->phy->current_channel ==
- priv->dev->phy->current_channel))
+ (dp->hw->phy->current_channel ==
+ priv->hw->phy->current_channel))
fakelb_hw_deliver(dp, skb);
}
}
@@ -110,8 +99,8 @@ fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
}
static int
-fakelb_hw_start(struct ieee802154_dev *dev) {
- struct fakelb_dev_priv *priv = dev->priv;
+fakelb_hw_start(struct ieee802154_hw *hw) {
+ struct fakelb_dev_priv *priv = hw->priv;
int ret = 0;
spin_lock(&priv->lock);
@@ -125,17 +114,17 @@ fakelb_hw_start(struct ieee802154_dev *dev) {
}
static void
-fakelb_hw_stop(struct ieee802154_dev *dev) {
- struct fakelb_dev_priv *priv = dev->priv;
+fakelb_hw_stop(struct ieee802154_hw *hw) {
+ struct fakelb_dev_priv *priv = hw->priv;
spin_lock(&priv->lock);
priv->working = 0;
spin_unlock(&priv->lock);
}
-static struct ieee802154_ops fakelb_ops = {
+static const struct ieee802154_ops fakelb_ops = {
.owner = THIS_MODULE,
- .xmit = fakelb_hw_xmit,
+ .xmit_sync = fakelb_hw_xmit,
.ed = fakelb_hw_ed,
.set_channel = fakelb_hw_channel,
.start = fakelb_hw_start,
@@ -150,54 +139,54 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
{
struct fakelb_dev_priv *priv;
int err;
- struct ieee802154_dev *ieee;
+ struct ieee802154_hw *hw;
- ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops);
- if (!ieee)
+ hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops);
+ if (!hw)
return -ENOMEM;
- priv = ieee->priv;
- priv->dev = ieee;
+ priv = hw->priv;
+ priv->hw = hw;
/* 868 MHz BPSK 802.15.4-2003 */
- ieee->phy->channels_supported[0] |= 1;
+ hw->phy->channels_supported[0] |= 1;
/* 915 MHz BPSK 802.15.4-2003 */
- ieee->phy->channels_supported[0] |= 0x7fe;
+ hw->phy->channels_supported[0] |= 0x7fe;
/* 2.4 GHz O-QPSK 802.15.4-2003 */
- ieee->phy->channels_supported[0] |= 0x7FFF800;
+ hw->phy->channels_supported[0] |= 0x7FFF800;
/* 868 MHz ASK 802.15.4-2006 */
- ieee->phy->channels_supported[1] |= 1;
+ hw->phy->channels_supported[1] |= 1;
/* 915 MHz ASK 802.15.4-2006 */
- ieee->phy->channels_supported[1] |= 0x7fe;
+ hw->phy->channels_supported[1] |= 0x7fe;
/* 868 MHz O-QPSK 802.15.4-2006 */
- ieee->phy->channels_supported[2] |= 1;
+ hw->phy->channels_supported[2] |= 1;
/* 915 MHz O-QPSK 802.15.4-2006 */
- ieee->phy->channels_supported[2] |= 0x7fe;
+ hw->phy->channels_supported[2] |= 0x7fe;
/* 2.4 GHz CSS 802.15.4a-2007 */
- ieee->phy->channels_supported[3] |= 0x3fff;
+ hw->phy->channels_supported[3] |= 0x3fff;
/* UWB Sub-gigahertz 802.15.4a-2007 */
- ieee->phy->channels_supported[4] |= 1;
+ hw->phy->channels_supported[4] |= 1;
/* UWB Low band 802.15.4a-2007 */
- ieee->phy->channels_supported[4] |= 0x1e;
+ hw->phy->channels_supported[4] |= 0x1e;
/* UWB High band 802.15.4a-2007 */
- ieee->phy->channels_supported[4] |= 0xffe0;
+ hw->phy->channels_supported[4] |= 0xffe0;
/* 750 MHz O-QPSK 802.15.4c-2009 */
- ieee->phy->channels_supported[5] |= 0xf;
+ hw->phy->channels_supported[5] |= 0xf;
/* 750 MHz MPSK 802.15.4c-2009 */
- ieee->phy->channels_supported[5] |= 0xf0;
+ hw->phy->channels_supported[5] |= 0xf0;
/* 950 MHz BPSK 802.15.4d-2009 */
- ieee->phy->channels_supported[6] |= 0x3ff;
+ hw->phy->channels_supported[6] |= 0x3ff;
/* 950 MHz GFSK 802.15.4d-2009 */
- ieee->phy->channels_supported[6] |= 0x3ffc00;
+ hw->phy->channels_supported[6] |= 0x3ffc00;
INIT_LIST_HEAD(&priv->list);
priv->fake = fake;
spin_lock_init(&priv->lock);
- ieee->parent = dev;
+ hw->parent = dev;
- err = ieee802154_register_device(ieee);
+ err = ieee802154_register_hw(hw);
if (err)
goto err_reg;
@@ -208,7 +197,7 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
return 0;
err_reg:
- ieee802154_free_device(priv->dev);
+ ieee802154_free_hw(priv->hw);
return err;
}
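
channels_supported[] is a per-page bitmask with one bit per channel -- the same encoding the validation deleted from at86rf230_channel() relied on (the page bound of 31 is taken from that deleted check). As a sketch:

	static bool phy_supports(struct wpan_phy *phy, u8 page, u8 channel)
	{
		return page <= 31 &&
		       (phy->channels_supported[page] & BIT(channel));
	}
	/* e.g. 0x7FFF800 sets bits 11..26: the sixteen 2.4 GHz O-QPSK channels */
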
@@ -218,8 +207,8 @@ static void fakelb_del(struct fakelb_dev_priv *priv)
list_del(&priv->list);
write_unlock_bh(&priv->fake->lock);
- ieee802154_unregister_device(priv->dev);
- ieee802154_free_device(priv->dev);
+ ieee802154_unregister_hw(priv->hw);
+ ieee802154_free_hw(priv->hw);
}
static int fakelb_probe(struct platform_device *pdev)
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 07e0b887c350..a200fa16beae 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -13,18 +13,14 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <net/wpan-phy.h>
+#include <linux/ieee802154.h>
+#include <net/cfg802154.h>
#include <net/mac802154.h>
-#include <net/ieee802154.h>
/* MRF24J40 Short Address Registers */
#define REG_RXMCR 0x00 /* Receive MAC control */
@@ -43,6 +39,8 @@
#define REG_TXSTBL 0x2E /* TX Stabilization */
#define REG_INTSTAT 0x31 /* Interrupt Status */
#define REG_INTCON 0x32 /* Interrupt Control */
+#define REG_GPIO 0x33 /* GPIO */
+#define REG_TRISGPIO 0x34 /* GPIO direction */
#define REG_RFCTL 0x36 /* RF Control Mode Register */
#define REG_BBREG1 0x39 /* Baseband Registers */
#define REG_BBREG2 0x3A /* */
@@ -63,6 +61,7 @@
#define REG_SLPCON1 0x220
#define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */
#define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */
+#define REG_TESTMODE 0x22F /* Test mode */
#define REG_RX_FIFO 0x300 /* Receive FIFO */
/* Device configuration: Only channels 11-26 on page 0 are supported. */
@@ -75,10 +74,12 @@
#define RX_FIFO_SIZE 144 /* From datasheet */
#define SET_CHANNEL_DELAY_US 192 /* From datasheet */
+enum mrf24j40_modules { MRF24J40, MRF24J40MA, MRF24J40MC };
+
/* Device Private Data */
struct mrf24j40 {
struct spi_device *spi;
- struct ieee802154_dev *dev;
+ struct ieee802154_hw *hw;
struct mutex buffer_mutex; /* only used to protect buf */
struct completion tx_complete;
@@ -331,9 +332,9 @@ out:
return ret;
}
-static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+static int mrf24j40_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
u8 val;
int ret = 0;
@@ -382,7 +383,7 @@ err:
return ret;
}
-static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
+static int mrf24j40_ed(struct ieee802154_hw *hw, u8 *level)
{
/* TODO: */
pr_warn("mrf24j40: ed not implemented\n");
@@ -390,9 +391,9 @@ static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
return 0;
}
-static int mrf24j40_start(struct ieee802154_dev *dev)
+static int mrf24j40_start(struct ieee802154_hw *hw)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
u8 val;
int ret;
@@ -407,9 +408,9 @@ static int mrf24j40_start(struct ieee802154_dev *dev)
return 0;
}
-static void mrf24j40_stop(struct ieee802154_dev *dev)
+static void mrf24j40_stop(struct ieee802154_hw *hw)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
u8 val;
int ret;
@@ -422,10 +423,9 @@ static void mrf24j40_stop(struct ieee802154_dev *dev)
write_short_reg(devrec, REG_INTCON, val);
}
-static int mrf24j40_set_channel(struct ieee802154_dev *dev,
- int page, int channel)
+static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
u8 val;
int ret;
@@ -453,15 +453,15 @@ static int mrf24j40_set_channel(struct ieee802154_dev *dev,
return 0;
}
-static int mrf24j40_filter(struct ieee802154_dev *dev,
+static int mrf24j40_filter(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed)
{
- struct mrf24j40 *devrec = dev->priv;
+ struct mrf24j40 *devrec = hw->priv;
dev_dbg(printdev(devrec), "filter\n");
- if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
/* Short Addr */
u8 addrh, addrl;
@@ -474,7 +474,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
"Set short addr to %04hx\n", filt->short_addr);
}
- if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
/* Device Address */
u8 i, addr[8];
@@ -490,7 +490,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
#endif
}
- if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
/* PAN ID */
u8 panidl, panidh;
@@ -502,7 +502,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
}
- if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
/* Pan Coordinator */
u8 val;
int ret;
@@ -543,7 +543,7 @@ static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
val |= 4; /* SET RXDECINV */
write_short_reg(devrec, REG_BBREG1, val);
- skb = alloc_skb(len, GFP_KERNEL);
+ skb = dev_alloc_skb(len);
if (!skb) {
ret = -ENOMEM;
goto out;
@@ -563,7 +563,7 @@ static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
/* TODO: Other drivers call ieee802154_rx_irqsafe() here (eg: cc2520,
* also from a workqueue). I think irqsafe is not necessary here.
* Can someone confirm? */
- ieee802154_rx_irqsafe(devrec->dev, skb, lqi);
+ ieee802154_rx_irqsafe(devrec->hw, skb, lqi);
dev_dbg(printdev(devrec), "RX Handled\n");
@@ -578,9 +578,9 @@ out:
return ret;
}
-static struct ieee802154_ops mrf24j40_ops = {
+static const struct ieee802154_ops mrf24j40_ops = {
.owner = THIS_MODULE,
- .xmit = mrf24j40_tx,
+ .xmit_sync = mrf24j40_tx,
.ed = mrf24j40_ed,
.start = mrf24j40_start,
.stop = mrf24j40_stop,
@@ -691,6 +691,28 @@ static int mrf24j40_hw_init(struct mrf24j40 *devrec)
if (ret)
goto err_ret;
+ if (spi_get_device_id(devrec->spi)->driver_data == MRF24J40MC) {
+ /* Enable external amplifier.
+ * From MRF24J40MC datasheet section 1.3: Operation.
+ */
+ read_long_reg(devrec, REG_TESTMODE, &val);
+ val |= 0x7; /* Configure GPIO 0-2 to control amplifier */
+ write_long_reg(devrec, REG_TESTMODE, val);
+
+ read_short_reg(devrec, REG_TRISGPIO, &val);
+ val |= 0x8; /* Set GPIO3 as output. */
+ write_short_reg(devrec, REG_TRISGPIO, val);
+
+ read_short_reg(devrec, REG_GPIO, &val);
+ val |= 0x8; /* Set GPIO3 HIGH to enable U5 voltage regulator */
+ write_short_reg(devrec, REG_GPIO, val);
+
+ /* Reduce TX pwr to meet FCC requirements.
+ * From MRF24J40MC datasheet section 3.1.1
+ */
+ write_long_reg(devrec, REG_RFCON3, 0x28);
+ }
+
return 0;
err_ret:
@@ -722,17 +744,18 @@ static int mrf24j40_probe(struct spi_device *spi)
/* Register with the 802154 subsystem */
- devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
- if (!devrec->dev)
+ devrec->hw = ieee802154_alloc_hw(0, &mrf24j40_ops);
+ if (!devrec->hw)
goto err_ret;
- devrec->dev->priv = devrec;
- devrec->dev->parent = &devrec->spi->dev;
- devrec->dev->phy->channels_supported[0] = CHANNEL_MASK;
- devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK;
+ devrec->hw->priv = devrec;
+ devrec->hw->parent = &devrec->spi->dev;
+ devrec->hw->phy->channels_supported[0] = CHANNEL_MASK;
+ devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
+ IEEE802154_HW_AFILT;
dev_dbg(printdev(devrec), "registered mrf24j40\n");
- ret = ieee802154_register_device(devrec->dev);
+ ret = ieee802154_register_hw(devrec->hw);
if (ret)
goto err_register_device;
@@ -757,9 +780,9 @@ static int mrf24j40_probe(struct spi_device *spi)
err_irq:
err_hw_init:
- ieee802154_unregister_device(devrec->dev);
+ ieee802154_unregister_hw(devrec->hw);
err_register_device:
- ieee802154_free_device(devrec->dev);
+ ieee802154_free_hw(devrec->hw);
err_ret:
return ret;
}
@@ -770,8 +793,8 @@ static int mrf24j40_remove(struct spi_device *spi)
dev_dbg(printdev(devrec), "remove\n");
- ieee802154_unregister_device(devrec->dev);
- ieee802154_free_device(devrec->dev);
+ ieee802154_unregister_hw(devrec->hw);
+ ieee802154_free_hw(devrec->hw);
/* TODO: Will ieee802154_free_hw() wait until ->xmit() is
 * complete? */
@@ -779,8 +802,9 @@ static int mrf24j40_remove(struct spi_device *spi)
}
static const struct spi_device_id mrf24j40_ids[] = {
- { "mrf24j40", 0 },
- { "mrf24j40ma", 0 },
+ { "mrf24j40", MRF24J40 },
+ { "mrf24j40ma", MRF24J40MA },
+ { "mrf24j40mc", MRF24J40MC },
{ },
};
MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
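/* Illustration (not part of the patch): with the "mrf24j40mc" id added
 * above, board code could instantiate the MC module roughly as below;
 * the bus number, chip select, IRQ and SPI clock are placeholders.
 *
 *	static struct spi_board_info mrf24j40mc_info[] = {
 *		{
 *			.modalias	= "mrf24j40mc",
 *			.max_speed_hz	= 10000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.irq		= 42,
 *		},
 *	};
 *
 *	spi_register_board_info(mrf24j40mc_info, ARRAY_SIZE(mrf24j40mc_info));
 */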
diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile
new file mode 100644
index 000000000000..df79910192d6
--- /dev/null
+++ b/drivers/net/ipvlan/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Ethernet Ipvlan driver
+#
+
+obj-$(CONFIG_IPVLAN) += ipvlan.o
+
+ipvlan-objs := ipvlan_core.o ipvlan_main.o
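+
+# Illustration (assumption, not part of this Makefile): CONFIG_IPVLAN is
+# expected to come from a matching drivers/net/Kconfig entry, roughly:
+#
+#	config IPVLAN
+#		tristate "IP-VLAN support"
+#		depends on INET
+#		depends on IPV6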
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
new file mode 100644
index 000000000000..2729f64b3e7e
--- /dev/null
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ */
+#ifndef __IPVLAN_H
+#define __IPVLAN_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_link.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/inetdevice.h>
+#include <net/ip.h>
+#include <net/ip6_route.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+
+#define IPVLAN_DRV "ipvlan"
+#define IPV_DRV_VER "0.1"
+
+#define IPVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
+#define IPVLAN_HASH_MASK (IPVLAN_HASH_SIZE - 1)
+
+#define IPVLAN_MAC_FILTER_BITS 8
+#define IPVLAN_MAC_FILTER_SIZE (1 << IPVLAN_MAC_FILTER_BITS)
+#define IPVLAN_MAC_FILTER_MASK (IPVLAN_MAC_FILTER_SIZE - 1)
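+
+/* With BITS_PER_BYTE == 8 these work out to 256 hash buckets
+ * (IPVLAN_HASH_MASK == 0xff) and a 256-bit per-slave multicast
+ * MAC filter (mac_filters below).
+ */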
+
+typedef enum {
+ IPVL_IPV6 = 0,
+ IPVL_ICMPV6,
+ IPVL_IPV4,
+ IPVL_ARP,
+} ipvl_hdr_type;
+
+struct ipvl_pcpu_stats {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_mcast;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_errs;
+ u32 tx_drps;
+};
+
+struct ipvl_port;
+
+struct ipvl_dev {
+ struct net_device *dev;
+ struct list_head pnode;
+ struct ipvl_port *port;
+ struct net_device *phy_dev;
+ struct list_head addrs;
+ int ipv4cnt;
+ int ipv6cnt;
+ struct ipvl_pcpu_stats *pcpu_stats;
+ DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
+ netdev_features_t sfeatures;
+ u32 msg_enable;
+ u16 mtu_adj;
+};
+
+struct ipvl_addr {
+ struct ipvl_dev *master; /* Back pointer to master */
+ union {
+ struct in6_addr ip6; /* IPv6 address on logical interface */
+ struct in_addr ip4; /* IPv4 address on logical interface */
+ } ipu;
+#define ip6addr ipu.ip6
+#define ip4addr ipu.ip4
+ struct hlist_node hlnode; /* Hash-table linkage */
+ struct list_head anode; /* logical-interface linkage */
+ struct rcu_head rcu;
+ ipvl_hdr_type atype;
+};
+
+struct ipvl_port {
+ struct net_device *dev;
+ struct hlist_head hlhead[IPVLAN_HASH_SIZE];
+ struct list_head ipvlans;
+ struct rcu_head rcu;
+ int count;
+ u16 mode;
+};
+
+static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
+{
+ return rcu_dereference(d->rx_handler_data);
+}
+
+static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
+{
+ return rtnl_dereference(d->rx_handler_data);
+}
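+
+/* Data model in brief (illustrative summary): one ipvl_port hangs off
+ * each master device as its rx_handler_data; the port owns the list of
+ * slave ipvl_devs and a single hash table of every slave IP address.
+ */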
+
+void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev);
+void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval);
+void ipvlan_init_secret(void);
+unsigned int ipvlan_mac_hash(const unsigned char *addr);
+rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
+int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
+void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
+bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
+struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
+ const void *iaddr, bool is_v6);
+void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
+#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
new file mode 100644
index 000000000000..a14d87783245
--- /dev/null
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -0,0 +1,607 @@
+/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ */
+
+#include "ipvlan.h"
+
+static u32 ipvlan_jhash_secret;
+
+void ipvlan_init_secret(void)
+{
+ net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
+}
+
+static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
+ unsigned int len, bool success, bool mcast)
+{
+ if (!ipvlan)
+ return;
+
+ if (likely(success)) {
+ struct ipvl_pcpu_stats *pcptr;
+
+ pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
+ u64_stats_update_begin(&pcptr->syncp);
+ pcptr->rx_pkts++;
+ pcptr->rx_bytes += len;
+ if (mcast)
+ pcptr->rx_mcast++;
+ u64_stats_update_end(&pcptr->syncp);
+ } else {
+ this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
+ }
+}
+
+static u8 ipvlan_get_v6_hash(const void *iaddr)
+{
+ const struct in6_addr *ip6_addr = iaddr;
+
+ return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
+ IPVLAN_HASH_MASK;
+}
+
+static u8 ipvlan_get_v4_hash(const void *iaddr)
+{
+ const struct in_addr *ip4_addr = iaddr;
+
+ return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
+ IPVLAN_HASH_MASK;
+}
+
+struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
+ const void *iaddr, bool is_v6)
+{
+ struct ipvl_addr *addr;
+ u8 hash;
+
+ hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
+ ipvlan_get_v4_hash(iaddr);
+ hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
+ if (is_v6 && addr->atype == IPVL_IPV6 &&
+ ipv6_addr_equal(&addr->ip6addr, iaddr))
+ return addr;
+ else if (!is_v6 && addr->atype == IPVL_IPV4 &&
+ addr->ip4addr.s_addr ==
+ ((struct in_addr *)iaddr)->s_addr)
+ return addr;
+ }
+ return NULL;
+}
+
+void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
+{
+ struct ipvl_port *port = ipvlan->port;
+ u8 hash;
+
+ hash = (addr->atype == IPVL_IPV6) ?
+ ipvlan_get_v6_hash(&addr->ip6addr) :
+ ipvlan_get_v4_hash(&addr->ip4addr);
+ hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
+}
+
+void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
+{
+ hlist_del_rcu(&addr->hlnode);
+ if (sync)
+ synchronize_rcu();
+}
+
+bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
+{
+ struct ipvl_port *port = ipvlan->port;
+ struct ipvl_addr *addr;
+
+ list_for_each_entry(addr, &ipvlan->addrs, anode) {
+ if ((is_v6 && addr->atype == IPVL_IPV6 &&
+ ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
+ (!is_v6 && addr->atype == IPVL_IPV4 &&
+ addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
+ return true;
+ }
+
+ if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
+ return true;
+
+ return false;
+}
+
+static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
+{
+ void *lyr3h = NULL;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_ARP): {
+ struct arphdr *arph;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
+ return NULL;
+
+ arph = arp_hdr(skb);
+ *type = IPVL_ARP;
+ lyr3h = arph;
+ break;
+ }
+ case htons(ETH_P_IP): {
+ u32 pktlen;
+ struct iphdr *ip4h;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
+ return NULL;
+
+ ip4h = ip_hdr(skb);
+ pktlen = ntohs(ip4h->tot_len);
+ if (ip4h->ihl < 5 || ip4h->version != 4)
+ return NULL;
+ if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
+ return NULL;
+
+ *type = IPVL_IPV4;
+ lyr3h = ip4h;
+ break;
+ }
+ case htons(ETH_P_IPV6): {
+ struct ipv6hdr *ip6h;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ if (ip6h->version != 6)
+ return NULL;
+
+ *type = IPVL_IPV6;
+ lyr3h = ip6h;
+ /* Only Neighbour Solicitation pkts need different treatment */
+ if (ipv6_addr_any(&ip6h->saddr) &&
+ ip6h->nexthdr == NEXTHDR_ICMP) {
+ *type = IPVL_ICMPV6;
+ lyr3h = ip6h + 1;
+ }
+ break;
+ }
+ default:
+ return NULL;
+ }
+
+ return lyr3h;
+}
+
+unsigned int ipvlan_mac_hash(const unsigned char *addr)
+{
+ u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
+ ipvlan_jhash_secret);
+
+ return hash & IPVLAN_MAC_FILTER_MASK;
+}
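+
+/* Illustration: the unaligned 32-bit load at addr + 2 hashes bytes 2..5
+ * of the MAC, so e.g. 01:00:5e:00:00:fb and 33:33:5e:00:00:fb share a
+ * bucket. A collision only sets the same filter bit twice; it widens
+ * the filter but never drops a wanted group.
+ */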
+
+static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
+ const struct ipvl_dev *in_dev, bool local)
+{
+ struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_dev *ipvlan;
+ struct sk_buff *nskb;
+ unsigned int len;
+ unsigned int mac_hash;
+ int ret;
+
+ if (skb->protocol == htons(ETH_P_PAUSE))
+ return;
+
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ if (local && (ipvlan == in_dev))
+ continue;
+
+ mac_hash = ipvlan_mac_hash(eth->h_dest);
+ if (!test_bit(mac_hash, ipvlan->mac_filters))
+ continue;
+
+ ret = NET_RX_DROP;
+ len = skb->len + ETH_HLEN;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto mcast_acct;
+
+ if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast))
+ nskb->pkt_type = PACKET_BROADCAST;
+ else
+ nskb->pkt_type = PACKET_MULTICAST;
+
+ nskb->dev = ipvlan->dev;
+ if (local)
+ ret = dev_forward_skb(ipvlan->dev, nskb);
+ else
+ ret = netif_rx(nskb);
+mcast_acct:
+ ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+ }
+
+ /* Locally generated? ...Forward a copy to the main-device as
+ * well. On the RX side we'll ignore it (won't give it to any
+ * of the virtual devices).
+ */
+ if (local) {
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (nskb) {
+ if (ether_addr_equal(eth->h_dest, port->dev->broadcast))
+ nskb->pkt_type = PACKET_BROADCAST;
+ else
+ nskb->pkt_type = PACKET_MULTICAST;
+
+ dev_forward_skb(port->dev, nskb);
+ }
+ }
+}
+
+static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
+ bool local)
+{
+ struct ipvl_dev *ipvlan = addr->master;
+ struct net_device *dev = ipvlan->dev;
+ unsigned int len;
+ rx_handler_result_t ret = RX_HANDLER_CONSUMED;
+ bool success = false;
+
+ len = skb->len + ETH_HLEN;
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ skb->dev = dev;
+ skb->pkt_type = PACKET_HOST;
+
+ if (local) {
+ if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
+ success = true;
+ } else {
+ ret = RX_HANDLER_ANOTHER;
+ success = true;
+ }
+
+out:
+ ipvlan_count_rx(ipvlan, len, success, false);
+ return ret;
+}
+
+static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
+ void *lyr3h, int addr_type,
+ bool use_dest)
+{
+ struct ipvl_addr *addr = NULL;
+
+ if (addr_type == IPVL_IPV6) {
+ struct ipv6hdr *ip6h;
+ struct in6_addr *i6addr;
+
+ ip6h = (struct ipv6hdr *)lyr3h;
+ i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
+ addr = ipvlan_ht_addr_lookup(port, i6addr, true);
+ } else if (addr_type == IPVL_ICMPV6) {
+ struct nd_msg *ndmh;
+ struct in6_addr *i6addr;
+
+ /* Make sure that Neighbor Solicitation ICMPv6 packets are
+ * handled, to avoid DAD (duplicate address detection) issues.
+ */
+ ndmh = (struct nd_msg *)lyr3h;
+ if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ i6addr = &ndmh->target;
+ addr = ipvlan_ht_addr_lookup(port, i6addr, true);
+ }
+ } else if (addr_type == IPVL_IPV4) {
+ struct iphdr *ip4h;
+ __be32 *i4addr;
+
+ ip4h = (struct iphdr *)lyr3h;
+ i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
+ addr = ipvlan_ht_addr_lookup(port, i4addr, false);
+ } else if (addr_type == IPVL_ARP) {
+ struct arphdr *arph;
+ unsigned char *arp_ptr;
+ __be32 dip;
+
+ arph = (struct arphdr *)lyr3h;
+ arp_ptr = (unsigned char *)(arph + 1);
+ if (use_dest)
+ arp_ptr += (2 * port->dev->addr_len) + 4;
+ else
+ arp_ptr += port->dev->addr_len;
+
+ memcpy(&dip, arp_ptr, 4);
+ addr = ipvlan_ht_addr_lookup(port, &dip, false);
+ }
+
+ return addr;
+}
+
+static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+{
+ const struct iphdr *ip4h = ip_hdr(skb);
+ struct net_device *dev = skb->dev;
+ struct rtable *rt;
+ int err, ret = NET_XMIT_DROP;
+ struct flowi4 fl4 = {
+ .flowi4_oif = dev->iflink,
+ .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .daddr = ip4h->daddr,
+ .saddr = ip4h->saddr,
+ };
+
+ rt = ip_route_output_flow(dev_net(dev), &fl4, NULL);
+ if (IS_ERR(rt))
+ goto err;
+
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
+ goto err;
+ }
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ err = ip_local_out(skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+out:
+ return ret;
+}
+
+static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct net_device *dev = skb->dev;
+ struct dst_entry *dst;
+ int err, ret = NET_XMIT_DROP;
+ struct flowi6 fl6 = {
+ .flowi6_iif = skb->dev->ifindex,
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ .flowi6_flags = FLOWI_FLAG_ANYSRC,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
+
+ dst = ip6_route_output(dev_net(dev), NULL, &fl6);
+ if (IS_ERR(dst))
+ goto err;
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+ err = ip6_local_out(skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+out:
+ return ret;
+}
+
+static int ipvlan_process_outbound(struct sk_buff *skb,
+ const struct ipvl_dev *ipvlan)
+{
+ struct ethhdr *ethh = eth_hdr(skb);
+ int ret = NET_XMIT_DROP;
+
+ /* In this mode we don't care about multicast and broadcast traffic */
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
+ ntohs(skb->protocol));
+ kfree_skb(skb);
+ goto out;
+ }
+
+ /* The ipvlan is a pseudo-L2 device, so the packets that we receive
+ * arrive with an L2 header, which needs to be discarded before the
+ * packet is processed further in the net-ns of the main-device.
+ */
+ if (skb_mac_header_was_set(skb)) {
+ skb_pull(skb, sizeof(*ethh));
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb_reset_network_header(skb);
+ }
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ ret = ipvlan_process_v6_outbound(skb);
+ else if (skb->protocol == htons(ETH_P_IP))
+ ret = ipvlan_process_v4_outbound(skb);
+ else {
+ pr_warn_ratelimited("Dropped outbound packet type=%x\n",
+ ntohs(skb->protocol));
+ kfree_skb(skb);
+ }
+out:
+ return ret;
+}
+
+static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ void *lyr3h;
+ struct ipvl_addr *addr;
+ int addr_type;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ goto out;
+
+ addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
+ if (addr)
+ return ipvlan_rcv_frame(addr, skb, true);
+
+out:
+ skb->dev = ipvlan->phy_dev;
+ return ipvlan_process_outbound(skb, ipvlan);
+}
+
+static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_addr *addr;
+ void *lyr3h;
+ int addr_type;
+
+ if (ether_addr_equal(eth->h_dest, eth->h_source)) {
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (lyr3h) {
+ addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
+ if (addr)
+ return ipvlan_rcv_frame(addr, skb, true);
+ }
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_XMIT_DROP;
+
+ /* The packet definitely does not belong to any of the
+ * virtual devices, but the destination is local. So forward
+ * the skb to the main-dev. On the RX side we just return
+ * RX_HANDLER_PASS so it is processed further up the stack.
+ */
+ return dev_forward_skb(ipvlan->phy_dev, skb);
+
+ } else if (is_multicast_ether_addr(eth->h_dest)) {
+ u8 ip_summed = skb->ip_summed;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true);
+ skb->ip_summed = ip_summed;
+ }
+
+ skb->dev = ipvlan->phy_dev;
+ return dev_queue_xmit(skb);
+}
+
+int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
+
+ if (!port)
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
+ goto out;
+
+ switch (port->mode) {
+ case IPVLAN_MODE_L2:
+ return ipvlan_xmit_mode_l2(skb, dev);
+ case IPVLAN_MODE_L3:
+ return ipvlan_xmit_mode_l3(skb, dev);
+ }
+
+ /* Should not reach here */
+ WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
+ port->mode);
+out:
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+
+static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
+{
+ struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_addr *addr;
+ void *lyr3h;
+ int addr_type;
+
+ if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ return true;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
+ if (addr)
+ return false;
+ }
+
+ return true;
+}
+
+static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
+ struct ipvl_port *port)
+{
+ void *lyr3h;
+ int addr_type;
+ struct ipvl_addr *addr;
+ struct sk_buff *skb = *pskb;
+ rx_handler_result_t ret = RX_HANDLER_PASS;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ goto out;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
+ if (addr)
+ ret = ipvlan_rcv_frame(addr, skb, false);
+
+out:
+ return ret;
+}
+
+static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
+ struct ipvl_port *port)
+{
+ struct sk_buff *skb = *pskb;
+ struct ethhdr *eth = eth_hdr(skb);
+ rx_handler_result_t ret = RX_HANDLER_PASS;
+ void *lyr3h;
+ int addr_type;
+
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ if (ipvlan_external_frame(skb, port))
+ ipvlan_multicast_frame(port, skb, NULL, false);
+ } else {
+ struct ipvl_addr *addr;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ return ret;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
+ if (addr)
+ ret = ipvlan_rcv_frame(addr, skb, false);
+ }
+
+ return ret;
+}
+
+rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);
+
+ if (!port)
+ return RX_HANDLER_PASS;
+
+ switch (port->mode) {
+ case IPVLAN_MODE_L2:
+ return ipvlan_handle_mode_l2(pskb, port);
+ case IPVLAN_MODE_L3:
+ return ipvlan_handle_mode_l3(pskb, port);
+ }
+
+ /* Should not reach here */
+ WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
+ port->mode);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
new file mode 100644
index 000000000000..4f4099d5603d
--- /dev/null
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -0,0 +1,795 @@
+/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ */
+
+#include "ipvlan.h"
+
+void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
+{
+ ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
+}
+
+void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval)
+{
+ struct ipvl_dev *ipvlan;
+
+ if (port->mode != nval) {
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ if (nval == IPVLAN_MODE_L3)
+ ipvlan->dev->flags |= IFF_NOARP;
+ else
+ ipvlan->dev->flags &= ~IFF_NOARP;
+ }
+ port->mode = nval;
+ }
+}
+
+static int ipvlan_port_create(struct net_device *dev)
+{
+ struct ipvl_port *port;
+ int err, idx;
+
+ if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
+ netdev_err(dev, "Master is either lo or a non-ether device\n");
+ return -EINVAL;
+ }
+
+ if (netif_is_macvlan_port(dev)) {
+ netdev_err(dev, "Master is a macvlan port.\n");
+ return -EBUSY;
+ }
+
+ port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = dev;
+ port->mode = IPVLAN_MODE_L3;
+ INIT_LIST_HEAD(&port->ipvlans);
+ for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
+ INIT_HLIST_HEAD(&port->hlhead[idx]);
+
+ err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
+ if (err)
+ goto err;
+
+ dev->priv_flags |= IFF_IPVLAN_MASTER;
+ return 0;
+
+err:
+ kfree_rcu(port, rcu);
+ return err;
+}
+
+static void ipvlan_port_destroy(struct net_device *dev)
+{
+ struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
+
+ dev->priv_flags &= ~IFF_IPVLAN_MASTER;
+ netdev_rx_handler_unregister(dev);
+ kfree_rcu(port, rcu);
+}
+
+/* ipvlan network devices have devices nesting below them and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key ipvlan_netdev_xmit_lock_key;
+static struct lock_class_key ipvlan_netdev_addr_lock_key;
+
+#define IPVLAN_FEATURES \
+ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
+
+#define IPVLAN_STATE_MASK \
+ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+
+static void ipvlan_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key);
+}
+
+static void ipvlan_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL);
+}
+
+static int ipvlan_init(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ const struct net_device *phy_dev = ipvlan->phy_dev;
+
+ dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
+ (phy_dev->state & IPVLAN_STATE_MASK);
+ dev->features = phy_dev->features & IPVLAN_FEATURES;
+ dev->features |= NETIF_F_LLTX;
+ dev->gso_max_size = phy_dev->gso_max_size;
+ dev->iflink = phy_dev->ifindex;
+ dev->hard_header_len = phy_dev->hard_header_len;
+
+ ipvlan_set_lockdep_class(dev);
+
+ ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
+ if (!ipvlan->pcpu_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ipvlan_uninit(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan->port;
+
+ free_percpu(ipvlan->pcpu_stats);
+
+ port->count -= 1;
+ if (!port->count)
+ ipvlan_port_destroy(port->dev);
+}
+
+static int ipvlan_open(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_addr *addr;
+
+ if (ipvlan->port->mode == IPVLAN_MODE_L3)
+ dev->flags |= IFF_NOARP;
+ else
+ dev->flags &= ~IFF_NOARP;
+
+ if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_add(ipvlan, addr);
+ }
+ return dev_uc_add(phy_dev, phy_dev->dev_addr);
+}
+
+static int ipvlan_stop(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_addr *addr;
+
+ dev_uc_unsync(phy_dev, dev);
+ dev_mc_unsync(phy_dev, dev);
+
+ dev_uc_del(phy_dev, phy_dev->dev_addr);
+
+ if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_del(addr, !dev->dismantle);
+ }
+ return 0;
+}
+
+static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ int skblen = skb->len;
+ int ret;
+
+ ret = ipvlan_queue_xmit(skb, dev);
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ struct ipvl_pcpu_stats *pcptr;
+
+ pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
+
+ u64_stats_update_begin(&pcptr->syncp);
+ pcptr->tx_pkts++;
+ pcptr->tx_bytes += skblen;
+ u64_stats_update_end(&pcptr->syncp);
+ } else {
+ this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
+ }
+ return ret;
+}
+
+static netdev_features_t ipvlan_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+}
+
+static void ipvlan_change_rx_flags(struct net_device *dev, int change)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+}
+
+static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set)
+{
+ struct net_device *dev = ipvlan->dev;
+ unsigned int hashbit = ipvlan_mac_hash(dev->broadcast);
+
+ if (set && !test_bit(hashbit, ipvlan->mac_filters))
+ __set_bit(hashbit, ipvlan->mac_filters);
+ else if (!set && test_bit(hashbit, ipvlan->mac_filters))
+ __clear_bit(hashbit, ipvlan->mac_filters);
+}
+
+static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE);
+ } else {
+ struct netdev_hw_addr *ha;
+ DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE);
+
+ bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE);
+ netdev_for_each_mc_addr(ha, dev)
+ __set_bit(ipvlan_mac_hash(ha->addr), mc_filters);
+
+ bitmap_copy(ipvlan->mac_filters, mc_filters,
+ IPVLAN_MAC_FILTER_SIZE);
+ }
+ dev_uc_sync(ipvlan->phy_dev, dev);
+ dev_mc_sync(ipvlan->phy_dev, dev);
+}
+
+static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (ipvlan->pcpu_stats) {
+ struct ipvl_pcpu_stats *pcptr;
+ u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes;
+ u32 rx_errs = 0, tx_drps = 0;
+ u32 strt;
+ int idx;
+
+ for_each_possible_cpu(idx) {
+ pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
+ do {
+ strt = u64_stats_fetch_begin_irq(&pcptr->syncp);
+ rx_pkts = pcptr->rx_pkts;
+ rx_bytes = pcptr->rx_bytes;
+ rx_mcast = pcptr->rx_mcast;
+ tx_pkts = pcptr->tx_pkts;
+ tx_bytes = pcptr->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&pcptr->syncp,
+ strt));
+
+ s->rx_packets += rx_pkts;
+ s->rx_bytes += rx_bytes;
+ s->multicast += rx_mcast;
+ s->tx_packets += tx_pkts;
+ s->tx_bytes += tx_bytes;
+
+ /* u32 values are updated without syncp protection. */
+ rx_errs += pcptr->rx_errs;
+ tx_drps += pcptr->tx_drps;
+ }
+ s->rx_errors = rx_errs;
+ s->rx_dropped = rx_errs;
+ s->tx_dropped = tx_drps;
+ }
+ return s;
+}
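+
+/* The fetch_begin/retry loop above is the standard u64_stats_sync
+ * pattern: on 32-bit SMP kernels the 64-bit counters cannot be read
+ * atomically, so the snapshot is retried if a writer raced with it.
+ * The u32 error counters tolerate races and skip the syncp, as noted.
+ */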
+
+static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+
+ return vlan_vid_add(phy_dev, proto, vid);
+}
+
+static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+
+ vlan_vid_del(phy_dev, proto, vid);
+ return 0;
+}
+
+static const struct net_device_ops ipvlan_netdev_ops = {
+ .ndo_init = ipvlan_init,
+ .ndo_uninit = ipvlan_uninit,
+ .ndo_open = ipvlan_open,
+ .ndo_stop = ipvlan_stop,
+ .ndo_start_xmit = ipvlan_start_xmit,
+ .ndo_fix_features = ipvlan_fix_features,
+ .ndo_change_rx_flags = ipvlan_change_rx_flags,
+ .ndo_set_rx_mode = ipvlan_set_multicast_mac_filter,
+ .ndo_get_stats64 = ipvlan_get_stats64,
+ .ndo_vlan_rx_add_vid = ipvlan_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ipvlan_vlan_rx_kill_vid,
+};
+
+static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned len)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct net_device *phy_dev = ipvlan->phy_dev;
+
+ /* TODO Probably use a different field than dev_addr so that the
+ * mac-address on the virtual device is portable and can be carried
+ * along, while the packets keep using the mac-addr of the physical
+ * device.
+ */
+ return dev_hard_header(skb, phy_dev, type, daddr,
+ saddr ? : dev->dev_addr, len);
+}
+
+static const struct header_ops ipvlan_header_ops = {
+ .create = ipvlan_hard_header,
+ .rebuild = eth_rebuild_header,
+ .parse = eth_header_parse,
+ .cache = eth_header_cache,
+ .cache_update = eth_header_cache_update,
+};
+
+static int ipvlan_ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return __ethtool_get_settings(ipvlan->phy_dev, cmd);
+}
+
+static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
+}
+
+static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return ipvlan->msg_enable;
+}
+
+static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ ipvlan->msg_enable = value;
+}
+
+static const struct ethtool_ops ipvlan_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = ipvlan_ethtool_get_settings,
+ .get_drvinfo = ipvlan_ethtool_get_drvinfo,
+ .get_msglevel = ipvlan_ethtool_get_msglevel,
+ .set_msglevel = ipvlan_ethtool_set_msglevel,
+};
+
+static int ipvlan_nl_changelink(struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
+
+ if (data && data[IFLA_IPVLAN_MODE]) {
+ u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+
+ ipvlan_set_port_mode(port, nmode);
+ }
+ return 0;
+}
+
+static size_t ipvlan_nl_getsize(const struct net_device *dev)
+{
+ return (0
+ + nla_total_size(2) /* IFLA_IPVLAN_MODE */
+ );
+}
+
+static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (data && data[IFLA_IPVLAN_MODE]) {
+ u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+
+ if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ipvlan_nl_fillinfo(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
+ int ret = -EINVAL;
+
+ if (!port)
+ goto err;
+
+ ret = -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port;
+ struct net_device *phy_dev;
+ int err;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+
+ phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!phy_dev)
+ return -ENODEV;
+
+ if (netif_is_ipvlan(phy_dev)) {
+ struct ipvl_dev *tmp = netdev_priv(phy_dev);
+
+ phy_dev = tmp->phy_dev;
+ } else if (!netif_is_ipvlan_port(phy_dev)) {
+ err = ipvlan_port_create(phy_dev);
+ if (err < 0)
+ return err;
+ }
+
+ port = ipvlan_port_get_rtnl(phy_dev);
+ if (data && data[IFLA_IPVLAN_MODE])
+ port->mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+
+ ipvlan->phy_dev = phy_dev;
+ ipvlan->dev = dev;
+ ipvlan->port = port;
+ ipvlan->sfeatures = IPVLAN_FEATURES;
+ INIT_LIST_HEAD(&ipvlan->addrs);
+ ipvlan->ipv4cnt = 0;
+ ipvlan->ipv6cnt = 0;
+
+ /* TODO Probably put a random address here to be presented to the
+ * world but keep using the physical-dev address for the outgoing
+ * packets.
+ */
+ memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+
+ dev->priv_flags |= IFF_IPVLAN_SLAVE;
+
+ port->count += 1;
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto ipvlan_destroy_port;
+
+ err = netdev_upper_dev_link(phy_dev, dev);
+ if (err)
+ goto ipvlan_destroy_port;
+
+ list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
+ netif_stacked_transfer_operstate(phy_dev, dev);
+ return 0;
+
+ipvlan_destroy_port:
+ port->count -= 1;
+ if (!port->count)
+ ipvlan_port_destroy(phy_dev);
+
+ return err;
+}
+
+static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_addr *addr, *next;
+
+ if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
+ list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
+ ipvlan_ht_addr_del(addr, !dev->dismantle);
+ list_del_rcu(&addr->anode);
+ }
+ }
+ list_del_rcu(&ipvlan->pnode);
+ unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
+}
+
+static void ipvlan_link_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+ dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->netdev_ops = &ipvlan_netdev_ops;
+ dev->destructor = free_netdev;
+ dev->header_ops = &ipvlan_header_ops;
+ dev->ethtool_ops = &ipvlan_ethtool_ops;
+ dev->tx_queue_len = 0;
+}
+
+static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] = {
+ [IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
+};
+
+static struct rtnl_link_ops ipvlan_link_ops = {
+ .kind = "ipvlan",
+ .priv_size = sizeof(struct ipvl_dev),
+
+ .get_size = ipvlan_nl_getsize,
+ .policy = ipvlan_nl_policy,
+ .validate = ipvlan_nl_validate,
+ .fill_info = ipvlan_nl_fillinfo,
+ .changelink = ipvlan_nl_changelink,
+ .maxtype = IFLA_IPVLAN_MAX,
+
+ .setup = ipvlan_link_setup,
+ .newlink = ipvlan_link_new,
+ .dellink = ipvlan_link_delete,
+};
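+
+/* Illustration (not part of the patch; assumes an iproute2 build that
+ * understands the "ipvlan" link kind registered above):
+ *
+ *	ip netns add ns0
+ *	ip link add link eth0 name ipvl0 type ipvlan mode l3
+ *	ip link set dev ipvl0 netns ns0
+ *	ip netns exec ns0 ip addr add 10.0.0.2/24 dev ipvl0
+ */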
+
+static int ipvlan_link_register(struct rtnl_link_ops *ops)
+{
+ return rtnl_link_register(ops);
+}
+
+static int ipvlan_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ipvl_dev *ipvlan, *next;
+ struct ipvl_port *port;
+ LIST_HEAD(lst_kill);
+
+ if (!netif_is_ipvlan_port(dev))
+ return NOTIFY_DONE;
+
+ port = ipvlan_port_get_rtnl(dev);
+
+ switch (event) {
+ case NETDEV_CHANGE:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ netif_stacked_transfer_operstate(ipvlan->phy_dev,
+ ipvlan->dev);
+ break;
+
+ case NETDEV_UNREGISTER:
+ if (dev->reg_state != NETREG_UNREGISTERING)
+ break;
+
+ list_for_each_entry_safe(ipvlan, next, &port->ipvlans,
+ pnode)
+ ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
+ &lst_kill);
+ unregister_netdevice_many(&lst_kill);
+ break;
+
+ case NETDEV_FEAT_CHANGE:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
+ ipvlan->dev->gso_max_size = dev->gso_max_size;
+ netdev_features_change(ipvlan->dev);
+ }
+ break;
+
+ case NETDEV_CHANGEMTU:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ ipvlan_adjust_mtu(ipvlan, dev);
+ break;
+
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid underlying device to change its type. */
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+ struct ipvl_addr *addr;
+
+ if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
+ netif_err(ipvlan, ifup, ipvlan->dev,
+ "Failed to add IPv6=%pI6c addr for %s intf\n",
+ ip6_addr, ipvlan->dev->name);
+ return -EINVAL;
+ }
+ addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
+ if (!addr)
+ return -ENOMEM;
+
+ addr->master = ipvlan;
+ memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
+ addr->atype = IPVL_IPV6;
+ list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+ ipvlan->ipv6cnt++;
+ ipvlan_ht_addr_add(ipvlan, addr);
+
+ return 0;
+}
+
+static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+ struct ipvl_addr *addr;
+
+ addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
+ if (!addr)
+ return;
+
+ ipvlan_ht_addr_del(addr, true);
+ list_del_rcu(&addr->anode);
+ ipvlan->ipv6cnt--;
+ WARN_ON(ipvlan->ipv6cnt < 0);
+ kfree_rcu(addr, rcu);
+}
+
+static int ipvlan_addr6_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr;
+ struct net_device *dev = (struct net_device *)if6->idev->dev;
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (!netif_is_ipvlan(dev))
+ return NOTIFY_DONE;
+
+ if (!ipvlan || !ipvlan->port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (ipvlan_add_addr6(ipvlan, &if6->addr))
+ return NOTIFY_BAD;
+ break;
+
+ case NETDEV_DOWN:
+ ipvlan_del_addr6(ipvlan, &if6->addr);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
+{
+ struct ipvl_addr *addr;
+
+ if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
+ netif_err(ipvlan, ifup, ipvlan->dev,
+ "Failed to add IPv4=%pI4 on %s intf.\n",
+ ip4_addr, ipvlan->dev->name);
+ return -EINVAL;
+ }
+ addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ addr->master = ipvlan;
+ memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
+ addr->atype = IPVL_IPV4;
+ list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+ ipvlan->ipv4cnt++;
+ ipvlan_ht_addr_add(ipvlan, addr);
+ ipvlan_set_broadcast_mac_filter(ipvlan, true);
+
+ return 0;
+}
+
+static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
+{
+ struct ipvl_addr *addr;
+
+ addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
+ if (!addr)
+ return;
+
+ ipvlan_ht_addr_del(addr, true);
+ list_del_rcu(&addr->anode);
+ ipvlan->ipv4cnt--;
+ WARN_ON(ipvlan->ipv4cnt < 0);
+ if (!ipvlan->ipv4cnt)
+ ipvlan_set_broadcast_mac_filter(ipvlan, false);
+ kfree_rcu(addr, rcu);
+}
+
+static int ipvlan_addr4_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *if4 = (struct in_ifaddr *)ptr;
+ struct net_device *dev = (struct net_device *)if4->ifa_dev->dev;
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct in_addr ip4_addr;
+
+ if (!netif_is_ipvlan(dev))
+ return NOTIFY_DONE;
+
+ if (!ipvlan || !ipvlan->port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ ip4_addr.s_addr = if4->ifa_address;
+ if (ipvlan_add_addr4(ipvlan, &ip4_addr))
+ return NOTIFY_BAD;
+ break;
+
+ case NETDEV_DOWN:
+ ip4_addr.s_addr = if4->ifa_address;
+ ipvlan_del_addr4(ipvlan, &ip4_addr);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
+ .notifier_call = ipvlan_addr4_event,
+};
+
+static struct notifier_block ipvlan_notifier_block __read_mostly = {
+ .notifier_call = ipvlan_device_event,
+};
+
+static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
+ .notifier_call = ipvlan_addr6_event,
+};
+
+static int __init ipvlan_init_module(void)
+{
+ int err;
+
+ ipvlan_init_secret();
+ register_netdevice_notifier(&ipvlan_notifier_block);
+ register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+ register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+
+ err = ipvlan_link_register(&ipvlan_link_ops);
+ if (err < 0)
+ goto error;
+
+ return 0;
+error:
+ unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+ unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+ unregister_netdevice_notifier(&ipvlan_notifier_block);
+ return err;
+}
+
+static void __exit ipvlan_cleanup_module(void)
+{
+ rtnl_link_unregister(&ipvlan_link_ops);
+ unregister_netdevice_notifier(&ipvlan_notifier_block);
+ unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
+ unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
+}
+
+module_init(ipvlan_init_module);
+module_exit(ipvlan_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
+MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
+MODULE_ALIAS_RTNL_LINK("ipvlan");
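+
+/* Mode summary, drawn from the paths above: in L2 mode slaves transmit
+ * with dev_queue_xmit() on the master and receive multicasts through
+ * the mac_filters bitmap; in L3 mode slaves run with IFF_NOARP and
+ * unicast traffic is pushed out via ip_local_out()/ip6_local_out().
+ */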
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
index 8ff084f1d236..e8917511e1aa 100644
--- a/drivers/net/irda/act200l-sir.c
+++ b/drivers/net/irda/act200l-sir.c
@@ -107,8 +107,6 @@ static int act200l_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Power on the dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -124,8 +122,6 @@ static int act200l_open(struct sir_dev *dev)
static int act200l_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Power off the dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -143,8 +139,6 @@ static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[3];
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
/* Clear DTR and set RTS to enter command mode */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
@@ -212,8 +206,6 @@ static int act200l_reset(struct sir_dev *dev)
};
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__ );
-
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
/* Reset the dongle : set RTS low for 25 ms */
@@ -240,7 +232,8 @@ static int act200l_reset(struct sir_dev *dev)
dev->speed = 9600;
break;
default:
- IRDA_ERROR("%s(), unknown state %d\n", __func__, state);
+ net_err_ratelimited("%s(), unknown state %d\n",
+ __func__, state);
ret = -1;
break;
}
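/* Note: unlike the removed IRDA_ERROR()/IRDA_WARNING() macros, the
 * net_*_ratelimited() helpers go through net_ratelimit(), so a dongle
 * stuck in an unknown state can no longer flood the kernel log.
 */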
diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c
index 50b2141a6103..e224b8b99517 100644
--- a/drivers/net/irda/actisys-sir.c
+++ b/drivers/net/irda/actisys-sir.c
@@ -165,8 +165,7 @@ static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
int ret = 0;
int i = 0;
- IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __func__,
- speed, dev->speed);
+ pr_debug("%s(), speed=%d (was %d)\n", __func__, speed, dev->speed);
/* dongle was already reset by the irda_request state machine,
 * we are in a known state (dongle default)
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index befa45f809c3..588680a72fa1 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -154,12 +154,10 @@ static int __init ali_ircc_init(void)
int reg, revision;
int i = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
-
ret = platform_driver_register(&ali_ircc_driver);
if (ret) {
- IRDA_ERROR("%s, Can't register driver!\n",
- ALI_IRCC_DRIVER_NAME);
+ net_err_ratelimited("%s, Can't register driver!\n",
+ ALI_IRCC_DRIVER_NAME);
return ret;
}
@@ -168,7 +166,7 @@ static int __init ali_ircc_init(void)
/* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++)
{
- IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name);
+ pr_debug("%s(), Probing for %s ...\n", __func__, chip->name);
/* Try all config registers for this chip */
for (cfg=0; cfg<2; cfg++)
@@ -198,12 +196,13 @@ static int __init ali_ircc_init(void)
if (reg == chip->cid_value)
{
- IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base);
+ pr_debug("%s(), Chip found at 0x%03x\n",
+ __func__, cfg_base);
outb(0x1F, cfg_base);
revision = inb(cfg_base+1);
- IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__,
- chip->name, revision);
+ pr_debug("%s(), Found %s chip, revision=%d\n",
+ __func__, chip->name, revision);
/*
* If the user supplies the base address, then
@@ -225,15 +224,14 @@ static int __init ali_ircc_init(void)
}
else
{
- IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base);
+ pr_debug("%s(), No %s chip at 0x%03x\n",
+ __func__, chip->name, cfg_base);
}
/* Exit configuration */
outb(0xbb, cfg_base);
}
}
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
-
if (ret)
platform_driver_unregister(&ali_ircc_driver);
@@ -250,8 +248,6 @@ static void __exit ali_ircc_cleanup(void)
{
int i;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
-
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
ali_ircc_close(dev_self[i]);
@@ -259,7 +255,6 @@ static void __exit ali_ircc_cleanup(void)
platform_driver_unregister(&ali_ircc_driver);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
}
static const struct net_device_ops ali_ircc_sir_ops = {
@@ -289,11 +284,9 @@ static int ali_ircc_open(int i, chipio_t *info)
int dongle_id;
int err;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
-
if (i >= ARRAY_SIZE(dev_self)) {
- IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
- __func__);
+ net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
+ __func__);
return -ENOMEM;
}
@@ -303,8 +296,8 @@ static int ali_ircc_open(int i, chipio_t *info)
dev = alloc_irdadev(sizeof(*self));
if (dev == NULL) {
- IRDA_ERROR("%s(), can't allocate memory for control block!\n",
- __func__);
+ net_err_ratelimited("%s(), can't allocate memory for control block!\n",
+ __func__);
return -ENOMEM;
}
@@ -328,8 +321,8 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Reserve the ioports that we need */
if (!request_region(self->io.fir_base, self->io.fir_ext,
ALI_IRCC_DRIVER_NAME)) {
- IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__,
- self->io.fir_base);
+ net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
err = -ENODEV;
goto err_out1;
}
@@ -380,19 +373,20 @@ static int ali_ircc_open(int i, chipio_t *info)
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
goto err_out4;
}
- IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
/* Check dongle id */
dongle_id = ali_ircc_read_dongle_id(i, info);
- IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__,
- ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]);
+ net_info_ratelimited("%s(), %s, Found dongle: %s\n",
+ __func__, ALI_IRCC_DRIVER_NAME,
+ dongle_types[dongle_id]);
self->io.dongle_id = dongle_id;
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
@@ -421,8 +415,6 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
iobase = self->io.fir_base;
@@ -431,7 +423,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
+ pr_debug("%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -445,7 +437,6 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
dev_self[self->index] = NULL;
free_netdev(self->netdev);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
}
@@ -488,7 +479,6 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
int cfg_base = info->cfg_base;
int hi, low, reg;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Enter Configuration */
outb(chip->entr1, cfg_base);
@@ -507,13 +497,13 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
info->sir_base = info->fir_base;
- IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
+ pr_debug("%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
/* Read IRQ control register */
outb(0x70, cfg_base);
reg = inb(cfg_base+1);
info->irq = reg & 0x0f;
- IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
+ pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
/* Read DMA channel */
outb(0x74, cfg_base);
@@ -521,26 +511,26 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
info->dma = reg & 0x07;
if(info->dma == 0x04)
- IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__);
+ net_warn_ratelimited("%s(), No DMA channel assigned !\n",
+ __func__);
else
- IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
+ pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
/* Read Enabled Status */
outb(0x30, cfg_base);
reg = inb(cfg_base+1);
info->enabled = (reg & 0x80) && (reg & 0x01);
- IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled);
+ pr_debug("%s(), probing enabled=%d\n", __func__, info->enabled);
/* Read Power Status */
outb(0x22, cfg_base);
reg = inb(cfg_base+1);
info->suspended = (reg & 0x20);
- IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended);
+ pr_debug("%s(), probing suspended=%d\n", __func__, info->suspended);
/* Exit configuration */
outb(0xbb, cfg_base);
- IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
return 0;
}
@@ -558,7 +548,6 @@ static int ali_ircc_setup(chipio_t *info)
int version;
int iobase = info->fir_base;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Locking comments :
* Most operations here need to be protected. We are called before
@@ -578,8 +567,8 @@ static int ali_ircc_setup(chipio_t *info)
/* Should be 0x00 in the M1535/M1535D */
if(version != 0x00)
{
- IRDA_ERROR("%s, Wrong chip version %02x\n",
- ALI_IRCC_DRIVER_NAME, version);
+ net_err_ratelimited("%s, Wrong chip version %02x\n",
+ ALI_IRCC_DRIVER_NAME, version);
return -1;
}
@@ -612,14 +601,13 @@ static int ali_ircc_setup(chipio_t *info)
/* Switch to SIR space */
FIR2SIR(iobase);
- IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n",
- ALI_IRCC_DRIVER_NAME);
+ net_info_ratelimited("%s, driver loaded (Benjamin Kong)\n",
+ ALI_IRCC_DRIVER_NAME);
/* Enable receive interrupts */
// outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
// Turn on the interrupts in ali_ircc_net_open
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return 0;
}
@@ -636,7 +624,6 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
int dongle_id, reg;
int cfg_base = info->cfg_base;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
/* Enter Configuration */
outb(chips[i].entr1, cfg_base);
@@ -650,13 +637,12 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
outb(0xf0, cfg_base);
reg = inb(cfg_base+1);
dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
- IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__,
- dongle_id, dongle_types[dongle_id]);
+ pr_debug("%s(), probing dongle_id=%d, dongle_types=%s\n",
+ __func__, dongle_id, dongle_types[dongle_id]);
/* Exit configuration */
outb(0xbb, cfg_base);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return dongle_id;
}
@@ -673,7 +659,6 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
struct ali_ircc_cb *self;
int ret;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
self = netdev_priv(dev);
@@ -687,7 +672,6 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
spin_unlock(&self->lock);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
return ret;
}
/*
@@ -701,7 +685,6 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
__u8 eir, OldMessageCount;
int iobase, tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__);
iobase = self->io.fir_base;
@@ -714,10 +697,10 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
//self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
- IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID);
- IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus);
- IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier);
- IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir);
+ pr_debug("%s(), self->InterruptID = %x\n", __func__, self->InterruptID);
+ pr_debug("%s(), self->LineStatus = %x\n", __func__, self->LineStatus);
+ pr_debug("%s(), self->ier = %x\n", __func__, self->ier);
+ pr_debug("%s(), eir = %x\n", __func__, eir);
/* Disable interrupts */
SetCOMInterrupts(self, FALSE);
@@ -728,7 +711,8 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
{
if (self->io.direction == IO_XMIT) /* TX */
{
- IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__);
+ pr_debug("%s(), ******* IIR_EOM (Tx) *******\n",
+ __func__);
if(ali_ircc_dma_xmit_complete(self))
{
@@ -747,23 +731,27 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
}
else /* RX */
{
- IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__);
+ pr_debug("%s(), ******* IIR_EOM (Rx) *******\n",
+ __func__);
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
+ pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE ********\n",
+ __func__);
}
if (ali_ircc_dma_receive_complete(self))
{
- IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
+ pr_debug("%s(), ******* receive complete ********\n",
+ __func__);
self->ier = IER_EOM;
}
else
{
- IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
+ pr_debug("%s(), ******* Not receive complete ********\n",
+ __func__);
self->ier = IER_EOM | IER_TIMER;
}
@@ -776,7 +764,8 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
+ pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE *******\n",
+ __func__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
@@ -808,7 +797,6 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
/* Restore Interrupt */
SetCOMInterrupts(self, TRUE);
- IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__);
return IRQ_RETVAL(eir);
}
@@ -823,7 +811,6 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
int iobase;
int iir, lsr;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
iobase = self->io.sir_base;
@@ -832,13 +819,13 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
/* Clear interrupt */
lsr = inb(iobase+UART_LSR);
- IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__,
- iir, lsr, iobase);
+ pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __func__, iir, lsr, iobase);
switch (iir)
{
case UART_IIR_RLSI:
- IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
+ pr_debug("%s(), RLSI\n", __func__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
@@ -852,15 +839,14 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
}
break;
default:
- IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir);
+ pr_debug("%s(), unhandled IIR=%#x\n",
+ __func__, iir);
break;
}
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
-
return IRQ_RETVAL(iir);
}
@@ -876,7 +862,6 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
int boguscount = 0;
int iobase;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
@@ -891,12 +876,11 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
/* Make sure we don't stay here too long */
if (boguscount++ > 32) {
- IRDA_DEBUG(2,"%s(), breaking!\n", __func__);
+ pr_debug("%s(), breaking!\n", __func__);
break;
}
} while (inb(iobase+UART_LSR) & UART_LSR_DR);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -913,7 +897,6 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
IRDA_ASSERT(self != NULL, return;);
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
iobase = self->io.sir_base;
@@ -932,16 +915,18 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
{
/* We must wait until all data are gone */
while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
- IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ );
+ pr_debug("%s(), UART_LSR_THRE\n", __func__);
- IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed);
+ pr_debug("%s(), Changing speed! self->new_speed = %d\n",
+ __func__, self->new_speed);
ali_ircc_change_speed(self, self->new_speed);
self->new_speed = 0;
// benjamin 2000/11/10 06:32PM
if (self->io.speed > 115200)
{
- IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
+ pr_debug("%s(), ali_ircc_change_speed from UART_LSR_TEMT\n",
+ __func__);
self->ier = IER_EOM;
// SetCOMInterrupts(self, TRUE);
@@ -959,7 +944,6 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
outb(UART_IER_RDI, iobase+UART_IER);
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
@@ -967,9 +951,8 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
struct net_device *dev = self->netdev;
int iobase;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
+ pr_debug("%s(), setting speed = %d\n", __func__, baud);
/* This function *must* be called with irq off and spin-lock.
* - Jean II */
@@ -1008,7 +991,6 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
netif_wake_queue(self->netdev);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
@@ -1018,14 +1000,14 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
struct ali_ircc_cb *self = priv;
struct net_device *dev;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
iobase = self->io.fir_base;
- IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud);
+ pr_debug("%s(), self->io.speed = %d, change to speed = %d\n",
+ __func__, self->io.speed, baud);
/* Come from SIR speed */
if(self->io.speed <=115200)
@@ -1039,7 +1021,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
// Set Dongle Speed mode
ali_ircc_change_dongle_speed(self, baud);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -1057,9 +1038,8 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
int lcr; /* Line control reg */
int divisor;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed);
+ pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
@@ -1113,7 +1093,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
@@ -1123,14 +1102,14 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
int iobase,dongle_id;
int tmp = 0;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
dongle_id = self->io.dongle_id;
/* We are already locked, no need to do it again */
- IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed);
+ pr_debug("%s(), Set Speed for %s , Speed = %d\n",
+ __func__, dongle_types[dongle_id], speed);
switch_bank(iobase, BANK2);
tmp = inb(iobase+FIR_IRDA_CR);
@@ -1294,7 +1273,6 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
/*
@@ -1307,11 +1285,10 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
{
int actual = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
/* Tx FIFO should be empty! */
if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
- IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ );
+ pr_debug("%s(), failed, fifo not empty!\n", __func__);
return 0;
}
@@ -1323,7 +1300,6 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
actual++;
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return actual;
}
@@ -1339,7 +1315,6 @@ static int ali_ircc_net_open(struct net_device *dev)
int iobase;
char hwname[32];
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -1352,9 +1327,8 @@ static int ali_ircc_net_open(struct net_device *dev)
/* Request IRQ and install Interrupt Handler */
if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
{
- IRDA_WARNING("%s, unable to allocate irq=%d\n",
- ALI_IRCC_DRIVER_NAME,
- self->io.irq);
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ ALI_IRCC_DRIVER_NAME, self->io.irq);
return -EAGAIN;
}
@@ -1363,9 +1337,8 @@ static int ali_ircc_net_open(struct net_device *dev)
* failure.
*/
if (request_dma(self->io.dma, dev->name)) {
- IRDA_WARNING("%s, unable to allocate dma=%d\n",
- ALI_IRCC_DRIVER_NAME,
- self->io.dma);
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ ALI_IRCC_DRIVER_NAME, self->io.dma);
free_irq(self->io.irq, dev);
return -EAGAIN;
}
@@ -1385,7 +1358,6 @@ static int ali_ircc_net_open(struct net_device *dev)
*/
self->irlap = irlap_open(dev, &self->qos, hwname);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1402,7 +1374,6 @@ static int ali_ircc_net_close(struct net_device *dev)
struct ali_ircc_cb *self;
//int iobase;
- IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -1425,7 +1396,6 @@ static int ali_ircc_net_close(struct net_device *dev)
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1445,7 +1415,6 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
__u32 speed;
int mtt, diff;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
self = netdev_priv(dev);
iobase = self->io.fir_base;
@@ -1499,7 +1468,8 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
diff = self->now.tv_usec - self->stamp.tv_usec;
/* self->stamp is set from ali_ircc_dma_receive_complete() */
- IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
+ pr_debug("%s(), ******* diff = %d *******\n",
+ __func__, diff);
if (diff < 0)
diff += 1000000;
@@ -1521,7 +1491,8 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
/* Adjust for timer resolution */
mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */
- IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);
+ pr_debug("%s(), ************** mtt = %d ***********\n",
+ __func__, mtt);
/* Setup timer */
if (mtt == 1) /* 500 us */
@@ -1578,7 +1549,6 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return NETDEV_TX_OK;
}
@@ -1589,7 +1559,6 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
unsigned char FIFO_OPTI, Hi, Lo;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1640,7 +1609,8 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
tmp = inb(iobase+FIR_LCR_B);
tmp &= ~0x20; // Disable SIP
outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
+ pr_debug("%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n",
+ __func__, inb(iobase + FIR_LCR_B));
outb(0, iobase+FIR_LSR);
@@ -1650,7 +1620,6 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
@@ -1658,7 +1627,6 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
int iobase;
int ret = TRUE;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1671,7 +1639,8 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
{
- IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
+ net_err_ratelimited("%s(), ********* LSR_FRAME_ABORT *********\n",
+ __func__);
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
}
@@ -1714,7 +1683,6 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return ret;
}
@@ -1729,7 +1697,6 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
{
int iobase, tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
iobase = self->io.fir_base;
@@ -1767,7 +1734,8 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
//switch_bank(iobase, BANK0);
tmp = inb(iobase+FIR_LCR_B);
outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
- IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
+ pr_debug("%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n",
+ __func__, inb(iobase + FIR_LCR_B));
/* Set Rx Threshold */
switch_bank(iobase, BANK1);
@@ -1779,7 +1747,6 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return 0;
}
@@ -1790,8 +1757,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
__u8 status, MessageCount;
int len, i, iobase, val;
- IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
-
st_fifo = &self->st_fifo;
iobase = self->io.fir_base;
@@ -1799,7 +1764,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
MessageCount = inb(iobase+ FIR_LSR)&0x07;
if (MessageCount > 0)
- IRDA_DEBUG(0, "%s(), Message count = %d,\n", __func__ , MessageCount);
+ pr_debug("%s(), Message count = %d\n", __func__, MessageCount);
for (i=0; i<=MessageCount; i++)
{
@@ -1812,11 +1777,11 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
len = len << 8;
len |= inb(iobase+FIR_RX_DSR_LO);
- IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len);
- IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status);
+ pr_debug("%s(), RX Length = 0x%.2x,\n", __func__ , len);
+ pr_debug("%s(), RX Status = 0x%.2x,\n", __func__ , status);
if (st_fifo->tail >= MAX_RX_WINDOW) {
- IRDA_DEBUG(0, "%s(), window is full!\n", __func__ );
+ pr_debug("%s(), window is full!\n", __func__);
continue;
}
@@ -1839,7 +1804,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
/* Check for errors */
if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
{
- IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
+ pr_debug("%s(), ************* RX Errors ************\n",
+ __func__);
/* Skip frame */
self->netdev->stats.rx_errors++;
@@ -1849,29 +1815,34 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
if (status & LSR_FIFO_UR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
+ pr_debug("%s(), ************* FIFO Errors ************\n",
+ __func__);
}
if (status & LSR_FRAME_ERROR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
+ pr_debug("%s(), ************* FRAME Errors ************\n",
+ __func__);
}
if (status & LSR_CRC_ERROR)
{
self->netdev->stats.rx_crc_errors++;
- IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
+ pr_debug("%s(), ************* CRC Errors ************\n",
+ __func__);
}
if(self->rcvFramesOverflow)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
+ pr_debug("%s(), ************* Overran DMA buffer ************\n",
+ __func__);
}
if(len == 0)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
+ pr_debug("%s(), ********** Receive Frame Size = 0 *********\n",
+ __func__);
}
}
else
@@ -1883,7 +1854,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
val = inb(iobase+FIR_BSR);
if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
{
- IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
+ pr_debug("%s(), ************* BSR_FIFO_NOT_EMPTY ************\n",
+ __func__);
/* Put this entry back in fifo */
st_fifo->head--;
@@ -1918,9 +1890,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
skb = dev_alloc_skb(len+1);
if (skb == NULL)
{
- IRDA_WARNING("%s(), memory squeeze, "
- "dropping frame.\n",
- __func__);
self->netdev->stats.rx_dropped++;
return FALSE;
@@ -1947,7 +1916,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
switch_bank(iobase, BANK0);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
return TRUE;
}
@@ -1967,7 +1935,6 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
int iobase;
__u32 speed;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
@@ -2016,7 +1983,6 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
dev_kfree_skb(skb);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return NETDEV_TX_OK;
}
@@ -2035,7 +2001,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
unsigned long flags;
int ret = 0;
- IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
@@ -2043,11 +2008,11 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
- IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ );
+ pr_debug("%s(), SIOCSBANDWIDTH\n", __func__);
/*
* This function will also be used by IrLAP to change the
* speed, so we still must allow for speed change within
@@ -2061,13 +2026,13 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
spin_unlock_irqrestore(&self->lock, flags);
break;
case SIOCSMEDIABUSY: /* Set media busy */
- IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ );
+ pr_debug("%s(), SIOCSMEDIABUSY\n", __func__);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
irda_device_set_media_busy(self->netdev, TRUE);
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
- IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ );
+ pr_debug("%s(), SIOCGRECEIVING\n", __func__);
/* This is protected */
irq->ifr_receiving = ali_ircc_is_receiving(self);
break;
@@ -2075,7 +2040,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = -EOPNOTSUPP;
}
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return ret;
}
@@ -2092,7 +2056,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
int status = FALSE;
int iobase;
- IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ );
IRDA_ASSERT(self != NULL, return FALSE;);
@@ -2106,7 +2069,8 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
{
/* We are receiving something */
- IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ );
+ pr_debug("%s(), We are receiving something\n",
+ __func__);
status = TRUE;
}
switch_bank(iobase, BANK0);
@@ -2118,7 +2082,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
return status;
}
@@ -2127,7 +2090,7 @@ static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
{
struct ali_ircc_cb *self = platform_get_drvdata(dev);
- IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);
+ net_info_ratelimited("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);
if (self->io.suspended)
return 0;
@@ -2148,7 +2111,7 @@ static int ali_ircc_resume(struct platform_device *dev)
ali_ircc_net_open(self->netdev);
- IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);
+ net_info_ratelimited("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);
self->io.suspended = 0;
@@ -2164,7 +2127,8 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
int iobase = self->io.fir_base; /* or sir_base */
- IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable);
+ pr_debug("%s(), -------- Start -------- ( Enable = %d )\n",
+ __func__, enable);
/* Enable the interrupt which we wish to */
if (enable){
@@ -2205,14 +2169,12 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
else
outb(newMask, iobase+UART_IER);
- IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
}
static void SIR2FIR(int iobase)
{
//unsigned char tmp;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
/* Already protected (change_speed() or setup()), no need to lock.
* Jean II */
@@ -2228,14 +2190,12 @@ static void SIR2FIR(int iobase)
//tmp |= 0x20;
//outb(tmp, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
static void FIR2SIR(int iobase)
{
unsigned char val;
- IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
/* Already protected (change_speed() or setup()), no need to lock.
* Jean II */
@@ -2251,7 +2211,6 @@ static void FIR2SIR(int iobase)
val = inb(iobase+UART_LSR);
val = inb(iobase+UART_MSR);
- IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
}
MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index a87a82ca111f..b337e6d23a88 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -232,7 +232,7 @@ char head=tete;
for (i=0;i<len;i+=16) {
for (j=0;j<16 && i+j<len;j++) { sprintf(&dump[3*j],"%02x.",data[i+j]); }
dump [3*j]=0;
- IRDA_DEBUG (2, "%c%s\n",head , dump);
+ pr_debug("%c%s\n", head, dump);
head='+';
}
}
@@ -245,8 +245,6 @@ toshoboe_dumpregs (struct toshoboe_cb *self)
{
__u32 ringbase;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
ringbase = INB (OBOE_RING_BASE0) << 10;
ringbase |= INB (OBOE_RING_BASE1) << 18;
ringbase |= INB (OBOE_RING_BASE2) << 26;
@@ -293,8 +291,6 @@ static void
toshoboe_disablebm (struct toshoboe_cb *self)
{
__u8 command;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
command &= ~PCI_COMMAND_MASTER;
pci_write_config_byte (self->pdev, PCI_COMMAND, command);
@@ -305,8 +301,6 @@ toshoboe_disablebm (struct toshoboe_cb *self)
static void
toshoboe_stopchip (struct toshoboe_cb *self)
{
- IRDA_DEBUG (4, "%s()\n", __func__);
-
/*Disable interrupts */
OUTB (0x0, OBOE_IER);
/*Disable DMA, Disable Rx, Disable Tx */
@@ -350,7 +344,7 @@ toshoboe_setbaud (struct toshoboe_cb *self)
__u16 pconfig = 0;
__u8 config0l = 0;
- IRDA_DEBUG (2, "%s(%d/%d)\n", __func__, self->speed, self->io.speed);
+ pr_debug("%s(%d/%d)\n", __func__, self->speed, self->io.speed);
switch (self->speed)
{
@@ -482,7 +476,6 @@ toshoboe_setbaud (struct toshoboe_cb *self)
static void
toshoboe_enablebm (struct toshoboe_cb *self)
{
- IRDA_DEBUG (4, "%s()\n", __func__);
pci_set_master (self->pdev);
}
@@ -492,8 +485,6 @@ toshoboe_initring (struct toshoboe_cb *self)
{
int i;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
for (i = 0; i < TX_SLOTS; ++i)
{
self->ring->tx[i].len = 0;
@@ -550,8 +541,6 @@ toshoboe_startchip (struct toshoboe_cb *self)
{
__u32 physaddr;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
toshoboe_initring (self);
toshoboe_enablebm (self);
OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1);
@@ -636,9 +625,8 @@ toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)
xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/
xbofs++;
- IRDA_DEBUG (2, DRIVER_NAME
- ": generated mtt of %d bytes for %d us at %d baud\n"
- , xbofs,mtt,self->speed);
+ pr_debug(DRIVER_NAME ": generated mtt of %d bytes for %d us at %d baud\n",
+ xbofs, mtt, self->speed);
if (xbofs > TX_LEN)
{
@@ -824,8 +812,6 @@ toshoboe_probe (struct toshoboe_cb *self)
#endif
unsigned long flags;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
if (request_irq (self->io.irq, toshoboe_probeinterrupt,
self->io.irqflags, "toshoboe", (void *) self))
{
@@ -983,10 +969,10 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
IRDA_ASSERT (self != NULL, return NETDEV_TX_OK; );
- IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __func__
- ,skb->len,self->txpending,INB (OBOE_ENABLEH));
+ pr_debug("%s.tx:%x(%x)%x\n",
+ __func__, skb->len, self->txpending, INB(OBOE_ENABLEH));
if (!cb->magic) {
- IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __func__, cb->magic);
+ pr_debug("%s.Not IrLAP:%x\n", __func__, cb->magic);
#ifdef DUMP_PACKETS
_dumpbufs(skb->data,skb->len,'>');
#endif
@@ -1012,8 +998,8 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
if (self->txpending || skb->len)
{
self->new_speed = speed;
- IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
- __func__, speed);
+ pr_debug("%s: Queued TxDone scheduled speed change %d\n" ,
+ __func__, speed);
/* if no data, that's all! */
if (!skb->len)
{
@@ -1055,8 +1041,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
/* which we will add a wrong checksum to */
mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
- IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __func__
- ,skb->len,mtt,self->txpending);
+ pr_debug("%s.mtt:%x(%x)%d\n", __func__, skb->len, mtt, self->txpending);
if (mtt)
{
self->ring->tx[self->txs].len = mtt & 0xfff;
@@ -1099,8 +1084,9 @@ dumpbufs(skb->data,skb->len,'>');
if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
{
- IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __func__
- ,skb->len, self->ring->tx[self->txs].control, self->txpending);
+ pr_debug("%s.ful:%x(%x)%x\n",
+ __func__, skb->len, self->ring->tx[self->txs].control,
+ self->txpending);
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
spin_unlock_irqrestore(&self->spinlock, flags);
return NETDEV_TX_BUSY;
@@ -1177,8 +1163,7 @@ toshoboe_interrupt (int irq, void *dev_id)
if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
self->txpending++;
}
- IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __func__
- ,irqstat,txp,self->txpending);
+ pr_debug("%s.txd(%x)%x/%x\n", __func__, irqstat, txp, self->txpending);
txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
@@ -1206,8 +1191,8 @@ toshoboe_interrupt (int irq, void *dev_id)
if ((!self->txpending) && (self->new_speed))
{
self->speed = self->new_speed;
- IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
- __func__, self->speed);
+ pr_debug("%s: Executed TxDone scheduled speed change %d\n",
+ __func__, self->speed);
toshoboe_setbaud (self);
}
@@ -1222,8 +1207,8 @@ toshoboe_interrupt (int irq, void *dev_id)
{
int len = self->ring->rx[self->rxs].len;
skb = NULL;
- IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __func__
- ,len,self->ring->rx[self->rxs].control);
+ pr_debug("%s.rcv:%x(%x)\n", __func__
+ , len, self->ring->rx[self->rxs].control);
#ifdef DUMP_PACKETS
dumpbufs(self->rx_bufs[self->rxs],len,'<');
@@ -1244,7 +1229,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 2;
else
len = 0;
- IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __func__, len,enable);
+ pr_debug("%s.SIR:%x(%x)\n", __func__, len, enable);
}
#ifdef USE_MIR
@@ -1254,7 +1239,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 2;
else
len = 0;
- IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __func__, len,enable);
+ pr_debug("%s.MIR:%x(%x)\n", __func__, len, enable);
}
#endif
else if (enable & OBOE_ENABLEH_FIRON)
@@ -1263,10 +1248,10 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
len -= 4; /*FIXME: check this */
else
len = 0;
- IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __func__, len,enable);
+ pr_debug("%s.FIR:%x(%x)\n", __func__, len, enable);
}
else
- IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __func__, len,enable);
+ pr_debug("%s.?IR:%x(%x)\n", __func__, len, enable);
if (len)
{
@@ -1299,8 +1284,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
/* (SIR) data is splitted in several slots. */
/* we have to join all the received buffers received */
/*in a large buffer before checking CRC. */
- IRDA_DEBUG (0, "%s.err:%x(%x)\n", __func__
- ,len,self->ring->rx[self->rxs].control);
+ pr_debug("%s.err:%x(%x)\n", __func__
+ , len, self->ring->rx[self->rxs].control);
}
self->ring->rx[self->rxs].len = 0x0;
@@ -1327,8 +1312,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
if (irqstat & OBOE_INT_SIP)
{
self->int_sip++;
- IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __func__
- ,self->int_sip,irqstat,self->txpending);
+ pr_debug("%s.sip:%x(%x)%x\n",
+ __func__, self->int_sip, irqstat, self->txpending);
}
return IRQ_HANDLED;
}
@@ -1341,8 +1326,6 @@ toshoboe_net_open (struct net_device *dev)
unsigned long flags;
int rc;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
self = netdev_priv(dev);
if (self->async)
@@ -1379,8 +1362,6 @@ toshoboe_net_close (struct net_device *dev)
{
struct toshoboe_cb *self;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
IRDA_ASSERT (dev != NULL, return -1; );
self = netdev_priv(dev);
@@ -1424,7 +1405,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT (self != NULL, return -1; );
- IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
/* Disable interrupts & save flags */
spin_lock_irqsave(&self->spinlock, flags);
@@ -1436,8 +1417,8 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
* speed, so we still must allow for speed change within
* interrupt context.
*/
- IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __func__
- ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
+ pr_debug("%s(BANDWIDTH), %s, (%X/%ld\n",
+ __func__, dev->name, INB(OBOE_STATUS), irq->ifr_baudrate);
if (!in_interrupt () && !capable (CAP_NET_ADMIN)) {
ret = -EPERM;
goto out;
@@ -1449,8 +1430,9 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
self->new_speed = irq->ifr_baudrate;
break;
case SIOCSMEDIABUSY: /* Set media busy */
- IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __func__
- ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
+ pr_debug("%s(MEDIABUSY), %s, (%X/%x)\n",
+ __func__, dev->name,
+ INB(OBOE_STATUS), capable(CAP_NET_ADMIN));
if (!capable (CAP_NET_ADMIN)) {
ret = -EPERM;
goto out;
@@ -1459,11 +1441,11 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
- IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __func__
- ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
+ pr_debug("%s(RECEIVING), %s, (%X/%x)\n",
+ __func__, dev->name, INB(OBOE_STATUS), irq->ifr_receiving);
break;
default:
- IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
ret = -EOPNOTSUPP;
}
out:
@@ -1490,8 +1472,6 @@ toshoboe_close (struct pci_dev *pci_dev)
int i;
struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
- IRDA_DEBUG (4, "%s()\n", __func__);
-
IRDA_ASSERT (self != NULL, return; );
if (!self->stopped)
@@ -1538,8 +1518,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
int ok = 0;
int err;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
if ((err=pci_enable_device(pci_dev)))
return err;
@@ -1700,8 +1678,6 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
unsigned long flags;
int i = 10;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
if (!self || self->stopped)
return 0;
@@ -1728,8 +1704,6 @@ toshoboe_wakeup (struct pci_dev *pci_dev)
struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
unsigned long flags;
- IRDA_DEBUG (4, "%s()\n", __func__);
-
if (!self || !self->stopped)
return 0;
diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c
index 96cdecff349d..7e0a5b8c6d53 100644
--- a/drivers/net/irda/girbil-sir.c
+++ b/drivers/net/irda/girbil-sir.c
@@ -86,8 +86,6 @@ static int girbil_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power on dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -102,8 +100,6 @@ static int girbil_open(struct sir_dev *dev)
static int girbil_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -126,8 +122,6 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[2];
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* dongle alread reset - port and dongle at default speed */
switch(state) {
@@ -179,7 +173,8 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s - undefined state %d\n", __func__, state);
+ net_err_ratelimited("%s - undefined state %d\n",
+ __func__, state);
ret = -EINVAL;
break;
}
@@ -209,8 +204,6 @@ static int girbil_reset(struct sir_dev *dev)
u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
/* Reset dongle */
@@ -241,7 +234,8 @@ static int girbil_reset(struct sir_dev *dev)
break;
default:
- IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
ret = -1;
break;
}
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 925b78cc9797..48b2f9a321b7 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -176,12 +176,13 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
(!force) && (self->speed != -1)) {
/* No speed and xbofs change here
* (we'll do it later in the write callback) */
- IRDA_DEBUG(2, "%s(), not changing speed yet\n", __func__);
+ pr_debug("%s(), not changing speed yet\n", __func__);
*header = 0;
return;
}
- IRDA_DEBUG(2, "%s(), changing speed to %d\n", __func__, self->new_speed);
+ pr_debug("%s(), changing speed to %d\n",
+ __func__, self->new_speed);
self->speed = self->new_speed;
/* We will do ` self->new_speed = -1; ' in the completion
* handler just in case the current URB fail - Jean II */
@@ -227,7 +228,8 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
/* Set the negotiated additional XBOFS */
if (self->new_xbofs != -1) {
- IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __func__, self->new_xbofs);
+ pr_debug("%s(), changing xbofs to %d\n",
+ __func__, self->new_xbofs);
self->xbofs = self->new_xbofs;
/* We will do ` self->new_xbofs = -1; ' in the completion
* handler just in case the current URB fail - Jean II */
@@ -301,13 +303,13 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
struct urb *urb;
int ret;
- IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __func__,
- self->new_speed, self->new_xbofs);
+ pr_debug("%s(), speed=%d, xbofs=%d\n", __func__,
+ self->new_speed, self->new_xbofs);
/* Grab the speed URB */
urb = self->speed_urb;
if (urb->status != 0) {
- IRDA_WARNING("%s(), URB still in use!\n", __func__);
+ net_warn_ratelimited("%s(), URB still in use!\n", __func__);
return;
}
@@ -333,7 +335,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
/* Irq disabled -> GFP_ATOMIC */
if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
- IRDA_WARNING("%s(), failed Speed URB\n", __func__);
+ net_warn_ratelimited("%s(), failed Speed URB\n", __func__);
}
}
@@ -346,8 +348,6 @@ static void speed_bulk_callback(struct urb *urb)
{
struct irda_usb_cb *self = urb->context;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* We should always have a context */
IRDA_ASSERT(self != NULL, return;);
/* We should always be called for the speed URB */
@@ -356,7 +356,8 @@ static void speed_bulk_callback(struct urb *urb)
/* Check for timeout and other USB nasties */
if (urb->status != 0) {
/* I get a lot of -ECONNABORTED = -103 here - Jean II */
- IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
+ pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
/* Don't do anything here, that might confuse the USB layer.
* Instead, we will wait for irda_usb_net_timeout(), the
@@ -391,7 +392,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
s16 xbofs;
int res, mtt;
- IRDA_DEBUG(4, "%s() on %s\n", __func__, netdev->name);
+ pr_debug("%s() on %s\n", __func__, netdev->name);
netif_stop_queue(netdev);
@@ -402,7 +403,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
* We need to check self->present under the spinlock because
* of irda_usb_disconnect() is synchronous - Jean II */
if (!self->present) {
- IRDA_DEBUG(0, "%s(), Device is gone...\n", __func__);
+ pr_debug("%s(), Device is gone...\n", __func__);
goto drop;
}
@@ -435,7 +436,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
}
if (urb->status != 0) {
- IRDA_WARNING("%s(), URB still in use!\n", __func__);
+ net_warn_ratelimited("%s(), URB still in use!\n", __func__);
goto drop;
}
@@ -522,7 +523,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
/* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
- IRDA_WARNING("%s(), failed Tx URB\n", __func__);
+ net_warn_ratelimited("%s(), failed Tx URB\n", __func__);
netdev->stats.tx_errors++;
/* Let USB recover : We will catch that in the watchdog */
/*netif_start_queue(netdev);*/
@@ -554,8 +555,6 @@ static void write_bulk_callback(struct urb *urb)
struct sk_buff *skb = urb->context;
struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* We should always have a context */
IRDA_ASSERT(self != NULL, return;);
/* We should always be called for the speed URB */
@@ -568,7 +567,8 @@ static void write_bulk_callback(struct urb *urb)
/* Check for timeout and other USB nasties */
if (urb->status != 0) {
/* I get a lot of -ECONNABORTED = -103 here - Jean II */
- IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
+ pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
/* Don't do anything here, that might confuse the USB layer,
* and we could go in recursion and blow the kernel stack...
@@ -587,7 +587,7 @@ static void write_bulk_callback(struct urb *urb)
/* If the network is closed, stop everything */
if ((!self->netopen) || (!self->present)) {
- IRDA_DEBUG(0, "%s(), Network is gone...\n", __func__);
+ pr_debug("%s(), Network is gone...\n", __func__);
spin_unlock_irqrestore(&self->lock, flags);
return;
}
@@ -598,7 +598,7 @@ static void write_bulk_callback(struct urb *urb)
(self->new_xbofs != self->xbofs)) {
/* We haven't changed speed yet (because of
* IUC_SPEED_BUG), so do it now - Jean II */
- IRDA_DEBUG(1, "%s(), Changing speed now...\n", __func__);
+ pr_debug("%s(), Changing speed now...\n", __func__);
irda_usb_change_speed_xbofs(self);
} else {
/* New speed and xbof is now committed in hardware */
@@ -630,7 +630,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
struct urb *urb;
int done = 0; /* If we have made any progress */
- IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __func__);
+ pr_debug("%s(), Network layer thinks we timed out!\n", __func__);
IRDA_ASSERT(self != NULL, return;);
/* Protect us from USB callbacks, net Tx and else. */
@@ -638,7 +638,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
/* self->present *MUST* be read under spinlock */
if (!self->present) {
- IRDA_WARNING("%s(), device not present!\n", __func__);
+ net_warn_ratelimited("%s(), device not present!\n", __func__);
netif_stop_queue(netdev);
spin_unlock_irqrestore(&self->lock, flags);
return;
@@ -647,7 +647,8 @@ static void irda_usb_net_timeout(struct net_device *netdev)
/* Check speed URB */
urb = self->speed_urb;
if (urb->status != 0) {
- IRDA_DEBUG(0, "%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
+ pr_debug("%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n",
+ netdev->name, urb->status, urb->transfer_flags);
switch (urb->status) {
case -EINPROGRESS:
@@ -672,7 +673,8 @@ static void irda_usb_net_timeout(struct net_device *netdev)
if (urb->status != 0) {
struct sk_buff *skb = urb->context;
- IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
+ pr_debug("%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n",
+ netdev->name, urb->status, urb->transfer_flags);
/* Increase error count */
netdev->stats.tx_errors++;
@@ -761,8 +763,6 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
struct irda_skb_cb *cb;
int ret;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* This should never happen */
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(urb != NULL, return;);
@@ -783,8 +783,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
if (ret) {
/* If this ever happen, we are in deep s***.
* Basically, the Rx path will stop... */
- IRDA_WARNING("%s(), Failed to submit Rx URB %d\n",
- __func__, ret);
+ net_warn_ratelimited("%s(), Failed to submit Rx URB %d\n",
+ __func__, ret);
}
}
@@ -805,7 +805,7 @@ static void irda_usb_receive(struct urb *urb)
struct urb *next_urb;
unsigned int len, docopy;
- IRDA_DEBUG(2, "%s(), len=%d\n", __func__, urb->actual_length);
+ pr_debug("%s(), len=%d\n", __func__, urb->actual_length);
/* Find ourselves */
cb = (struct irda_skb_cb *) skb->cb;
@@ -815,7 +815,7 @@ static void irda_usb_receive(struct urb *urb)
/* If the network is closed or the device gone, stop everything */
if ((!self->netopen) || (!self->present)) {
- IRDA_DEBUG(0, "%s(), Network is gone!\n", __func__);
+ pr_debug("%s(), Network is gone!\n", __func__);
/* Don't re-submit the URB : will stall the Rx path */
return;
}
@@ -838,7 +838,8 @@ static void irda_usb_receive(struct urb *urb)
/* Usually precursor to a hot-unplug on OHCI. */
default:
self->netdev->stats.rx_errors++;
- IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
+ pr_debug("%s(), RX status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
break;
}
/* If we received an error, we don't want to resubmit the
@@ -859,7 +860,7 @@ static void irda_usb_receive(struct urb *urb)
/* Check for empty frames */
if (urb->actual_length <= self->header_length) {
- IRDA_WARNING("%s(), empty frame!\n", __func__);
+ net_warn_ratelimited("%s(), empty frame!\n", __func__);
goto done;
}
@@ -964,8 +965,6 @@ static void irda_usb_rx_defer_expired(unsigned long data)
struct irda_skb_cb *cb;
struct urb *next_urb;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Find ourselves */
cb = (struct irda_skb_cb *) skb->cb;
IRDA_ASSERT(cb != NULL, return;);
@@ -1049,8 +1048,8 @@ static int stir421x_fw_upload(struct irda_usb_cb *self,
self->bulk_out_ep),
patch_block, block_size,
&actual_len, msecs_to_jiffies(500));
- IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n",
- __func__, actual_len, ret);
+ pr_debug("%s(): Bulk send %u bytes, ret=%d\n",
+ __func__, actual_len, ret);
if (ret < 0)
break;
@@ -1088,8 +1087,8 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
return ret;
/* We get a patch from userspace */
- IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n",
- __func__, stir421x_fw_name, fw->size);
+ net_info_ratelimited("%s(): Received firmware %s (%zu bytes)\n",
+ __func__, stir421x_fw_name, fw->size);
ret = -EINVAL;
@@ -1112,8 +1111,8 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
+ ((build / 10) << 4)
+ (build % 10);
- IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n",
- __func__, fw_version);
+ pr_debug("%s(): Firmware Product version %ld\n",
+ __func__, fw_version);
}
}
@@ -1169,8 +1168,6 @@ static int irda_usb_net_open(struct net_device *netdev)
char hwname[16];
int i;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(netdev != NULL, return -1;);
self = netdev_priv(netdev);
IRDA_ASSERT(self != NULL, return -1;);
@@ -1179,13 +1176,13 @@ static int irda_usb_net_open(struct net_device *netdev)
/* Can only open the device if it's there */
if(!self->present) {
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_WARNING("%s(), device not present!\n", __func__);
+ net_warn_ratelimited("%s(), device not present!\n", __func__);
return -1;
}
if(self->needspatch) {
spin_unlock_irqrestore(&self->lock, flags);
- IRDA_WARNING("%s(), device needs patch\n", __func__) ;
+ net_warn_ratelimited("%s(), device needs patch\n", __func__);
return -EIO ;
}
@@ -1227,8 +1224,6 @@ static int irda_usb_net_open(struct net_device *netdev)
if (!skb) {
/* If this ever happen, we are in deep s***.
* Basically, we can't start the Rx path... */
- IRDA_WARNING("%s(), Failed to allocate Rx skb\n",
- __func__);
return -1;
}
//skb_reserve(newskb, USB_IRDA_HEADER - 1);
@@ -1251,8 +1246,6 @@ static int irda_usb_net_close(struct net_device *netdev)
struct irda_usb_cb *self;
int i;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
IRDA_ASSERT(netdev != NULL, return -1;);
self = netdev_priv(netdev);
IRDA_ASSERT(self != NULL, return -1;);
@@ -1306,7 +1299,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -1356,7 +1349,6 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
{
struct irda_class_desc *desc;
- IRDA_DEBUG(3, "%s()\n", __func__);
desc = self->irda_desc;
@@ -1372,8 +1364,10 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
self->qos.window_size.bits = desc->bmWindowSize;
self->qos.data_size.bits = desc->bmDataSize;
- IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
- __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits);
+ pr_debug("%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
+ __func__, self->qos.baud_rate.bits, self->qos.data_size.bits,
+ self->qos.window_size.bits, self->qos.additional_bofs.bits,
+ self->qos.min_turn_time.bits);
/* Don't always trust what the dongle tell us */
if(self->capability & IUC_SIR_ONLY)
@@ -1416,8 +1410,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
{
struct net_device *netdev = self->netdev;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
netdev->netdev_ops = &irda_usb_netdev_ops;
irda_usb_init_qos(self);
@@ -1432,8 +1424,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
*/
static inline void irda_usb_close(struct irda_usb_cb *self)
{
- IRDA_DEBUG(1, "%s()\n", __func__);
-
/* Remove netdevice */
unregister_netdev(self->netdev);
@@ -1505,13 +1495,15 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
/* This is our interrupt endpoint */
self->bulk_int_ep = ep;
} else {
- IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __func__, ep);
+ net_err_ratelimited("%s(), Unrecognised endpoint %02X\n",
+ __func__, ep);
}
}
}
- IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
- __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
+ pr_debug("%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
+ __func__, self->bulk_in_ep, self->bulk_out_ep,
+ self->bulk_out_mtu, self->bulk_int_ep);
return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0);
}
@@ -1573,13 +1565,13 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf
0, intf->altsetting->desc.bInterfaceNumber, desc,
sizeof(*desc), 500);
- IRDA_DEBUG(1, "%s(), ret=%d\n", __func__, ret);
+ pr_debug("%s(), ret=%d\n", __func__, ret);
if (ret < sizeof(*desc)) {
- IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n",
- (ret<0) ? "failed" : "too short", ret);
+ net_warn_ratelimited("usb-irda: class_descriptor read %s (%d)\n",
+ ret < 0 ? "failed" : "too short", ret);
}
else if (desc->bDescriptorType != USB_DT_IRDA) {
- IRDA_WARNING("usb-irda: bad class_descriptor type\n");
+ net_warn_ratelimited("usb-irda: bad class_descriptor type\n");
}
else {
#ifdef IU_DUMP_CLASS_DESC
@@ -1622,9 +1614,9 @@ static int irda_usb_probe(struct usb_interface *intf,
* don't need to check if the dongle is really ours.
* Jean II */
- IRDA_MESSAGE("IRDA-USB found at address %d, Vendor: %x, Product: %x\n",
- dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
+ net_info_ratelimited("IRDA-USB found at address %d, Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
net = alloc_irdadev(sizeof(*self));
if (!net)
@@ -1680,7 +1672,8 @@ static int irda_usb_probe(struct usb_interface *intf,
* specify an alternate, but very few driver do like this.
* Jean II */
ret = usb_set_interface(dev, intf->altsetting->desc.bInterfaceNumber, 0);
- IRDA_DEBUG(1, "usb-irda: set interface %d result %d\n", intf->altsetting->desc.bInterfaceNumber, ret);
+ pr_debug("usb-irda: set interface %d result %d\n",
+ intf->altsetting->desc.bInterfaceNumber, ret);
switch (ret) {
case 0:
break;
@@ -1688,10 +1681,11 @@ static int irda_usb_probe(struct usb_interface *intf,
/* Martin Diehl says if we get a -EPIPE we should
* be fine and we don't need to do a usb_clear_halt().
* - Jean II */
- IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __func__);
+ pr_debug("%s(), Received -EPIPE, ignoring...\n",
+ __func__);
break;
default:
- IRDA_DEBUG(0, "%s(), Unknown error %d\n", __func__, ret);
+ pr_debug("%s(), Unknown error %d\n", __func__, ret);
ret = -EIO;
goto err_out_3;
}
@@ -1700,7 +1694,7 @@ static int irda_usb_probe(struct usb_interface *intf,
interface = intf->cur_altsetting;
if(!irda_usb_parse_endpoints(self, interface->endpoint,
interface->desc.bNumEndpoints)) {
- IRDA_ERROR("%s(), Bogus endpoints...\n", __func__);
+ net_err_ratelimited("%s(), Bogus endpoints...\n", __func__);
ret = -EIO;
goto err_out_3;
}
@@ -1717,7 +1711,7 @@ static int irda_usb_probe(struct usb_interface *intf,
ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
0x02, 0x40, 0, 0, NULL, 0, 500);
if (ret < 0) {
- IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret);
+ pr_debug("usb_control_msg failed %d\n", ret);
goto err_out_3;
} else {
mdelay(10);
@@ -1746,7 +1740,7 @@ static int irda_usb_probe(struct usb_interface *intf,
if (ret)
goto err_out_5;
- IRDA_MESSAGE("IrDA: Registered device %s\n", net->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", net->name);
usb_set_intfdata(intf, self);
if (self->needspatch) {
@@ -1754,7 +1748,7 @@ static int irda_usb_probe(struct usb_interface *intf,
ret = stir421x_patch_device(self);
self->needspatch = (ret < 0);
if (self->needspatch) {
- IRDA_ERROR("STIR421X: Couldn't upload patch\n");
+ net_err_ratelimited("STIR421X: Couldn't upload patch\n");
goto err_out_6;
}
@@ -1809,8 +1803,6 @@ static void irda_usb_disconnect(struct usb_interface *intf)
struct irda_usb_cb *self = usb_get_intfdata(intf);
int i;
- IRDA_DEBUG(1, "%s()\n", __func__);
-
usb_set_intfdata(intf, NULL);
if (!self)
return;
@@ -1859,7 +1851,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
/* Free self and network device */
free_netdev(self->netdev);
- IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__);
+ pr_debug("%s(), USB IrDA Disconnected\n", __func__);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 24b6dddd7f2f..696852eb23c3 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -231,7 +231,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
dev = priv->dev;
if (!dev) {
- IRDA_WARNING("%s(), not ready yet!\n", __func__);
+ net_warn_ratelimited("%s(), not ready yet!\n", __func__);
return;
}
@@ -240,7 +240,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
* Characters received with a parity error, etc?
*/
if (fp && *fp++) {
- IRDA_DEBUG(0, "Framing or parity error!\n");
+ pr_debug("Framing or parity error!\n");
sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
return;
}
@@ -387,7 +387,7 @@ static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
IRDA_ASSERT(priv != NULL, return -ENODEV;);
IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
- IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __func__, cmd);
+ pr_debug("%s(cmd=0x%X)\n", __func__, cmd);
dev = priv->dev;
IRDA_ASSERT(dev != NULL, return -1;);
@@ -477,7 +477,7 @@ static int irtty_open(struct tty_struct *tty)
mutex_unlock(&irtty_mutex);
- IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __func__, tty->name);
+ pr_debug("%s - %s: irda line discipline opened\n", __func__, tty->name);
return 0;
@@ -528,7 +528,7 @@ static void irtty_close(struct tty_struct *tty)
kfree(priv);
- IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __func__, tty->name);
+ pr_debug("%s - %s: irda line discipline closed\n", __func__, tty->name);
}
/* ------------------------------------------------------- */
@@ -555,8 +555,8 @@ static int __init irtty_sir_init(void)
int err;
if ((err = tty_register_ldisc(N_IRDA, &irda_ldisc)) != 0)
- IRDA_ERROR("IrDA: can't register line discipline (err = %d)\n",
- err);
+ net_err_ratelimited("IrDA: can't register line discipline (err = %d)\n",
+ err);
return err;
}
@@ -565,8 +565,8 @@ static void __exit irtty_sir_cleanup(void)
int err;
if ((err = tty_unregister_ldisc(N_IRDA))) {
- IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n",
- __func__, err);
+ net_err_ratelimited("%s(), can't unregister line discipline (err = %d)\n",
+ __func__, err);
}
}
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index 6827777cbeea..8eefcb44bac3 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -76,8 +76,6 @@ static int litelink_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power up dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -93,8 +91,6 @@ static int litelink_open(struct sir_dev *dev)
static int litelink_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -111,8 +107,6 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
{
int i;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* dongle already reset by irda-thread - current speed (dongle and
* port) is the default speed (115200 for litelink!)
*/
@@ -154,8 +148,6 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
*/
static int litelink_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* probably the power-up can be dropped here, but with only
* 15 usec delay it's not worth the risk unless somebody with
* the hardware confirms it doesn't break anything...
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index a9a81358477b..a764817b47f1 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -65,13 +65,11 @@ static struct dongle_driver ma600 = {
static int __init ma600_sir_init(void)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
return irda_register_dongle(&ma600);
}
static void __exit ma600_sir_cleanup(void)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
irda_unregister_dongle(&ma600);
}
@@ -86,8 +84,6 @@ static int ma600_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
sirdev_set_dtr_rts(dev, TRUE, TRUE);
/* Explicitly set the speeds we can accept */
@@ -104,8 +100,6 @@ static int ma600_open(struct sir_dev *dev)
static int ma600_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -174,8 +168,8 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
{
u8 byte;
- IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __func__,
- speed, dev->speed);
+ pr_debug("%s(), speed=%d (was %d)\n", __func__,
+ speed, dev->speed);
/* dongle already reset, dongle and port at default speed (9600) */
@@ -198,13 +192,13 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
sirdev_raw_read(dev, &byte, sizeof(byte));
if (byte != get_control_byte(speed)) {
- IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n",
- __func__, (unsigned) byte,
- (unsigned) get_control_byte(speed));
+ net_warn_ratelimited("%s(): bad control byte read-back %02x != %02x\n",
+ __func__, (unsigned)byte,
+ (unsigned)get_control_byte(speed));
return -1;
}
else
- IRDA_DEBUG(2, "%s() control byte write read OK\n", __func__);
+ pr_debug("%s() control byte write read OK\n", __func__);
#endif
/* Set DTR, Set RTS */
@@ -236,8 +230,6 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
static int ma600_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Reset the dongle : set DTR low for 10 ms */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
msleep(10);
diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c
index 5e2f4859cee7..2e33f91bfe8f 100644
--- a/drivers/net/irda/mcp2120-sir.c
+++ b/drivers/net/irda/mcp2120-sir.c
@@ -63,8 +63,6 @@ static int mcp2120_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* seems no explicit power-on required here and reset switching it on anyway */
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
@@ -76,8 +74,6 @@ static int mcp2120_open(struct sir_dev *dev)
static int mcp2120_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
/* reset and inhibit mcp2120 */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -102,8 +98,6 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
u8 control[2];
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch (state) {
case SIRDEV_STATE_DONGLE_SPEED:
/* Set DTR to enter command mode */
@@ -155,7 +149,8 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s(), undefine state %d\n", __func__, state);
+ net_err_ratelimited("%s(), undefine state %d\n",
+ __func__, state);
ret = -EINVAL;
break;
}
@@ -187,8 +182,6 @@ static int mcp2120_reset(struct sir_dev *dev)
unsigned delay = 0;
int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch (state) {
case SIRDEV_STATE_DONGLE_RESET:
//printk("mcp2120_reset: dongle_reset\n");
@@ -213,7 +206,8 @@ static int mcp2120_reset(struct sir_dev *dev)
break;
default:
- IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
ret = -EINVAL;
break;
}
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 16f8ffb50e04..e4d678fbeb2f 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -197,14 +197,14 @@ error:
/* Setup a communication between mcs7780 and agilent chip. */
static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs)
{
- IRDA_WARNING("This transceiver type is not supported yet.\n");
+ net_warn_ratelimited("This transceiver type is not supported yet\n");
return 1;
}
/* Setup a communication between mcs7780 and sharp chip. */
static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs)
{
- IRDA_WARNING("This transceiver type is not supported yet.\n");
+ net_warn_ratelimited("This transceiver type is not supported yet\n");
return 1;
}
@@ -213,9 +213,9 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
{
int ret = 0;
__u16 rval;
- char *msg;
+ const char *msg;
- msg = "Basic transceiver setup error.";
+ msg = "Basic transceiver setup error";
/* read value of MODE Register, set the DRIVER and RESET bits
* and write value back out to MODE Register
@@ -261,7 +261,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
if(unlikely(ret))
goto error;
- msg = "transceiver model specific setup error.";
+ msg = "transceiver model specific setup error";
switch (mcs->transceiver_type) {
case MCS_TSC_VISHAY:
ret = mcs_setup_transceiver_vishay(mcs);
@@ -276,8 +276,8 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
break;
default:
- IRDA_WARNING("Unknown transceiver type: %d\n",
- mcs->transceiver_type);
+ net_warn_ratelimited("Unknown transceiver type: %d\n",
+ mcs->transceiver_type);
ret = 1;
}
if (unlikely(ret))
@@ -300,7 +300,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
goto error;
}
- msg = "transceiver reset.";
+ msg = "transceiver reset";
ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
if (unlikely(ret != 2))
@@ -315,7 +315,7 @@ static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
return ret;
error:
- IRDA_ERROR("%s\n", msg);
+ net_err_ratelimited("%s\n", msg);
return ret;
}
@@ -399,8 +399,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
new_len = len - 2;
if(unlikely(new_len <= 0)) {
- IRDA_ERROR("%s short frame length %d\n",
- mcs->netdev->name, new_len);
+ net_err_ratelimited("%s short frame length %d\n",
+ mcs->netdev->name, new_len);
++mcs->netdev->stats.rx_errors;
++mcs->netdev->stats.rx_length_errors;
return;
@@ -409,8 +409,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
fcs = irda_calc_crc16(~fcs, buf, len);
if(fcs != GOOD_FCS) {
- IRDA_ERROR("crc error calc 0x%x len %d\n",
- fcs, new_len);
+ net_err_ratelimited("crc error calc 0x%x len %d\n",
+ fcs, new_len);
mcs->netdev->stats.rx_errors++;
mcs->netdev->stats.rx_crc_errors++;
return;
@@ -452,8 +452,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
new_len = len - 4;
if(unlikely(new_len <= 0)) {
- IRDA_ERROR("%s short frame length %d\n",
- mcs->netdev->name, new_len);
+ net_err_ratelimited("%s short frame length %d\n",
+ mcs->netdev->name, new_len);
++mcs->netdev->stats.rx_errors;
++mcs->netdev->stats.rx_length_errors;
return;
@@ -461,7 +461,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
fcs = ~(crc32_le(~0, buf, new_len));
if(fcs != get_unaligned_le32(buf + new_len)) {
- IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
+ net_err_ratelimited("crc error calc 0x%x len %d\n",
+ fcs, new_len);
mcs->netdev->stats.rx_errors++;
mcs->netdev->stats.rx_crc_errors++;
return;
@@ -583,7 +584,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
} while(cnt++ < 100 && (rval & MCS_IRINTX));
if (cnt > 100) {
- IRDA_ERROR("unable to change speed\n");
+ net_err_ratelimited("unable to change speed\n");
ret = -EIO;
goto error;
}
@@ -634,8 +635,8 @@ static int mcs_speed_change(struct mcs_cb *mcs)
default:
ret = 1;
- IRDA_WARNING("Unknown transceiver type: %d\n",
- mcs->transceiver_type);
+ net_warn_ratelimited("Unknown transceiver type: %d\n",
+ mcs->transceiver_type);
}
if (unlikely(ret))
goto error;
@@ -731,7 +732,7 @@ static int mcs_net_open(struct net_device *netdev)
sprintf(hwname, "usb#%d", mcs->usbdev->devnum);
mcs->irlap = irlap_open(netdev, &mcs->qos, hwname);
if (!mcs->irlap) {
- IRDA_ERROR("mcs7780: irlap_open failed\n");
+ net_err_ratelimited("mcs7780: irlap_open failed\n");
goto error2;
}
@@ -851,7 +852,7 @@ static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
mcs->out_buf, wraplen, mcs_send_irq, mcs);
if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) {
- IRDA_ERROR("failed tx_urb: %d\n", ret);
+ net_err_ratelimited("failed tx_urb: %d\n", ret);
switch (ret) {
case -ENODEV:
case -EPIPE:
@@ -893,13 +894,13 @@ static int mcs_probe(struct usb_interface *intf,
if (!ndev)
goto error1;
- IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
+ pr_debug("MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
SET_NETDEV_DEV(ndev, &intf->dev);
ret = usb_reset_configuration(udev);
if (ret != 0) {
- IRDA_ERROR("mcs7780: usb reset configuration failed\n");
+ net_err_ratelimited("mcs7780: usb reset configuration failed\n");
goto error2;
}
@@ -941,8 +942,8 @@ static int mcs_probe(struct usb_interface *intf,
if (ret != 0)
goto error2;
- IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n",
- ndev->name);
+ pr_debug("IrDA: Registered MosChip MCS7780 device as %s\n",
+ ndev->name);
mcs->transceiver_type = transceiver_type;
mcs->sir_tweak = sir_tweak;
@@ -972,7 +973,7 @@ static void mcs_disconnect(struct usb_interface *intf)
free_netdev(mcs->netdev);
usb_set_intfdata(intf, NULL);
- IRDA_DEBUG(0, "MCS7780 now disconnected.\n");
+ pr_debug("MCS7780 now disconnected.\n");
}
module_usb_driver(mcs_driver);
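
The char *msg -> const char *msg change in mcs_setup_transceiver() above is more than style: msg only ever points at string literals, which live in read-only storage, so the const-qualified pointer lets the compiler reject accidental writes through it. A minimal sketch of the idiom (function name hypothetical):

static const char *mcs_pick_msg(int model_specific)
{
	const char *msg = "Basic transceiver setup error";

	if (model_specific)
		msg = "transceiver model specific setup error";
	/* re-pointing msg is fine; writing through it would not be */
	return msg;
}
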
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 66bc03bdb138..e7317b104bfb 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -211,7 +211,8 @@ static int __init nsc_ircc_init(void)
ret = platform_driver_register(&nsc_ircc_driver);
if (ret) {
- IRDA_ERROR("%s, Can't register driver!\n", driver_name);
+ net_err_ratelimited("%s, Can't register driver!\n",
+ driver_name);
return ret;
}
@@ -225,8 +226,8 @@ static int __init nsc_ircc_init(void)
/* Probe for all the NSC chipsets we know about */
for (chip = chips; chip->name ; chip++) {
- IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__,
- chip->name);
+ pr_debug("%s(), Probing for %s ...\n", __func__,
+ chip->name);
/* Try all config registers for this chip */
for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) {
@@ -237,7 +238,8 @@ static int __init nsc_ircc_init(void)
/* Read index register */
reg = inb(cfg_base);
if (reg == 0xff) {
- IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __func__, cfg_base);
+ pr_debug("%s() no chip at 0x%03x\n",
+ __func__, cfg_base);
continue;
}
@@ -245,8 +247,9 @@ static int __init nsc_ircc_init(void)
outb(chip->cid_index, cfg_base);
id = inb(cfg_base+1);
if ((id & chip->cid_mask) == chip->cid_value) {
- IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
- __func__, chip->name, id & ~chip->cid_mask);
+ pr_debug("%s() Found %s chip, revision=%d\n",
+ __func__, chip->name,
+ id & ~chip->cid_mask);
/*
* If we found a correct PnP setting,
@@ -260,7 +263,8 @@ static int __init nsc_ircc_init(void)
info.irq = pnp_info.irq;
if (info.fir_base < 0x2000) {
- IRDA_MESSAGE("%s, chip->init\n", driver_name);
+ net_info_ratelimited("%s, chip->init\n",
+ driver_name);
chip->init(chip, &info);
} else
chip->probe(chip, &info);
@@ -275,7 +279,8 @@ static int __init nsc_ircc_init(void)
* the chip.
*/
if (ret) {
- IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name);
+ pr_debug("%s, PnP init failed\n",
+ driver_name);
memset(&info, 0, sizeof(chipio_t));
info.cfg_base = cfg_base;
info.fir_base = io[i];
@@ -297,7 +302,8 @@ static int __init nsc_ircc_init(void)
}
i++;
} else {
- IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __func__, id);
+ pr_debug("%s(), Wrong chip id=0x%02x\n",
+ __func__, id);
}
}
}
@@ -361,31 +367,29 @@ static int __init nsc_ircc_open(chipio_t *info)
void *ret;
int err, chip_index;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
-
for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
if (!dev_self[chip_index])
break;
}
if (chip_index == ARRAY_SIZE(dev_self)) {
- IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__);
+ net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
+ __func__);
return -ENOMEM;
}
- IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name,
- info->cfg_base);
+ net_info_ratelimited("%s, Found chip at base=0x%03x\n",
+ driver_name, info->cfg_base);
if ((nsc_ircc_setup(info)) == -1)
return -1;
- IRDA_MESSAGE("%s, driver loaded (Dag Brattli)\n", driver_name);
+ net_info_ratelimited("%s, driver loaded (Dag Brattli)\n", driver_name);
dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
if (dev == NULL) {
- IRDA_ERROR("%s(), can't allocate memory for "
- "control block!\n", __func__);
+ net_err_ratelimited("%s(), can't allocate memory for control block!\n",
+ __func__);
return -ENOMEM;
}
@@ -408,8 +412,8 @@ static int __init nsc_ircc_open(chipio_t *info)
/* Reserve the ioports that we need */
ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
if (!ret) {
- IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
- __func__, self->io.fir_base);
+ net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
err = -ENODEV;
goto out1;
}
@@ -460,21 +464,22 @@ static int __init nsc_ircc_open(chipio_t *info)
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
goto out4;
}
- IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
/* Check if user has supplied a valid dongle id or not */
if ((dongle_id <= 0) ||
(dongle_id >= ARRAY_SIZE(dongle_types))) {
dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);
- IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name,
- dongle_types[dongle_id]);
+ net_info_ratelimited("%s, Found dongle: %s\n",
+ driver_name, dongle_types[dongle_id]);
} else {
- IRDA_MESSAGE("%s, Using dongle: %s\n", driver_name,
- dongle_types[dongle_id]);
+ net_info_ratelimited("%s, Using dongle: %s\n",
+ driver_name, dongle_types[dongle_id]);
}
self->io.dongle_id = dongle_id;
@@ -516,8 +521,6 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
IRDA_ASSERT(self != NULL, return -1;);
iobase = self->io.fir_base;
@@ -528,8 +531,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
- __func__, self->io.fir_base);
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -567,7 +570,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 0x2e8: outb(0x15, cfg_base+1); break;
case 0x3f8: outb(0x16, cfg_base+1); break;
case 0x2f8: outb(0x17, cfg_base+1); break;
- default: IRDA_ERROR("%s(), invalid base_address", __func__);
+ default: net_err_ratelimited("%s(), invalid base_address\n", __func__);
}
/* Control Signal Routing Register (CSRT) */
@@ -579,7 +582,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 9: temp = 0x05; break;
case 11: temp = 0x06; break;
case 15: temp = 0x07; break;
- default: IRDA_ERROR("%s(), invalid irq", __func__);
+ default: net_err_ratelimited("%s(), invalid irq\n", __func__);
}
outb(CFG_108_CSRT, cfg_base);
@@ -587,7 +590,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 0: outb(0x08+temp, cfg_base+1); break;
case 1: outb(0x10+temp, cfg_base+1); break;
case 3: outb(0x18+temp, cfg_base+1); break;
- default: IRDA_ERROR("%s(), invalid dma", __func__);
+ default: net_err_ratelimited("%s(), invalid dma\n", __func__);
}
outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
@@ -626,8 +629,8 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
break;
}
info->sir_base = info->fir_base;
- IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__,
- info->fir_base);
+ pr_debug("%s(), probing fir_base=0x%03x\n", __func__,
+ info->fir_base);
/* Read control signals routing register (CSRT) */
outb(CFG_108_CSRT, cfg_base);
@@ -659,7 +662,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
info->irq = 15;
break;
}
- IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
+ pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
/* Currently we only read Rx DMA but it will also be used for Tx */
switch ((reg >> 3) & 0x03) {
@@ -676,7 +679,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
info->dma = 3;
break;
}
- IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
+ pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
/* Read mode control register (MCTL) */
outb(CFG_108_MCTL, cfg_base);
@@ -727,7 +730,7 @@ static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info)
pnp = (reg >> 3) & 0x01;
if (pnp) {
- IRDA_DEBUG(2, "(), Chip is in PnP mode\n");
+ pr_debug("(), Chip is in PnP mode\n");
outb(0x46, cfg_base);
reg = (inb(cfg_base+1) & 0xfe) << 2;
@@ -831,9 +834,8 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
int enabled;
/* User is sure about his config... accept it. */
- IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
- "io=0x%04x, irq=%d, dma=%d\n",
- __func__, info->fir_base, info->irq, info->dma);
+ pr_debug("%s(): nsc_ircc_init_39x (user settings): io=0x%04x, irq=%d, dma=%d\n",
+ __func__, info->fir_base, info->irq, info->dma);
/* Access bank for SP2 */
outb(CFG_39X_LDN, cfg_base);
@@ -873,8 +875,8 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
int reg1, reg2, irq, irqt, dma1, dma2;
int enabled, susp;
- IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
- __func__, cfg_base);
+ pr_debug("%s(), nsc_ircc_probe_39x, base=%d\n",
+ __func__, cfg_base);
/* This function should be executed with irq off to avoid
* another driver messing with the Super I/O bank - Jean II */
@@ -908,7 +910,8 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
outb(CFG_39X_SPC, cfg_base);
susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
- IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);
+ pr_debug("%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n",
+ __func__, reg1, reg2, irq, irqt, dma1, dma2, enabled, susp);
/* Configure SP2 */
@@ -959,8 +962,8 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
!(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED))
pnp_info.dma = pnp_dma(dev, 0);
- IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
- __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
+ pr_debug("%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
+ __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
if((pnp_info.fir_base == 0) ||
(pnp_info.irq == -1) || (pnp_info.dma == -1)) {
@@ -988,13 +991,13 @@ static int nsc_ircc_setup(chipio_t *info)
switch_bank(iobase, BANK3);
version = inb(iobase+MID);
- IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
- __func__, driver_name, version);
+ pr_debug("%s() Driver %s Found chip version %02x\n",
+ __func__, driver_name, version);
/* Should be 0x2? */
if (0x20 != (version & 0xf0)) {
- IRDA_ERROR("%s, Wrong chip version %02x\n",
- driver_name, version);
+ net_err_ratelimited("%s, Wrong chip version %02x\n",
+ driver_name, version);
return -1;
}
@@ -1092,39 +1095,39 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
switch (dongle_id) {
case 0x00: /* same as */
case 0x01: /* Differential serial interface */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x02: /* same as */
case 0x03: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x04: /* Sharp RY5HD01 */
break;
case 0x05: /* Reserved, but this is what the Thinkpad reports */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x06: /* Single-ended serial interface */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x07: /* Consumer-IR only */
- IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
- IRDA_DEBUG(0, "%s(), %s\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
outb(0x28, iobase+7); /* Set irsl[0-2] as output */
break;
case 0x0A: /* same as */
case 0x0B: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x0C: /* same as */
case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1138,15 +1141,15 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
outb(0x28, iobase+7); /* Set irsl[0-2] as output */
break;
case 0x0F: /* No dongle connected */
- IRDA_DEBUG(0, "%s(), %s\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
switch_bank(iobase, BANK0);
outb(0x62, iobase+MCR);
break;
default:
- IRDA_DEBUG(0, "%s(), invalid dongle_id %#x",
- __func__, dongle_id);
+ pr_debug("%s(), invalid dongle_id %#x",
+ __func__, dongle_id);
}
/* IRCFG1: IRSL1 and 2 are set to IrDA mode */
@@ -1177,31 +1180,31 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
switch (dongle_id) {
case 0x00: /* same as */
case 0x01: /* Differential serial interface */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x02: /* same as */
case 0x03: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x04: /* Sharp RY5HD01 */
break;
case 0x05: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x06: /* Single-ended serial interface */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x07: /* Consumer-IR only */
- IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
- IRDA_DEBUG(0, "%s(), %s\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
outb(0x00, iobase+4);
if (speed > 115200)
outb(0x01, iobase+4);
@@ -1219,8 +1222,8 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
break;
case 0x0A: /* same as */
case 0x0B: /* Reserved */
- IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
break;
case 0x0C: /* same as */
case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1228,14 +1231,14 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
case 0x0E: /* Supports SIR Mode only */
break;
case 0x0F: /* No dongle connected */
- IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
- __func__, dongle_types[dongle_id]);
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
switch_bank(iobase, BANK0);
outb(0x62, iobase+MCR);
break;
default:
- IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__);
+ pr_debug("%s(), invalid data_rate\n", __func__);
}
/* Restore bank register */
outb(bank, iobase+BSR);
@@ -1256,7 +1259,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
__u8 bank;
__u8 ier; /* Interrupt enable register */
- IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed);
+ pr_debug("%s(), speed=%d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return 0;);
@@ -1289,20 +1292,20 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
outb(inb(iobase+4) | 0x04, iobase+4);
mcr = MCR_MIR;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
+ pr_debug("%s(), handling baud of 576000\n", __func__);
break;
case 1152000:
mcr = MCR_MIR;
- IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
+ pr_debug("%s(), handling baud of 1152000\n", __func__);
break;
case 4000000:
mcr = MCR_FIR;
- IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
+ pr_debug("%s(), handling baud of 4000000\n", __func__);
break;
default:
mcr = MCR_FIR;
- IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
- __func__, speed);
+ pr_debug("%s(), unknown baud rate of %d\n",
+ __func__, speed);
break;
}
@@ -1609,15 +1612,13 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
int actual = 0;
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __func__);
-
/* Save current bank */
bank = inb(iobase+BSR);
switch_bank(iobase, BANK0);
if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
- IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n",
- __func__);
+ pr_debug("%s(), warning, FIFO not empty yet!\n",
+ __func__);
/* FIFO may still be filled to the Tx interrupt threshold */
fifo_size -= 17;
@@ -1629,8 +1630,8 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
outb(buf[actual++], iobase+TXD);
}
- IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
- __func__, fifo_size, actual, len);
+ pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
+ __func__, fifo_size, actual, len);
/* Restore bank */
outb(bank, iobase+BSR);
@@ -1651,8 +1652,6 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
__u8 bank;
int ret = TRUE;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
iobase = self->io.fir_base;
/* Save current bank */
@@ -1782,7 +1781,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
if (st_fifo->tail >= MAX_RX_WINDOW) {
- IRDA_DEBUG(0, "%s(), window is full!\n", __func__);
+ pr_debug("%s(), window is full!\n", __func__);
continue;
}
@@ -1872,9 +1871,6 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
skb = dev_alloc_skb(len+1);
if (skb == NULL) {
- IRDA_WARNING("%s(), memory squeeze, "
- "dropping frame.\n",
- __func__);
self->netdev->stats.rx_dropped++;
/* Restore bank register */
@@ -1979,7 +1975,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
* Need to be after self->io.direction to avoid race with
* nsc_ircc_hard_xmit_sir() - Jean II */
if (self->new_speed) {
- IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__);
+ pr_debug("%s(), Changing speed!\n", __func__);
self->ier = nsc_ircc_change_speed(self,
self->new_speed);
self->new_speed = 0;
@@ -2063,9 +2059,8 @@ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
nsc_ircc_dma_receive(self);
self->ier = IER_SFIF_IE;
} else
- IRDA_WARNING("%s(), potential "
- "Tx queue lockup !\n",
- __func__);
+ net_warn_ratelimited("%s(), potential Tx queue lockup !\n",
+ __func__);
}
} else {
/* Not finished yet, so interrupt on DMA again */
@@ -2174,7 +2169,6 @@ static int nsc_ircc_net_open(struct net_device *dev)
char hwname[32];
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -2184,8 +2178,8 @@ static int nsc_ircc_net_open(struct net_device *dev)
iobase = self->io.fir_base;
if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) {
- IRDA_WARNING("%s, unable to allocate irq=%d\n",
- driver_name, self->io.irq);
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
return -EAGAIN;
}
/*
@@ -2193,8 +2187,8 @@ static int nsc_ircc_net_open(struct net_device *dev)
* failure.
*/
if (request_dma(self->io.dma, dev->name)) {
- IRDA_WARNING("%s, unable to allocate dma=%d\n",
- driver_name, self->io.dma);
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
free_irq(self->io.irq, dev);
return -EAGAIN;
}
@@ -2236,7 +2230,6 @@ static int nsc_ircc_net_close(struct net_device *dev)
int iobase;
__u8 bank;
- IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
@@ -2290,7 +2283,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -2329,7 +2322,7 @@ static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
if (self->io.suspended)
return 0;
- IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
+ pr_debug("%s, Suspending\n", driver_name);
rtnl_lock();
if (netif_running(self->netdev)) {
@@ -2363,7 +2356,7 @@ static int nsc_ircc_resume(struct platform_device *dev)
if (!self->io.suspended)
return 0;
- IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
+ pr_debug("%s, Waking up\n", driver_name);
rtnl_lock();
nsc_ircc_setup(&self->io);
@@ -2372,8 +2365,8 @@ static int nsc_ircc_resume(struct platform_device *dev)
if (netif_running(self->netdev)) {
if (request_irq(self->io.irq, nsc_ircc_interrupt, 0,
self->netdev->name, self->netdev)) {
- IRDA_WARNING("%s, unable to allocate irq=%d\n",
- driver_name, self->io.irq);
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
/*
* Don't fail resume process, just kill this
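
Note that the IRDA_ASSERT() calls in nsc-ircc.c are deliberately left alone by this series; they are a separate irda-local macro that both checks an invariant and executes a recovery statement. Roughly, from include/net/irda/irda.h (an assumption about the exact shape — without CONFIG_IRDA_DEBUG the check remains but the printk is dropped):

#ifdef CONFIG_IRDA_DEBUG
#define IRDA_ASSERT(expr, func)					\
do { if (!(expr)) {						\
	printk("Assertion failed! %s:%s:%d %s\n",		\
	       __FILE__, __func__, __LINE__, #expr);		\
	func } } while (0)
#else
#define IRDA_ASSERT(expr, func) do { if (!(expr)) { func } } while (0)
#endif
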
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index f237136f3827..a7c2e990ae69 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -90,8 +90,6 @@ static int old_belkin_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power on dongle */
sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -108,8 +106,6 @@ static int old_belkin_open(struct sir_dev *dev)
static int old_belkin_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -123,8 +119,6 @@ static int old_belkin_close(struct sir_dev *dev)
*/
static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
dev->speed = 9600;
return (speed==dev->speed) ? 0 : -EINVAL;
}
@@ -137,8 +131,6 @@ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
*/
static int old_belkin_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* This dongles speed "defaults" to 9600 bps ;-) */
dev->speed = 9600;
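
Two fates for IRDA_DEBUG() calls are visible in this series: traces that carry information become pr_debug(), while bare function-entry traces like the four removed from old_belkin-sir.c above are deleted outright (ftrace already covers function entry). pr_debug() costs nothing in a normal build; its rough shape in include/linux/printk.h:

#if defined(CONFIG_DYNAMIC_DEBUG)
#define pr_debug(fmt, ...) \
	dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif

With CONFIG_DYNAMIC_DEBUG the surviving sites can still be enabled per file, function, or line at runtime through <debugfs>/dynamic_debug/control.
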
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 43e9ab4f4d7e..6af26a7d787c 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -82,7 +82,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
return 0;
default:
- IRDA_ERROR("%s - undefined state\n", __func__);
+ net_err_ratelimited("%s - undefined state\n", __func__);
return -EINVAL;
}
fsm->substate = next_state;
@@ -109,11 +109,11 @@ static void sirdev_config_fsm(struct work_struct *work)
int ret = -1;
unsigned delay;
- IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
+ pr_debug("%s(), <%ld>\n", __func__, jiffies);
do {
- IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
- __func__, fsm->state, fsm->substate);
+ pr_debug("%s - state=0x%04x / substate=0x%04x\n",
+ __func__, fsm->state, fsm->substate);
next_state = fsm->state;
delay = 0;
@@ -251,12 +251,13 @@ static void sirdev_config_fsm(struct work_struct *work)
break;
default:
- IRDA_ERROR("%s - undefined state\n", __func__);
+ net_err_ratelimited("%s - undefined state\n", __func__);
fsm->result = -EINVAL;
/* fall thru */
case SIRDEV_STATE_ERROR:
- IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
+ net_err_ratelimited("%s - error: %d\n",
+ __func__, fsm->result);
#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
netif_stop_queue(dev->netdev);
@@ -286,12 +287,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
{
struct sir_fsm *fsm = &dev->fsm;
- IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
- initial_state, param);
+ pr_debug("%s - state=0x%04x / param=%u\n", __func__,
+ initial_state, param);
if (down_trylock(&fsm->sem)) {
if (in_interrupt() || in_atomic() || irqs_disabled()) {
- IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
+ pr_debug("%s(), state machine busy!\n", __func__);
return -EWOULDBLOCK;
} else
down(&fsm->sem);
@@ -299,7 +300,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
if (fsm->state == SIRDEV_STATE_DEAD) {
/* race with sirdev_close should never happen */
- IRDA_ERROR("%s(), instance staled!\n", __func__);
+ net_err_ratelimited("%s(), instance staled!\n", __func__);
up(&fsm->sem);
return -ESTALE; /* or better EPIPE? */
}
@@ -344,7 +345,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
int err;
- IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
+ pr_debug("%s : requesting dongle %d.\n", __func__, type);
err = sirdev_schedule_dongle_open(dev, type);
if (unlikely(err))
@@ -379,7 +380,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
if (ret > 0) {
- IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
+ pr_debug("%s(), raw-tx started\n", __func__);
dev->tx_buff.data += ret;
dev->tx_buff.len -= ret;
@@ -439,8 +440,8 @@ void sirdev_write_complete(struct sir_dev *dev)
spin_lock_irqsave(&dev->tx_lock, flags);
- IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
- __func__, dev->tx_buff.len);
+ pr_debug("%s() - dev->tx_buff.len = %d\n",
+ __func__, dev->tx_buff.len);
if (likely(dev->tx_buff.len > 0)) {
/* Write data left in transmit buffer */
@@ -452,8 +453,8 @@ void sirdev_write_complete(struct sir_dev *dev)
}
else if (unlikely(actual<0)) {
/* could be dropped later when we have tx_timeout to recover */
- IRDA_ERROR("%s: drv->do_write failed (%d)\n",
- __func__, actual);
+ net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+ __func__, actual);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
dev_kfree_skb_any(skb);
@@ -474,7 +475,7 @@ void sirdev_write_complete(struct sir_dev *dev)
* restarted when the irda-thread has completed the request.
*/
- IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
+ pr_debug("%s(), raw-tx done\n", __func__);
dev->raw_tx = 0;
goto done; /* no post-frame handling in raw mode */
}
@@ -491,7 +492,7 @@ void sirdev_write_complete(struct sir_dev *dev)
* re-activated.
*/
- IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
+ pr_debug("%s(), finished with frame!\n", __func__);
if ((skb=dev->tx_skb) != NULL) {
dev->tx_skb = NULL;
@@ -501,14 +502,14 @@ void sirdev_write_complete(struct sir_dev *dev)
}
if (unlikely(dev->new_speed > 0)) {
- IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
+ pr_debug("%s(), Changing speed!\n", __func__);
err = sirdev_schedule_speed(dev, dev->new_speed);
if (unlikely(err)) {
/* should never happen
* forget the speed change and hope the stack recovers
*/
- IRDA_ERROR("%s - schedule speed change failed: %d\n",
- __func__, err);
+ net_err_ratelimited("%s - schedule speed change failed: %d\n",
+ __func__, err);
netif_wake_queue(dev->netdev);
}
/* else: success
@@ -535,13 +536,13 @@ EXPORT_SYMBOL(sirdev_write_complete);
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
if (!dev || !dev->netdev) {
- IRDA_WARNING("%s(), not ready yet!\n", __func__);
+ net_warn_ratelimited("%s(), not ready yet!\n", __func__);
return -1;
}
if (!dev->irlap) {
- IRDA_WARNING("%s - too early: %p / %zd!\n",
- __func__, cp, count);
+ net_warn_ratelimited("%s - too early: %p / %zd!\n",
+ __func__, cp, count);
return -1;
}
@@ -551,7 +552,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
*/
irda_device_set_media_busy(dev->netdev, TRUE);
dev->netdev->stats.rx_dropped++;
- IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
+ pr_debug("%s; rx-drop: %zd\n", __func__, count);
return 0;
}
@@ -597,7 +598,7 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
netif_stop_queue(ndev);
- IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
+ pr_debug("%s(), skb->len = %d\n", __func__, skb->len);
speed = irda_get_next_speed(skb);
if ((speed != dev->speed) && (speed != -1)) {
@@ -634,7 +635,7 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
/* Check problems */
if(spin_is_locked(&dev->tx_lock)) {
- IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
+ pr_debug("%s(), write not completed\n", __func__);
}
/* serialize with write completion */
@@ -661,8 +662,8 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
}
else if (unlikely(actual < 0)) {
/* could be dropped later when we have tx_timeout to recover */
- IRDA_ERROR("%s: drv->do_write failed (%d)\n",
- __func__, actual);
+ net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+ __func__, actual);
dev_kfree_skb_any(skb);
dev->netdev->stats.tx_errors++;
dev->netdev->stats.tx_dropped++;
@@ -683,7 +684,7 @@ static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
IRDA_ASSERT(dev != NULL, return -1;);
- IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -800,8 +801,6 @@ static int sirdev_open(struct net_device *ndev)
if (!try_module_get(drv->owner))
return -ESTALE;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
if (sirdev_alloc_buffers(dev))
goto errout_dec;
@@ -818,7 +817,7 @@ static int sirdev_open(struct net_device *ndev)
netif_wake_queue(ndev);
- IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
+ pr_debug("%s - done, speed = %d\n", __func__, dev->speed);
return 0;
@@ -838,7 +837,7 @@ static int sirdev_close(struct net_device *ndev)
struct sir_dev *dev = netdev_priv(ndev);
const struct sir_driver *drv;
-// IRDA_DEBUG(0, "%s\n", __func__);
+/* pr_debug("%s\n", __func__); */
netif_stop_queue(ndev);
@@ -880,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
struct net_device *ndev;
struct sir_dev *dev;
- IRDA_DEBUG(0, "%s - %s\n", __func__, name);
+ pr_debug("%s - %s\n", __func__, name);
/* instead of adding tests to protect against drv->do_write==NULL
* at several places we refuse to create a sir_dev instance for
@@ -894,7 +893,8 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
*/
ndev = alloc_irdadev(sizeof(*dev));
if (ndev == NULL) {
- IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
+ net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n",
+ __func__);
goto out;
}
dev = netdev_priv(ndev);
@@ -919,7 +919,8 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
ndev->netdev_ops = &sirdev_ops;
if (register_netdev(ndev)) {
- IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
goto out_freenetdev;
}
@@ -936,7 +937,7 @@ int sirdev_put_instance(struct sir_dev *dev)
{
int err = 0;
- IRDA_DEBUG(0, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
atomic_set(&dev->enable_rx, 0);
@@ -946,7 +947,7 @@ int sirdev_put_instance(struct sir_dev *dev)
if (dev->dongle_drv)
err = sirdev_schedule_dongle_close(dev);
if (err)
- IRDA_ERROR("%s - error %d\n", __func__, err);
+ net_err_ratelimited("%s - error %d\n", __func__, err);
sirdev_close(dev->netdev);
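
Related: the hunks that touch dev_alloc_skb() failure paths (nsc_ircc_dma_receive_complete() above, smsc_ircc_dma_receive_complete() below) delete the "memory squeeze, dropping frame" warning instead of converting it, because the allocator already emits a standard OOM report on failure unless __GFP_NOWARN is passed. The resulting receive-path idiom, as a sketch (function name hypothetical):

static void rx_alloc_sketch(struct net_device *ndev, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 1);

	if (!skb) {
		/* no local warning: the allocator already logged it */
		ndev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 1);	/* keep the IP header aligned */
	/* ... copy the frame in and hand skb up to the stack ... */
}
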
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index cfbabb63f5cc..7436f73ff1bb 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -34,8 +34,8 @@ int irda_register_dongle(struct dongle_driver *new)
struct list_head *entry;
struct dongle_driver *drv;
- IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
- __func__, new->driver_name, new->type);
+ pr_debug("%s : registering dongle \"%s\" (%d).\n",
+ __func__, new->driver_name, new->type);
mutex_lock(&dongle_list_lock);
list_for_each(entry, &dongle_list) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 282120430f12..b455ffe8850c 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -419,13 +419,16 @@ static int __init smsc_ircc_legacy_probe(void)
#ifdef CONFIG_PCI
if (smsc_ircc_preconfigure_subsystems(ircc_cfg, ircc_fir, ircc_sir, ircc_dma, ircc_irq) < 0) {
/* Ignore errors from preconfiguration */
- IRDA_ERROR("%s, Preconfiguration failed !\n", driver_name);
+ net_err_ratelimited("%s, Preconfiguration failed !\n",
+ driver_name);
}
#endif
if (ircc_fir > 0 && ircc_sir > 0) {
- IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir);
- IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir);
+ net_info_ratelimited(" Overriding FIR address 0x%04x\n",
+ ircc_fir);
+ net_info_ratelimited(" Overriding SIR address 0x%04x\n",
+ ircc_sir);
if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq))
ret = -ENODEV;
@@ -434,8 +437,8 @@ static int __init smsc_ircc_legacy_probe(void)
/* try user provided configuration register base address */
if (ircc_cfg > 0) {
- IRDA_MESSAGE(" Overriding configuration address "
- "0x%04x\n", ircc_cfg);
+ net_info_ratelimited(" Overriding configuration address 0x%04x\n",
+ ircc_cfg);
if (!smsc_superio_fdc(ircc_cfg))
ret = 0;
if (!smsc_superio_lpc(ircc_cfg))
@@ -458,11 +461,12 @@ static int __init smsc_ircc_init(void)
{
int ret;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
ret = platform_driver_register(&smsc_ircc_driver);
if (ret) {
- IRDA_ERROR("%s, Can't register driver!\n", driver_name);
+ net_err_ratelimited("%s, Can't register driver!\n",
+ driver_name);
return ret;
}
@@ -519,7 +523,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
struct net_device *dev;
int err;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
err = smsc_ircc_present(fir_base, sir_base);
if (err)
@@ -527,7 +531,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
err = -ENOMEM;
if (dev_count >= ARRAY_SIZE(dev_self)) {
- IRDA_WARNING("%s(), too many devices!\n", __func__);
+ net_warn_ratelimited("%s(), too many devices!\n", __func__);
goto err_out1;
}
@@ -536,7 +540,8 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
*/
dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
if (!dev) {
- IRDA_WARNING("%s() can't allocate net device\n", __func__);
+ net_warn_ratelimited("%s() can't allocate net device\n",
+ __func__);
goto err_out1;
}
@@ -588,8 +593,8 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
err = register_netdev(self->netdev);
if (err) {
- IRDA_ERROR("%s, Network device registration failed!\n",
- driver_name);
+ net_err_ratelimited("%s, Network device registration failed!\n",
+ driver_name);
goto err_out4;
}
@@ -601,7 +606,7 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
}
platform_set_drvdata(self->pldev, self);
- IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
dev_count++;
return 0;
@@ -637,15 +642,15 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
driver_name)) {
- IRDA_WARNING("%s: can't get fir_base of 0x%03x\n",
- __func__, fir_base);
+ net_warn_ratelimited("%s: can't get fir_base of 0x%03x\n",
+ __func__, fir_base);
goto out1;
}
if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
driver_name)) {
- IRDA_WARNING("%s: can't get sir_base of 0x%03x\n",
- __func__, sir_base);
+ net_warn_ratelimited("%s: can't get sir_base of 0x%03x\n",
+ __func__, sir_base);
goto out2;
}
@@ -660,13 +665,13 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
- IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
- __func__, fir_base);
+ net_warn_ratelimited("%s(), addr 0x%04x - no device found!\n",
+ __func__, fir_base);
goto out3;
}
- IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, "
- "firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
- chip & 0x0f, version, fir_base, sir_base, dma, irq);
+ net_info_ratelimited("SMsC IrDA Controller found\n IrCC version %d.%d, firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
+ chip & 0x0f, version,
+ fir_base, sir_base, dma, irq);
return 0;
@@ -704,16 +709,16 @@ static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
if (irq != IRQ_INVAL) {
if (irq != chip_irq)
- IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n",
- driver_name, chip_irq, irq);
+ net_info_ratelimited("%s, Overriding IRQ - chip says %d, using %d\n",
+ driver_name, chip_irq, irq);
self->io.irq = irq;
} else
self->io.irq = chip_irq;
if (dma != DMA_INVAL) {
if (dma != chip_dma)
- IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n",
- driver_name, chip_dma, dma);
+ net_info_ratelimited("%s, Overriding DMA - chip says %d, using %d\n",
+ driver_name, chip_dma, dma);
self->io.dma = dma;
} else
self->io.dma = chip_dma;
@@ -798,7 +803,7 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -852,8 +857,8 @@ static void smsc_ircc_timeout(struct net_device *dev)
struct smsc_ircc_cb *self = netdev_priv(dev);
unsigned long flags;
- IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n",
- dev->name, self->io.speed);
+ net_warn_ratelimited("%s: transmit timed out, changing speed to: %d\n",
+ dev->name, self->io.speed);
spin_lock_irqsave(&self->lock, flags);
smsc_ircc_sir_start(self);
smsc_ircc_change_speed(self, self->io.speed);
@@ -877,7 +882,7 @@ static netdev_tx_t smsc_ircc_hard_xmit_sir(struct sk_buff *skb,
unsigned long flags;
s32 speed;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
@@ -952,21 +957,21 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
ir_mode = IRCC_CFGA_IRDA_HDLC;
ctrl = IRCC_CRC;
fast = 0;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
+ pr_debug("%s(), handling baud of 576000\n", __func__);
break;
case 1152000:
ir_mode = IRCC_CFGA_IRDA_HDLC;
ctrl = IRCC_1152 | IRCC_CRC;
fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
- IRDA_DEBUG(0, "%s(), handling baud of 1152000\n",
- __func__);
+ pr_debug("%s(), handling baud of 1152000\n",
+ __func__);
break;
case 4000000:
ir_mode = IRCC_CFGA_IRDA_4PPM;
ctrl = IRCC_CRC;
fast = IRCC_LCR_A_FAST;
- IRDA_DEBUG(0, "%s(), handling baud of 4000000\n",
- __func__);
+ pr_debug("%s(), handling baud of 4000000\n",
+ __func__);
break;
}
#if 0
@@ -994,7 +999,7 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
struct net_device *dev;
int fir_base;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1039,7 +1044,7 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
{
int fir_base;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
@@ -1063,7 +1068,7 @@ static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
struct net_device *dev;
int last_speed_was_sir;
- IRDA_DEBUG(0, "%s() changing speed to: %d\n", __func__, speed);
+ pr_debug("%s() changing speed to: %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1131,7 +1136,7 @@ static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
int lcr; /* Line control reg */
int divisor;
- IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __func__, speed);
+ pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
@@ -1166,7 +1171,7 @@ static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
/* Turn on interrups */
outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
- IRDA_DEBUG(2, "%s() speed changed to: %d\n", __func__, speed);
+ pr_debug("%s() speed changed to: %d\n", __func__, speed);
}
@@ -1250,7 +1255,7 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
int iobase = self->io.fir_base;
u8 ctrl;
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
#if 1
/* Disable Rx */
register_bank(iobase, 0);
@@ -1304,7 +1309,7 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
{
int iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
#if 0
/* Disable Tx */
register_bank(iobase, 0);
@@ -1408,7 +1413,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
register_bank(iobase, 0);
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
#if 0
/* Disable Rx */
register_bank(iobase, 0);
@@ -1419,8 +1424,8 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
lsr= inb(iobase + IRCC_LSR);
msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
- IRDA_DEBUG(2, "%s: dma count = %d\n", __func__,
- get_dma_residue(self->io.dma));
+ pr_debug("%s: dma count = %d\n", __func__,
+ get_dma_residue(self->io.dma));
len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
@@ -1442,17 +1447,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
len -= self->io.speed < 4000000 ? 2 : 4;
if (len < 2 || len > 2050) {
- IRDA_WARNING("%s(), bogus len=%d\n", __func__, len);
+ net_warn_ratelimited("%s(), bogus len=%d\n", __func__, len);
return;
}
- IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
+ pr_debug("%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
skb = dev_alloc_skb(len + 1);
- if (!skb) {
- IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
- __func__);
+ if (!skb)
return;
- }
+
/* Make sure IP header gets aligned */
skb_reserve(skb, 1);
@@ -1491,7 +1494,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
/* Make sure we don't stay here to long */
if (boguscount++ > 32) {
- IRDA_DEBUG(2, "%s(), breaking!\n", __func__);
+ pr_debug("%s(), breaking!\n", __func__);
break;
}
} while (inb(iobase + UART_LSR) & UART_LSR_DR);
@@ -1533,7 +1536,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
lcra = inb(iobase + IRCC_LCR_A);
lsr = inb(iobase + IRCC_LSR);
- IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __func__, iir);
+ pr_debug("%s(), iir = 0x%02x\n", __func__, iir);
if (iir & IRCC_IIR_EOM) {
if (self->io.direction == IO_RECV)
@@ -1583,12 +1586,12 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
/* Clear interrupt */
lsr = inb(iobase + UART_LSR);
- IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
- __func__, iir, lsr, iobase);
+ pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __func__, iir, lsr, iobase);
switch (iir) {
case UART_IIR_RLSI:
- IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
+ pr_debug("%s(), RLSI\n", __func__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
@@ -1600,8 +1603,8 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
smsc_ircc_sir_write_wakeup(self);
break;
default:
- IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
- __func__, iir);
+ pr_debug("%s(), unhandled IIR=%#x\n",
+ __func__, iir);
break;
}
@@ -1628,12 +1631,12 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
int status = FALSE;
/* int iobase; */
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return FALSE;);
- IRDA_DEBUG(0, "%s: dma count = %d\n", __func__,
- get_dma_residue(self->io.dma));
+ pr_debug("%s: dma count = %d\n", __func__,
+ get_dma_residue(self->io.dma));
status = (self->rx_buff.state != OUTSIDE_FRAME);
@@ -1648,8 +1651,8 @@ static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
error = request_irq(self->io.irq, smsc_ircc_interrupt, 0,
self->netdev->name, self->netdev);
if (error)
- IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
- __func__, self->io.irq, error);
+ pr_debug("%s(), unable to allocate irq=%d, err=%d\n",
+ __func__, self->io.irq, error);
return error;
}
@@ -1693,21 +1696,21 @@ static int smsc_ircc_net_open(struct net_device *dev)
struct smsc_ircc_cb *self;
char hwname[16];
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return 0;);
if (self->io.suspended) {
- IRDA_DEBUG(0, "%s(), device is suspended\n", __func__);
+ pr_debug("%s(), device is suspended\n", __func__);
return -EAGAIN;
}
if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
(void *) dev)) {
- IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
- __func__, self->io.irq);
+ pr_debug("%s(), unable to allocate irq=%d\n",
+ __func__, self->io.irq);
return -EAGAIN;
}
@@ -1730,8 +1733,8 @@ static int smsc_ircc_net_open(struct net_device *dev)
if (request_dma(self->io.dma, dev->name)) {
smsc_ircc_net_close(dev);
- IRDA_WARNING("%s(), unable to allocate DMA=%d\n",
- __func__, self->io.dma);
+ net_warn_ratelimited("%s(), unable to allocate DMA=%d\n",
+ __func__, self->io.dma);
return -EAGAIN;
}
@@ -1750,7 +1753,7 @@ static int smsc_ircc_net_close(struct net_device *dev)
{
struct smsc_ircc_cb *self;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -1781,7 +1784,7 @@ static int smsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
struct smsc_ircc_cb *self = platform_get_drvdata(dev);
if (!self->io.suspended) {
- IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
+ pr_debug("%s, Suspending\n", driver_name);
rtnl_lock();
if (netif_running(self->netdev)) {
@@ -1802,7 +1805,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
struct smsc_ircc_cb *self = platform_get_drvdata(dev);
if (self->io.suspended) {
- IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
+ pr_debug("%s, Waking up\n", driver_name);
rtnl_lock();
smsc_ircc_init_chip(self);
@@ -1833,7 +1836,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
*/
static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
{
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return -1;);
@@ -1845,13 +1848,13 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
smsc_ircc_stop_interrupts(self);
/* Release the PORTS that this driver is using */
- IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
- self->io.fir_base);
+ pr_debug("%s(), releasing 0x%03x\n", __func__,
+ self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
- IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
- self->io.sir_base);
+ pr_debug("%s(), releasing 0x%03x\n", __func__,
+ self->io.sir_base);
release_region(self->io.sir_base, self->io.sir_ext);
@@ -1872,7 +1875,7 @@ static void __exit smsc_ircc_cleanup(void)
{
int i;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
for (i = 0; i < 2; i++) {
if (dev_self[i])
@@ -1896,7 +1899,7 @@ static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
struct net_device *dev;
int fir_base, sir_base;
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
IRDA_ASSERT(self != NULL, return;);
dev = self->netdev;
@@ -1922,7 +1925,7 @@ static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
/* Turn on interrups */
outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER);
- IRDA_DEBUG(3, "%s() - exit\n", __func__);
+ pr_debug("%s() - exit\n", __func__);
outb(0x00, fir_base + IRCC_MASTER);
}
@@ -1932,7 +1935,7 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
{
int iobase;
- IRDA_DEBUG(3, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
iobase = self->io.sir_base;
/* Reset UART */
@@ -1958,7 +1961,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
IRDA_ASSERT(self != NULL, return;);
- IRDA_DEBUG(4, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
iobase = self->io.sir_base;
@@ -1979,8 +1982,8 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
* if we need to change the speed of the hardware
*/
if (self->new_speed) {
- IRDA_DEBUG(5, "%s(), Changing speed to %d.\n",
- __func__, self->new_speed);
+ pr_debug("%s(), Changing speed to %d.\n",
+ __func__, self->new_speed);
smsc_ircc_sir_wait_hw_transmitter_finish(self);
smsc_ircc_change_speed(self, self->new_speed);
self->new_speed = 0;
@@ -2019,7 +2022,8 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
/* Tx FIFO should be empty! */
if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
- IRDA_WARNING("%s(), failed, fifo not empty!\n", __func__);
+ net_warn_ratelimited("%s(), failed, fifo not empty!\n",
+ __func__);
return 0;
}
@@ -2058,14 +2062,14 @@ static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
for (i = 0; smsc_transceivers[i].name != NULL; i++)
if (smsc_transceivers[i].probe(self->io.fir_base)) {
- IRDA_MESSAGE(" %s transceiver found\n",
- smsc_transceivers[i].name);
+ net_info_ratelimited(" %s transceiver found\n",
+ smsc_transceivers[i].name);
self->transceiver= i + 1;
return;
}
- IRDA_MESSAGE("No transceiver found. Defaulting to %s\n",
- smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
+ net_info_ratelimited("No transceiver found. Defaulting to %s\n",
+ smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
self->transceiver = SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
}
@@ -2119,7 +2123,7 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
udelay(1);
if (count < 0)
- IRDA_DEBUG(0, "%s(): stuck transmitter\n", __func__);
+ pr_debug("%s(): stuck transmitter\n", __func__);
}
@@ -2180,7 +2184,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
u8 mode, dma, irq;
int ret = -ENODEV;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
return ret;
@@ -2191,7 +2195,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
/*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/
if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
- IRDA_WARNING("%s(): IrDA not enabled\n", __func__);
+ net_warn_ratelimited("%s(): IrDA not enabled\n", __func__);
outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
sirbase = inb(cfgbase + 1) << 2;
@@ -2208,7 +2212,8 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
- IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __func__, firbase, sirbase, dma, irq, mode);
+ net_info_ratelimited("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n",
+ __func__, firbase, sirbase, dma, irq, mode);
if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
ret = 0;
@@ -2230,7 +2235,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
unsigned short fir_io, sir_io;
int ret = -ENODEV;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
return ret;
@@ -2264,7 +2269,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
{
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
outb(reg, cfg_base);
return inb(cfg_base) != reg ? -1 : 0;
@@ -2274,7 +2279,7 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
{
u8 devid, xdevid, rev;
- IRDA_DEBUG(1, "%s\n", __func__);
+ pr_debug("%s\n", __func__);
/* Leave configuration */
@@ -2329,16 +2334,16 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
return NULL;
}
- IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
- devid, rev, cfg_base, type, chip->name);
+ net_info_ratelimited("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
+ devid, rev, cfg_base, type, chip->name);
if (chip->rev > rev) {
- IRDA_MESSAGE("Revision higher than expected\n");
+ net_info_ratelimited("Revision higher than expected\n");
return NULL;
}
if (chip->flags & NoIRDA)
- IRDA_MESSAGE("chipset does not support IRDA\n");
+ net_info_ratelimited("chipset does not support IRDA\n");
return chip;
}
@@ -2348,8 +2353,8 @@ static int __init smsc_superio_fdc(unsigned short cfg_base)
int ret = -1;
if (!request_region(cfg_base, 2, driver_name)) {
- IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
- __func__, cfg_base);
+ net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+ __func__, cfg_base);
} else {
if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
!smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
@@ -2366,8 +2371,8 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
int ret = -1;
if (!request_region(cfg_base, 2, driver_name)) {
- IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
- __func__, cfg_base);
+ net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+ __func__, cfg_base);
} else {
if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
!smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
@@ -2529,9 +2534,8 @@ static int __init preconfigure_smsc_chip(struct
outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state
outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID
tmpbyte = inb(iobase +1); // Read device ID
- IRDA_DEBUG(0,
- "Detected Chip id: 0x%02x, setting up registers...\n",
- tmpbyte);
+ pr_debug("Detected Chip id: 0x%02x, setting up registers...\n",
+ tmpbyte);
/* Disable UART1 and set up SIR I/O port */
outb(0x24, iobase); // select CR24 - UART1 base addr
@@ -2540,8 +2544,8 @@ static int __init preconfigure_smsc_chip(struct
outb( (conf->sir_io >> 2), iobase + 1); // bits 2-9 of 0x3f8
tmpbyte = inb(iobase + 1);
if (tmpbyte != (conf->sir_io >> 2) ) {
- IRDA_WARNING("ERROR: could not configure SIR ioport.\n");
- IRDA_WARNING("Try to supply ircc_cfg argument.\n");
+ net_warn_ratelimited("ERROR: could not configure SIR ioport\n");
+ net_warn_ratelimited("Try to supply ircc_cfg argument\n");
return -ENXIO;
}
@@ -2553,7 +2557,7 @@ static int __init preconfigure_smsc_chip(struct
outb(tmpbyte, iobase + 1);
tmpbyte = inb(iobase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
if (tmpbyte != conf->fir_irq) {
- IRDA_WARNING("ERROR: could not configure FIR IRQ channel.\n");
+ net_warn_ratelimited("ERROR: could not configure FIR IRQ channel\n");
return -ENXIO;
}
@@ -2562,7 +2566,7 @@ static int __init preconfigure_smsc_chip(struct
outb((conf->fir_io >> 3), iobase + 1);
tmpbyte = inb(iobase + 1);
if (tmpbyte != (conf->fir_io >> 3) ) {
- IRDA_WARNING("ERROR: could not configure FIR I/O port.\n");
+ net_warn_ratelimited("ERROR: could not configure FIR I/O port\n");
return -ENXIO;
}
@@ -2571,7 +2575,7 @@ static int __init preconfigure_smsc_chip(struct
outb((conf->fir_dma & LPC47N227_FIRDMASELECT_MASK), iobase + 1); // DMA
tmpbyte = inb(iobase + 1) & LPC47N227_FIRDMASELECT_MASK;
if (tmpbyte != (conf->fir_dma & LPC47N227_FIRDMASELECT_MASK)) {
- IRDA_WARNING("ERROR: could not configure FIR DMA channel.\n");
+ net_warn_ratelimited("ERROR: could not configure FIR DMA channel\n");
return -ENXIO;
}
@@ -2628,7 +2632,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
unsigned short tmpword;
unsigned char tmpbyte;
- IRDA_MESSAGE("Setting up Intel 82801 controller and SMSC device\n");
+ net_info_ratelimited("Setting up Intel 82801 controller and SMSC device\n");
/*
* Select the range for the COMA COM port (SIR)
* Register COM_DEC:
@@ -2677,7 +2681,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
default:
tmpbyte |= 0x01; /* COM2 default */
}
- IRDA_DEBUG(1, "COM_DEC (write): 0x%02x\n", tmpbyte);
+ pr_debug("COM_DEC (write): 0x%02x\n", tmpbyte);
pci_write_config_byte(dev, COM_DEC, tmpbyte);
/* Enable Low Pin Count interface */
@@ -2699,13 +2703,13 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
tmpword |= 0x0400;
break;
default:
- IRDA_WARNING("Uncommon I/O base address: 0x%04x\n",
- conf->cfg_base);
+ net_warn_ratelimited("Uncommon I/O base address: 0x%04x\n",
+ conf->cfg_base);
break;
}
tmpword &= 0xfffd; /* disable LPC COMB */
tmpword |= 0x0001; /* set bit 0 : enable LPC COMA addr range (GEN2) */
- IRDA_DEBUG(1, "LPC_EN (write): 0x%04x\n", tmpword);
+ pr_debug("LPC_EN (write): 0x%04x\n", tmpword);
pci_write_config_word(dev, LPC_EN, tmpword);
/*
@@ -2750,7 +2754,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
default:
break; /* do not change settings */
}
- IRDA_DEBUG(1, "PCI_DMA_C (write): 0x%04x\n", tmpword);
+ pr_debug("PCI_DMA_C (write): 0x%04x\n", tmpword);
pci_write_config_word(dev, PCI_DMA_C, tmpword);
/*
@@ -2761,7 +2765,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
*/
tmpword = conf->fir_io & 0xfff8;
tmpword |= 0x0001;
- IRDA_DEBUG(1, "GEN2_DEC (write): 0x%04x\n", tmpword);
+ pr_debug("GEN2_DEC (write): 0x%04x\n", tmpword);
pci_write_config_word(dev, GEN2_DEC, tmpword);
/* Pre-configure chip */
@@ -2800,7 +2804,8 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
mask = 0x08;
break;
default:
- IRDA_ERROR("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n", port);
+ net_err_ratelimited("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n",
+ port);
return;
}
@@ -2808,7 +2813,8 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
/* Turn on the right bits */
tmpbyte |= mask;
pci_write_config_byte(dev, reg, tmpbyte);
- IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
+ net_info_ratelimited("Activated ALi 1533 ISA bridge port 0x%04x\n",
+ port);
}
static int __init preconfigure_through_ali(struct pci_dev *dev,
@@ -2877,7 +2883,8 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
if (ircc_irq != IRQ_INVAL)
tmpconf.fir_irq = ircc_irq;
- IRDA_MESSAGE("Detected unconfigured %s SMSC IrDA chip, pre-configuring device.\n", conf->name);
+ net_info_ratelimited("Detected unconfigured %s SMSC IrDA chip, pre-configuring device\n",
+ conf->name);
if (conf->preconfigure)
ret = conf->preconfigure(dev, &tmpconf);
else
@@ -2922,8 +2929,8 @@ static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
/* empty */;
if (val)
- IRDA_WARNING("%s(): ATC: 0x%02x\n", __func__,
- inb(fir_base + IRCC_ATC));
+ net_warn_ratelimited("%s(): ATC: 0x%02x\n",
+ __func__, inb(fir_base + IRCC_ATC));
}
/*
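The smsc-ircc2.c hunks above establish the conversion pattern repeated across every IrDA file in this diff: IRDA_MESSAGE/IRDA_WARNING/IRDA_ERROR become net_info_ratelimited/net_warn_ratelimited/net_err_ratelimited, IRDA_DEBUG(level, ...) becomes pr_debug(), and trailing periods are trimmed from message text. Two behavioral consequences are worth noting: pr_debug() compiles to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled, so the old numeric verbosity levels are discarded; and the net_*_ratelimited() helpers gate output through net_ratelimit(), so an error path that fires in a loop can no longer flood the log. Paraphrased from <linux/net.h> (see the tree for the exact definitions), the helpers look approximately like:

#define net_ratelimited_function(function, ...)			\
do {								\
	if (net_ratelimit())					\
		function(__VA_ARGS__);				\
} while (0)

#define net_info_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
#define net_warn_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_err_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)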
diff --git a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c
index 048a15422844..9dcf0c103b9d 100644
--- a/drivers/net/irda/tekram-sir.c
+++ b/drivers/net/irda/tekram-sir.c
@@ -63,8 +63,8 @@ static int __init tekram_sir_init(void)
{
if (tekram_delay < 1 || tekram_delay > 500)
tekram_delay = 200;
- IRDA_DEBUG(1, "%s - using %d ms delay\n",
- tekram.driver_name, tekram_delay);
+ pr_debug("%s - using %d ms delay\n",
+ tekram.driver_name, tekram_delay);
return irda_register_dongle(&tekram);
}
@@ -77,8 +77,6 @@ static int tekram_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
sirdev_set_dtr_rts(dev, TRUE, TRUE);
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
@@ -92,8 +90,6 @@ static int tekram_open(struct sir_dev *dev)
static int tekram_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -130,8 +126,6 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
u8 byte;
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch(state) {
case SIRDEV_STATE_DONGLE_SPEED:
@@ -179,7 +173,8 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
break;
default:
- IRDA_ERROR("%s - undefined state %d\n", __func__, state);
+ net_err_ratelimited("%s - undefined state %d\n",
+ __func__, state);
ret = -EINVAL;
break;
}
@@ -204,8 +199,6 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
static int tekram_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Clear DTR, Set RTS */
sirdev_set_dtr_rts(dev, FALSE, TRUE);
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index 19ad4606b799..6d2f55959c49 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -168,8 +168,8 @@ static int __init toim3232_sir_init(void)
{
if (toim3232delay < 1 || toim3232delay > 500)
toim3232delay = 200;
- IRDA_DEBUG(1, "%s - using %d ms delay\n",
- toim3232.driver_name, toim3232delay);
+ pr_debug("%s - using %d ms delay\n",
+ toim3232.driver_name, toim3232delay);
return irda_register_dongle(&toim3232);
}
@@ -182,8 +182,6 @@ static int toim3232_open(struct sir_dev *dev)
{
struct qos_info *qos = &dev->qos;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Pull the lines high to start with.
*
* For the IR320ST-2, we need to charge the main supply capacitor to
@@ -210,8 +208,6 @@ static int toim3232_open(struct sir_dev *dev)
static int toim3232_close(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Power off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -242,8 +238,6 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
u8 byte;
static int ret = 0;
- IRDA_DEBUG(2, "%s()\n", __func__);
-
switch(state) {
case SIRDEV_STATE_DONGLE_SPEED:
@@ -345,8 +339,6 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
static int toim3232_reset(struct sir_dev *dev)
{
- IRDA_DEBUG(2, "%s()\n", __func__);
-
/* Switch off both DTR and RTS to switch off dongle */
sirdev_set_dtr_rts(dev, FALSE, FALSE);
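In the tekram-sir.c and toim3232-sir.c hunks, the bare function-entry traces of the form IRDA_DEBUG(2, "%s()\n", __func__) are deleted outright rather than converted: ftrace's function tracer already reports entry into any kernel function, so driver-side entry logging adds nothing. A minimal sketch of the pattern, with demo_* names standing in for the real sir_dev handling:

struct demo_dev { int dtr, rts; };	/* hypothetical stand-in */

static int demo_open(struct demo_dev *dev)
{
	/* was: IRDA_DEBUG(2, "%s()\n", __func__); now gone, since
	 * the function tracer reports entry into demo_open()
	 * without any instrumentation in the driver itself */
	dev->dtr = dev->rts = 1;	/* power the dongle on */
	return 0;
}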
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 36e004288ea7..6960d4cd3cae 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -144,12 +144,10 @@ static int __init via_ircc_init(void)
{
int rc;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
rc = pci_register_driver(&via_driver);
if (rc < 0) {
- IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
- __func__, rc);
+ pr_debug("%s(): error rc = %d, returning -ENODEV...\n",
+ __func__, rc);
return -ENODEV;
}
return 0;
@@ -162,11 +160,11 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
chipio_t info;
- IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
+ pr_debug("%s(): Device ID=(0X%X)\n", __func__, id->device);
rc = pci_enable_device (pcidev);
if (rc) {
- IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
+ pr_debug("%s(): error rc = %d\n", __func__, rc);
return -ENODEV;
}
@@ -177,7 +175,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
Chipset=0x3076;
if (Chipset==0x3076) {
- IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
+ pr_debug("%s(): Chipset = 3076\n", __func__);
WriteLPCReg(7,0x0c );
temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
@@ -213,7 +211,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
} else
rc = -ENODEV; //IR not turn on
} else { //Not VT1211
- IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
+ pr_debug("%s(): Chipset = 3096\n", __func__);
pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
if((bTmp&0x01)==1) { // BIOS enable FIR
@@ -252,14 +250,12 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
rc = -ENODEV; //IR not turn on !!!!!
}//Not VT1211
- IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
+ pr_debug("%s(): End - rc = %d\n", __func__, rc);
return rc;
}
static void __exit via_ircc_cleanup(void)
{
- IRDA_DEBUG(3, "%s()\n", __func__);
-
/* Cleanup all instances of the driver */
pci_unregister_driver (&via_driver);
}
@@ -289,8 +285,6 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
struct via_ircc_cb *self;
int err;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
/* Allocate new instance of the driver */
dev = alloc_irdadev(sizeof(struct via_ircc_cb));
if (dev == NULL)
@@ -316,8 +310,8 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
/* Reserve the ioports that we need */
if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
- IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
- __func__, self->io.fir_base);
+ pr_debug("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
err = -ENODEV;
goto err_out1;
}
@@ -391,7 +385,8 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
if (err)
goto err_out4;
- IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s (via-ircc)\n",
+ dev->name);
/* Initialise the hardware..
*/
@@ -422,8 +417,6 @@ static void via_remove_one(struct pci_dev *pdev)
struct via_ircc_cb *self = pci_get_drvdata(pdev);
int iobase;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
iobase = self->io.fir_base;
ResetChip(iobase, 5); //hardware reset.
@@ -431,8 +424,8 @@ static void via_remove_one(struct pci_dev *pdev)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
- __func__, self->io.fir_base);
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
@@ -457,8 +450,6 @@ static void via_hw_init(struct via_ircc_cb *self)
{
int iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
// FIFO Init
EnRXFIFOReadyInt(iobase, OFF);
@@ -510,7 +501,7 @@ static void via_hw_init(struct via_ircc_cb *self)
*/
static int via_ircc_read_dongle_id(int iobase)
{
- IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
+ net_err_ratelimited("via-ircc: dongle probing not supported, please specify dongle_id module parameter\n");
return 9; /* Default to IBM */
}
@@ -527,8 +518,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
/* speed is unused, as we use IsSIROn()/IsMIROn() */
speed = speed;
- IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
- __func__, speed, iobase, dongle_id);
+ pr_debug("%s(): change_dongle_speed to %d for 0x%x, %d\n",
+ __func__, speed, iobase, dongle_id);
switch (dongle_id) {
@@ -617,7 +608,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
case 0x11: /* Temic TFDS4500 */
- IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
+ pr_debug("%s: Temic TFDS4500: One RX pin, TX normal, RX inverted\n",
+ __func__);
UseOneRX(iobase, ON); //use ONE RX....RX1
InvertTX(iobase, OFF);
@@ -635,7 +627,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
SlowIRRXLowActive(iobase, OFF);
} else{
- IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
+ pr_debug("%s: Warning: TFDS4500 not running in SIR mode !\n",
+ __func__);
}
break;
@@ -652,8 +645,8 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
break;
default:
- IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
- __func__, dongle_id);
+ net_err_ratelimited("%s: Error: dongle_id %d unsupported !\n",
+ __func__, dongle_id);
}
}
@@ -672,7 +665,7 @@ static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
iobase = self->io.fir_base;
/* Update accounting for new speed */
self->io.speed = speed;
- IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);
+ pr_debug("%s: change_speed to %d bps.\n", __func__, speed);
WriteReg(iobase, I_ST_CT_0, 0x0);
@@ -902,10 +895,10 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
self->tx_buff.head) + self->tx_buff_dma,
self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
- IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
- __func__, self->tx_fifo.ptr,
- self->tx_fifo.queue[self->tx_fifo.ptr].len,
- self->tx_fifo.len);
+ pr_debug("%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
+ __func__, self->tx_fifo.ptr,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ self->tx_fifo.len);
SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
RXStart(iobase, OFF);
@@ -926,8 +919,6 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
int iobase;
u8 Tx_status;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
iobase = self->io.fir_base;
/* Disable DMA */
// DisableDmaChannel(self->io.dma);
@@ -957,10 +948,9 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
self->tx_fifo.ptr++;
}
}
- IRDA_DEBUG(1,
- "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
- __func__,
- self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
+ pr_debug("%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
+ __func__,
+ self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
// Any frames to be sent back-to-back?
if (self->tx_fifo.len) {
@@ -995,8 +985,6 @@ static int via_ircc_dma_receive(struct via_ircc_cb *self)
iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
self->tx_fifo.tail = self->tx_buff.head;
self->RxDataReady = 0;
@@ -1078,15 +1066,15 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
if (len == 0)
return TRUE; //interrupt only, data maybe move by RxT
if (((len - 4) < 2) || ((len - 4) > 2048)) {
- IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
- __func__, len, RxCurCount(iobase, self),
- self->RxLastCount);
+ pr_debug("%s(): Trouble:len=%x,CurCount=%x,LastCount=%x\n",
+ __func__, len, RxCurCount(iobase, self),
+ self->RxLastCount);
hwreset(self);
return FALSE;
}
- IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
- __func__,
- st_fifo->len, len - 4, RxCurCount(iobase, self));
+ pr_debug("%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
+ __func__,
+ st_fifo->len, len - 4, RxCurCount(iobase, self));
st_fifo->entries[st_fifo->tail].status = status;
st_fifo->entries[st_fifo->tail].len = len;
@@ -1133,8 +1121,8 @@ F01_E */
skb_put(skb, len - 4);
skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
- IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
- len - 4, self->rx_buff.data);
+ pr_debug("%s(): len=%x.rx_buff=%p\n", __func__,
+ len - 4, self->rx_buff.data);
// Move to next frame
self->rx_buff.data += len;
@@ -1163,7 +1151,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
len = GetRecvByte(iobase, self);
- IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
+ pr_debug("%s(): len=%x\n", __func__, len);
if ((len - 4) < 2) {
self->netdev->stats.rx_dropped++;
@@ -1248,8 +1236,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
skb_put(skb, len - 4);
skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
- IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
- len - 4, st_fifo->head);
+ pr_debug("%s(): len=%x.head=%x\n", __func__,
+ len - 4, st_fifo->head);
// Move to next frame
self->rx_buff.data += len;
@@ -1262,10 +1250,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
} //while
self->RetryCount = 0;
- IRDA_DEBUG(2,
- "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
- __func__,
- GetHostStatus(iobase), GetRXStatus(iobase));
+ pr_debug("%s(): End of upload HostStatus=%x,RxStatus=%x\n",
+ __func__, GetHostStatus(iobase), GetRXStatus(iobase));
/*
* if frame is receive complete at this routine ,then upload
@@ -1303,12 +1289,12 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
spin_lock(&self->lock);
iHostIntType = GetHostStatus(iobase);
- IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
- __func__, iHostIntType,
- (iHostIntType & 0x40) ? "Timer" : "",
- (iHostIntType & 0x20) ? "Tx" : "",
- (iHostIntType & 0x10) ? "Rx" : "",
- (iHostIntType & 0x0e) >> 1);
+ pr_debug("%s(): iHostIntType %02x: %s %s %s %02x\n",
+ __func__, iHostIntType,
+ (iHostIntType & 0x40) ? "Timer" : "",
+ (iHostIntType & 0x20) ? "Tx" : "",
+ (iHostIntType & 0x10) ? "Rx" : "",
+ (iHostIntType & 0x0e) >> 1);
if ((iHostIntType & 0x40) != 0) { //Timer Event
self->EventFlag.TimeOut++;
@@ -1333,12 +1319,12 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
if ((iHostIntType & 0x20) != 0) { //Tx Event
iTxIntType = GetTXStatus(iobase);
- IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
- __func__, iTxIntType,
- (iTxIntType & 0x08) ? "FIFO underr." : "",
- (iTxIntType & 0x04) ? "EOM" : "",
- (iTxIntType & 0x02) ? "FIFO ready" : "",
- (iTxIntType & 0x01) ? "Early EOM" : "");
+ pr_debug("%s(): iTxIntType %02x: %s %s %s %s\n",
+ __func__, iTxIntType,
+ (iTxIntType & 0x08) ? "FIFO underr." : "",
+ (iTxIntType & 0x04) ? "EOM" : "",
+ (iTxIntType & 0x02) ? "FIFO ready" : "",
+ (iTxIntType & 0x01) ? "Early EOM" : "");
if (iTxIntType & 0x4) {
self->EventFlag.EOMessage++; // read and will auto clean
@@ -1357,17 +1343,17 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
/* Check if DMA has finished */
iRxIntType = GetRXStatus(iobase);
- IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
- __func__, iRxIntType,
- (iRxIntType & 0x80) ? "PHY err." : "",
- (iRxIntType & 0x40) ? "CRC err" : "",
- (iRxIntType & 0x20) ? "FIFO overr." : "",
- (iRxIntType & 0x10) ? "EOF" : "",
- (iRxIntType & 0x08) ? "RxData" : "",
- (iRxIntType & 0x02) ? "RxMaxLen" : "",
- (iRxIntType & 0x01) ? "SIR bad" : "");
+ pr_debug("%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
+ __func__, iRxIntType,
+ (iRxIntType & 0x80) ? "PHY err." : "",
+ (iRxIntType & 0x40) ? "CRC err" : "",
+ (iRxIntType & 0x20) ? "FIFO overr." : "",
+ (iRxIntType & 0x10) ? "EOF" : "",
+ (iRxIntType & 0x08) ? "RxData" : "",
+ (iRxIntType & 0x02) ? "RxMaxLen" : "",
+ (iRxIntType & 0x01) ? "SIR bad" : "");
if (!iRxIntType)
- IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);
+ pr_debug("%s(): RxIRQ =0\n", __func__);
if (iRxIntType & 0x10) {
if (via_ircc_dma_receive_complete(self, iobase)) {
@@ -1376,10 +1362,9 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
}
} // No ERR
else { //ERR
- IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
- __func__, iRxIntType, iHostIntType,
- RxCurCount(iobase, self),
- self->RxLastCount);
+ pr_debug("%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
+ __func__, iRxIntType, iHostIntType,
+ RxCurCount(iobase, self), self->RxLastCount);
if (iRxIntType & 0x20) { //FIFO OverRun ERR
ResetChip(iobase, 0);
@@ -1402,8 +1387,6 @@ static void hwreset(struct via_ircc_cb *self)
int iobase;
iobase = self->io.fir_base;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
ResetChip(iobase, 5);
EnableDMA(iobase, OFF);
EnableTX(iobase, OFF);
@@ -1447,7 +1430,7 @@ static int via_ircc_is_receiving(struct via_ircc_cb *self)
if (CkRxRecv(iobase, self))
status = TRUE;
- IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
+ pr_debug("%s(): status=%x....\n", __func__, status);
return status;
}
@@ -1465,16 +1448,14 @@ static int via_ircc_net_open(struct net_device *dev)
int iobase;
char hwname[32];
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
dev->stats.rx_packets = 0;
IRDA_ASSERT(self != NULL, return 0;);
iobase = self->io.fir_base;
if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
- IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
- self->io.irq);
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
return -EAGAIN;
}
/*
@@ -1482,15 +1463,15 @@ static int via_ircc_net_open(struct net_device *dev)
* failure.
*/
if (request_dma(self->io.dma, dev->name)) {
- IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
- self->io.dma);
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
free_irq(self->io.irq, dev);
return -EAGAIN;
}
if (self->io.dma2 != self->io.dma) {
if (request_dma(self->io.dma2, dev->name)) {
- IRDA_WARNING("%s, unable to allocate dma2=%d\n",
- driver_name, self->io.dma2);
+ net_warn_ratelimited("%s, unable to allocate dma2=%d\n",
+ driver_name, self->io.dma2);
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
return -EAGAIN;
@@ -1532,8 +1513,6 @@ static int via_ircc_net_close(struct net_device *dev)
struct via_ircc_cb *self;
int iobase;
- IRDA_DEBUG(3, "%s()\n", __func__);
-
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return 0;);
@@ -1576,8 +1555,8 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
- cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
+ cmd);
/* Disable interrupts & save flags */
spin_lock_irqsave(&self->lock, flags);
switch (cmd) {
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a2e556168286..ac39d9f33d5f 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -429,8 +429,8 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
if (rd->buf == NULL ||
!(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
if (rd->buf) {
- IRDA_ERROR("%s: failed to create PCI-MAP for %p",
- __func__, rd->buf);
+ net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
+ __func__, rd->buf);
kfree(rd->buf);
rd->buf = NULL;
}
@@ -483,11 +483,8 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
&idev->busaddr);
- if (!ringarea) {
- IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
- __func__);
+ if (!ringarea)
goto out;
- }
hwmap = (struct ring_descr_hw *)ringarea;
idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
@@ -559,7 +556,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
len -= crclen; /* remove trailing CRC */
if (len <= 0) {
- IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
+ pr_debug("%s: strange frame (len=%d)\n", __func__, len);
ret |= VLSI_RX_DROP;
goto done;
}
@@ -574,14 +571,14 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
*/
le16_to_cpus(rd->buf+len);
if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
- IRDA_DEBUG(0, "%s: crc error\n", __func__);
+ pr_debug("%s: crc error\n", __func__);
ret |= VLSI_RX_CRC;
goto done;
}
}
if (!rd->skb) {
- IRDA_WARNING("%s: rx packet lost\n", __func__);
+ net_warn_ratelimited("%s: rx packet lost\n", __func__);
ret |= VLSI_RX_DROP;
goto done;
}
@@ -610,8 +607,8 @@ static void vlsi_fill_rx(struct vlsi_ring *r)
for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
if (rd_is_active(rd)) {
- IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
- __func__);
+ net_warn_ratelimited("%s: driver bug: rx descr race with hw\n",
+ __func__);
vlsi_ring_debug(r);
break;
}
@@ -670,7 +667,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
if (ring_first(r) == NULL) {
/* we are in big trouble, if this should ever happen */
- IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
+ net_err_ratelimited("%s: rx ring exhausted!\n", __func__);
vlsi_ring_debug(r);
}
else
@@ -692,7 +689,7 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
if (rd_is_active(rd)) {
rd_set_status(rd, 0);
if (rd_get_count(rd)) {
- IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
+ pr_debug("%s - dropping rx packet\n", __func__);
ret = -VLSI_RX_DROP;
}
rd_set_count(rd, 0);
@@ -767,7 +764,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
int fifocnt;
baudrate = idev->new_baud;
- IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
+ pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
if (baudrate == 4000000) {
mode = IFF_FIR;
config = IRCFG_FIR;
@@ -783,8 +780,8 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
switch(baudrate) {
default:
- IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
- __func__, baudrate);
+ net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n",
+ __func__, baudrate);
baudrate = 9600;
/* fallthru */
case 2400:
@@ -801,7 +798,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
- IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
+ pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt);
}
outw(0, iobase+VLSI_PIO_IRENABLE);
@@ -825,14 +822,16 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
config ^= IRENABLE_SIR_ON;
if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
- IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
- (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
+ net_warn_ratelimited("%s: failed to set %s mode!\n",
+ __func__,
+ mode == IFF_SIR ? "SIR" :
+ mode == IFF_MIR ? "MIR" : "FIR");
ret = -1;
}
else {
if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
- IRDA_WARNING("%s: failed to apply baudrate %d\n",
- __func__, baudrate);
+ net_warn_ratelimited("%s: failed to apply baudrate %d\n",
+ __func__, baudrate);
ret = -1;
}
else {
@@ -977,8 +976,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
*/
if (len >= r->len-5)
- IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
- __func__);
+ net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n",
+ __func__);
}
else {
/* hw deals with MIR/FIR mode wrapping */
@@ -1023,7 +1022,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
- IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
+ pr_debug("%s: rx fifo not empty(%d)\n",
+ __func__, fifocnt);
}
config = inw(iobase+VLSI_PIO_IRCFG);
@@ -1035,7 +1035,7 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
if (ring_put(r) == NULL) {
netif_stop_queue(ndev);
- IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
+ pr_debug("%s: tx ring full - queue stopped\n", __func__);
}
spin_unlock_irqrestore(&idev->lock, flags);
@@ -1044,7 +1044,7 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
drop_unlock:
spin_unlock_irqrestore(&idev->lock, flags);
drop:
- IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
+ net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg);
dev_kfree_skb_any(skb);
ndev->stats.tx_errors++;
ndev->stats.tx_dropped++;
@@ -1100,8 +1100,8 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0) {
- IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
- __func__, fifocnt);
+ pr_debug("%s: rx fifo not empty(%d)\n",
+ __func__, fifocnt);
}
outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
}
@@ -1110,7 +1110,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
if (netif_queue_stopped(ndev) && !idev->new_baud) {
netif_wake_queue(ndev);
- IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
+ pr_debug("%s: queue awoken\n", __func__);
}
}
@@ -1134,7 +1134,7 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
dev_kfree_skb_any(rd->skb);
rd->skb = NULL;
}
- IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
+ pr_debug("%s - dropping tx packet\n", __func__);
ret = -VLSI_TX_DROP;
}
else
@@ -1183,8 +1183,8 @@ static int vlsi_start_clock(struct pci_dev *pdev)
}
if (count < 3) {
if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
- IRDA_ERROR("%s: no PLL or failed to lock!\n",
- __func__);
+ net_err_ratelimited("%s: no PLL or failed to lock!\n",
+ __func__);
clkctl = CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
return -1;
@@ -1192,8 +1192,8 @@ static int vlsi_start_clock(struct pci_dev *pdev)
else /* was: clksrc=0(auto) */
clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
- IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
- __func__, clksrc);
+ pr_debug("%s: PLL not locked, fallback to clksrc=%d\n",
+ __func__, clksrc);
}
else
clksrc = 1; /* got successful PLL lock */
@@ -1265,7 +1265,7 @@ static int vlsi_init_chip(struct pci_dev *pdev)
/* start the clock and clean the registers */
if (vlsi_start_clock(pdev)) {
- IRDA_ERROR("%s: no valid clock source\n", __func__);
+ net_err_ratelimited("%s: no valid clock source\n", __func__);
return -1;
}
iobase = ndev->base_addr;
@@ -1389,8 +1389,8 @@ static void vlsi_tx_timeout(struct net_device *ndev)
idev->new_baud = idev->baud; /* keep current baudrate */
if (vlsi_start_hw(idev))
- IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
- __func__, pci_name(idev->pdev), ndev->name);
+ net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n",
+ __func__, pci_name(idev->pdev), ndev->name);
else
netif_start_queue(ndev);
}
@@ -1434,8 +1434,8 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
break;
default:
- IRDA_WARNING("%s: notsupp - cmd=%04x\n",
- __func__, cmd);
+ net_warn_ratelimited("%s: notsupp - cmd=%04x\n",
+ __func__, cmd);
ret = -EOPNOTSUPP;
}
@@ -1479,8 +1479,8 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
spin_unlock_irqrestore(&idev->lock,flags);
if (boguscount <= 0)
- IRDA_MESSAGE("%s: too much work in interrupt!\n",
- __func__);
+ net_info_ratelimited("%s: too much work in interrupt!\n",
+ __func__);
return IRQ_RETVAL(handled);
}
@@ -1493,7 +1493,7 @@ static int vlsi_open(struct net_device *ndev)
char hwname[32];
if (pci_request_regions(idev->pdev, drivername)) {
- IRDA_WARNING("%s: io resource busy\n", __func__);
+ net_warn_ratelimited("%s: io resource busy\n", __func__);
goto errout;
}
ndev->base_addr = pci_resource_start(idev->pdev,0);
@@ -1507,8 +1507,8 @@ static int vlsi_open(struct net_device *ndev)
if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
drivername, ndev)) {
- IRDA_WARNING("%s: couldn't get IRQ: %d\n",
- __func__, ndev->irq);
+ net_warn_ratelimited("%s: couldn't get IRQ: %d\n",
+ __func__, ndev->irq);
goto errout_io;
}
@@ -1529,7 +1529,8 @@ static int vlsi_open(struct net_device *ndev)
netif_start_queue(ndev);
- IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);
+ net_info_ratelimited("%s: device %s operational\n",
+ __func__, ndev->name);
return 0;
@@ -1563,7 +1564,7 @@ static int vlsi_close(struct net_device *ndev)
pci_release_regions(idev->pdev);
- IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);
+ net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name);
return 0;
}
@@ -1590,7 +1591,8 @@ static int vlsi_irda_init(struct net_device *ndev)
if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
- IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
+ net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n",
+ __func__);
return -1;
}
@@ -1632,19 +1634,19 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
pdev->current_state = 0; /* hw must be running now */
- IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
- drivername, pci_name(pdev));
+ net_info_ratelimited("%s: IrDA PCI controller %s detected\n",
+ drivername, pci_name(pdev));
if ( !pci_resource_start(pdev,0) ||
!(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
- IRDA_ERROR("%s: bar 0 invalid", __func__);
+ net_err_ratelimited("%s: bar 0 invalid", __func__);
goto out_disable;
}
ndev = alloc_irdadev(sizeof(*idev));
if (ndev==NULL) {
- IRDA_ERROR("%s: Unable to allocate device memory.\n",
- __func__);
+ net_err_ratelimited("%s: Unable to allocate device memory.\n",
+ __func__);
goto out_disable;
}
@@ -1659,7 +1661,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_freedev;
if (register_netdev(ndev) < 0) {
- IRDA_ERROR("%s: register_netdev failed\n", __func__);
+ net_err_ratelimited("%s: register_netdev failed\n", __func__);
goto out_freedev;
}
@@ -1669,14 +1671,15 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
vlsi_proc_root, VLSI_PROC_FOPS, ndev);
if (!ent) {
- IRDA_WARNING("%s: failed to create proc entry\n",
- __func__);
+ net_warn_ratelimited("%s: failed to create proc entry\n",
+ __func__);
} else {
proc_set_size(ent, 0);
}
idev->proc_entry = ent;
}
- IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
+ net_info_ratelimited("%s: registered device %s\n",
+ drivername, ndev->name);
pci_set_drvdata(pdev, ndev);
mutex_unlock(&idev->mtx);
@@ -1698,7 +1701,7 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s: lost netdevice?\n", drivername);
+ net_err_ratelimited("%s: lost netdevice?\n", drivername);
return;
}
@@ -1714,7 +1717,7 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
free_netdev(ndev);
- IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
+ net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev));
}
#ifdef CONFIG_PM
@@ -1733,8 +1736,8 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice\n",
- __func__, pci_name(pdev));
+ net_err_ratelimited("%s - %s: no netdevice\n",
+ __func__, pci_name(pdev));
return 0;
}
idev = netdev_priv(ndev);
@@ -1745,7 +1748,9 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
pdev->current_state = state.event;
}
else
- IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event);
+ net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n",
+ __func__, pci_name(pdev),
+ pdev->current_state, state.event);
mutex_unlock(&idev->mtx);
return 0;
}
@@ -1772,16 +1777,16 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice\n",
- __func__, pci_name(pdev));
+ net_err_ratelimited("%s - %s: no netdevice\n",
+ __func__, pci_name(pdev));
return 0;
}
idev = netdev_priv(ndev);
mutex_lock(&idev->mtx);
if (pdev->current_state == 0) {
mutex_unlock(&idev->mtx);
- IRDA_WARNING("%s - %s: already resumed\n",
- __func__, pci_name(pdev));
+ net_warn_ratelimited("%s - %s: already resumed\n",
+ __func__, pci_name(pdev));
return 0;
}
@@ -1800,7 +1805,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
* now we explicitly set pdev->current_state = 0 after enabling the
* device and independently resume_ok should catch any garbage config.
*/
- IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
+ net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__);
mutex_unlock(&idev->mtx);
return 0;
}
@@ -1837,7 +1842,8 @@ static int __init vlsi_mod_init(void)
int i, ret;
if (clksrc < 0 || clksrc > 3) {
- IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc);
+ net_err_ratelimited("%s: invalid clksrc=%d\n",
+ drivername, clksrc);
return -1;
}
@@ -1850,7 +1856,10 @@ static int __init vlsi_mod_init(void)
case 64:
break;
default:
- IRDA_WARNING("%s: invalid %s ringsize %d, using default=8", drivername, (i)?"rx":"tx", ringsize[i]);
+ net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n",
+ drivername,
+ i ? "rx" : "tx",
+ ringsize[i]);
ringsize[i] = 8;
break;
}
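Beyond the mechanical message conversion, the vlsi_ir.c hunks make two small cleanups: the out-of-memory message in vlsi_create_hwif() is dropped entirely, since a failed kernel allocation already logs its own diagnostic and a driver printk is redundant (a standard checkpatch cleanup), and the ringsize warning in vlsi_mod_init() gains the newline it was missing. After the patch the allocation branch reads simply:

	ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
					 &idev->busaddr);
	if (!ringarea)
		goto out;	/* the allocator has already logged the failure */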
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 56399204e68c..f9119c6d2a09 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -615,7 +615,8 @@ static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
*/
if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
- IRDA_ERROR("%s: pci busaddr inconsistency!\n", __func__);
+ net_err_ratelimited("%s: pci busaddr inconsistency!\n",
+ __func__);
dump_stack();
return;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 11dbdf36d9c1..4e3d2e7c697c 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -110,8 +110,6 @@ static int __init w83977af_init(void)
{
int i;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
@@ -129,8 +127,6 @@ static void __exit w83977af_cleanup(void)
{
int i;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
w83977af_close(dev_self[i]);
@@ -157,12 +153,10 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
struct w83977af_ir *self;
int err;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
/* Lock the port that we need */
if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
- IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
- __func__ , iobase);
+ pr_debug("%s(), can't get iobase of 0x%03x\n",
+ __func__ , iobase);
return -ENODEV;
}
@@ -236,10 +230,11 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
err = register_netdev(dev);
if (err) {
- IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
+ net_err_ratelimited("%s(), register_netdevice() failed!\n",
+ __func__);
goto err_out3;
}
- IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
/* Need to store self somewhere */
dev_self[i] = self;
@@ -268,8 +263,6 @@ static int w83977af_close(struct w83977af_ir *self)
{
int iobase;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
iobase = self->io.fir_base;
#ifdef CONFIG_USE_W977_PNP
@@ -288,8 +281,8 @@ static int w83977af_close(struct w83977af_ir *self)
unregister_netdev(self->netdev);
/* Release the PORT that this driver is using */
- IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
- __func__ , self->io.fir_base);
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__ , self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
@@ -311,7 +304,6 @@ static int w83977af_probe(int iobase, int irq, int dma)
int i;
for (i=0; i < 2; i++) {
- IRDA_DEBUG( 0, "%s()\n", __func__ );
#ifdef CONFIG_USE_W977_PNP
/* Enter PnP configuration mode */
w977_efm_enter(efbase[i]);
@@ -392,13 +384,13 @@ static int w83977af_probe(int iobase, int irq, int dma)
switch_bank(iobase, SET7);
outb(0x40, iobase+7);
- IRDA_MESSAGE("W83977AF (IR) driver loaded. "
- "Version: 0x%02x\n", version);
+ net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
+ version);
return 0;
} else {
/* Try next extended function register address */
- IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
+ pr_debug("%s(), Wrong chip version", __func__);
}
}
return -1;
@@ -434,19 +426,19 @@ static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
case 115200: outb(0x01, iobase+ABLL); break;
case 576000:
ir_mode = HCR_MIR_576;
- IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
+ pr_debug("%s(), handling baud of 576000\n", __func__);
break;
case 1152000:
ir_mode = HCR_MIR_1152;
- IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
+ pr_debug("%s(), handling baud of 1152000\n", __func__);
break;
case 4000000:
ir_mode = HCR_FIR;
- IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
+ pr_debug("%s(), handling baud of 4000000\n", __func__);
break;
default:
ir_mode = HCR_FIR;
- IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
+ pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed);
break;
}
@@ -497,8 +489,8 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
iobase = self->io.fir_base;
- IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
- (int) skb->len);
+ pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies,
+ (int)skb->len);
/* Lock transmit buffer */
netif_stop_queue(dev);
@@ -525,7 +517,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
self->tx_buff.len = skb->len;
mtt = irda_get_mtt(skb);
- IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
+ pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
if (mtt)
udelay(mtt);
@@ -559,7 +551,7 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
- IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
+ pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len);
/* Save current set */
set = inb(iobase+SSR);
@@ -594,19 +586,16 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
int actual = 0;
__u8 set;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
/* Save current bank */
set = inb(iobase+SSR);
switch_bank(iobase, SET0);
if (!(inb_p(iobase+USR) & USR_TSRE)) {
- IRDA_DEBUG(4,
- "%s(), warning, FIFO not empty yet!\n", __func__ );
+ pr_debug("%s(), warning, FIFO not empty yet!\n", __func__);
fifo_size -= 17;
- IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
- __func__ , fifo_size);
+ pr_debug("%s(), %d bytes left in tx fifo\n",
+ __func__ , fifo_size);
}
/* Fill FIFO with current frame */
@@ -615,8 +604,8 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
outb(buf[actual++], iobase+TBR);
}
- IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
- __func__ , fifo_size, actual, len);
+ pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
+ __func__ , fifo_size, actual, len);
/* Restore bank */
outb(set, iobase+SSR);
@@ -636,7 +625,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
int iobase;
__u8 set;
- IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
+ pr_debug("%s(%ld)\n", __func__ , jiffies);
IRDA_ASSERT(self != NULL, return;);
@@ -651,7 +640,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
/* Check for underrun! */
if (inb(iobase+AUDR) & AUDR_UNDR) {
- IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
+ pr_debug("%s(), Transmit underrun!\n", __func__);
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
@@ -692,7 +681,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
#endif
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(4, "%s\n", __func__ );
+ pr_debug("%s\n", __func__);
iobase= self->io.fir_base;
@@ -763,7 +752,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
__u8 set;
__u8 status;
- IRDA_DEBUG(4, "%s\n", __func__ );
+ pr_debug("%s\n", __func__);
st_fifo = &self->st_fifo;
@@ -880,8 +869,6 @@ static void w83977af_pio_receive(struct w83977af_ir *self)
__u8 byte = 0x00;
int iobase;
- IRDA_DEBUG(4, "%s()\n", __func__ );
-
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.fir_base;
@@ -907,7 +894,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
__u8 set;
int iobase;
- IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
+ pr_debug("%s(), isr=%#x\n", __func__ , isr);
iobase = self->io.fir_base;
/* Transmit FIFO low on data */
@@ -943,8 +930,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
if (isr & ISR_TXEMP_I) {
/* Check if we need to change the speed? */
if (self->new_speed) {
- IRDA_DEBUG(2,
- "%s(), Changing speed!\n", __func__ );
+ pr_debug("%s(), Changing speed!\n", __func__);
w83977af_change_speed(self, self->new_speed);
self->new_speed = 0;
}
@@ -1126,7 +1112,6 @@ static int w83977af_net_open(struct net_device *dev)
char hwname[32];
__u8 set;
- IRDA_DEBUG(0, "%s()\n", __func__ );
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -1189,8 +1174,6 @@ static int w83977af_net_close(struct net_device *dev)
int iobase;
__u8 set;
- IRDA_DEBUG(0, "%s()\n", __func__ );
-
IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
@@ -1244,7 +1227,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_ASSERT(self != NULL, return -1;);
- IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
spin_lock_irqsave(&self->lock, flags);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index bfb0b6ec8c56..612e0731142d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -742,11 +742,12 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
#define ALWAYS_ON_FEATURES \
- (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)
+ (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \
+ NETIF_F_GSO_ROBUST)
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
- NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \
NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
@@ -783,6 +784,7 @@ static int macvlan_init(struct net_device *dev)
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
dev->features |= ALWAYS_ON_FEATURES;
+ dev->hw_features |= NETIF_F_LRO;
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
@@ -872,7 +874,7 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr,
+ const unsigned char *addr, u16 vid,
u16 flags)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -897,7 +899,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr)
+ const unsigned char *addr, u16 vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
int err = -EINVAL;
@@ -935,15 +937,15 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ netdev_features_t lowerdev_features = vlan->lowerdev->features;
netdev_features_t mask;
features |= NETIF_F_ALL_FOR_ALL;
features &= (vlan->set_features | ~MACVLAN_FEATURES);
mask = features;
- features = netdev_increment_features(vlan->lowerdev->features,
- features,
- mask);
+ lowerdev_features &= (features | ~NETIF_F_LRO);
+ features = netdev_increment_features(lowerdev_features, features, mask);
features |= ALWAYS_ON_FEATURES;
features &= ~NETIF_F_NETNS_LOCAL;
@@ -1055,6 +1057,9 @@ static int macvlan_port_create(struct net_device *dev)
if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
return -EINVAL;
+ if (netif_is_ipvlan_port(dev))
+ return -EBUSY;
+
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (port == NULL)
return -ENOMEM;
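The macvlan.c changes go beyond logging: NETIF_F_GSO_ROBUST moves from MACVLAN_FEATURES into ALWAYS_ON_FEATURES, NETIF_F_LRO is added to MACVLAN_FEATURES and to hw_features so it becomes user-toggleable, the fdb_add/fdb_del callbacks grow a u16 vid argument to match the updated ndo signatures, and macvlan_port_create() now returns -EBUSY rather than stacking on a device that is already an ipvlan port. The macvlan_fix_features() rework means LRO propagates only when the lower device actually offers it; a hedged sketch of that logic (demo_fix_features is hypothetical, the helpers and flags are the real ones from <linux/netdevice.h>):

#include <linux/netdevice.h>

static netdev_features_t demo_fix_features(netdev_features_t lower,
					   netdev_features_t wanted)
{
	netdev_features_t mask = wanted;

	/* strip LRO from the lower device's feature set unless this
	 * macvlan instance requested it as well */
	lower &= (wanted | ~NETIF_F_LRO);
	return netdev_increment_features(lower, wanted, mask);
}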
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 880cc090dc44..2c157cced81f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -15,6 +15,7 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
+#include <linux/uio.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
@@ -639,12 +640,12 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
- const struct iovec *iv, unsigned long total_len,
- size_t count, int noblock)
+ struct iov_iter *from, int noblock)
{
int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
struct sk_buff *skb;
struct macvlan_dev *vlan;
+ unsigned long total_len = iov_iter_count(from);
unsigned long len = total_len;
int err;
struct virtio_net_hdr vnet_hdr = { 0 };
@@ -652,6 +653,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
int copylen = 0;
bool zerocopy = false;
size_t linear;
+ ssize_t n;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
@@ -661,10 +663,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
goto err;
len -= vnet_hdr_len;
- err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
- sizeof(vnet_hdr));
- if (err < 0)
+ err = -EFAULT;
+ n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
+ if (n != sizeof(vnet_hdr))
goto err;
+ iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
vnet_hdr.hdr_len)
@@ -679,17 +682,16 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (unlikely(len < ETH_HLEN))
goto err;
- err = -EMSGSIZE;
- if (unlikely(count > UIO_MAXIOV))
- goto err;
-
if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ struct iov_iter i;
+
copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
linear = copylen;
- if (iov_pages(iv, vnet_hdr_len + copylen, count)
- <= MAX_SKB_FRAGS)
+ i = *from;
+ iov_iter_advance(&i, copylen);
+ if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
zerocopy = true;
}
@@ -707,10 +709,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
goto err;
if (zerocopy)
- err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+ err = zerocopy_sg_from_iter(skb, from);
else {
- err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
- len);
+ err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (!err && m && m->msg_control) {
struct ubuf_info *uarg = m->msg_control;
uarg->callback(uarg, false);
@@ -763,46 +764,42 @@ err:
return err;
}
-static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- ssize_t result = -ENOLINK;
struct macvtap_queue *q = file->private_data;
- result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
- file->f_flags & O_NONBLOCK);
- return result;
+ return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}
/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
const struct sk_buff *skb,
- const struct iovec *iv, int len)
+ struct iov_iter *iter)
{
int ret;
int vnet_hdr_len = 0;
int vlan_offset = 0;
- int copied, total;
+ int total;
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
vnet_hdr_len = q->vnet_hdr_sz;
- if ((len -= vnet_hdr_len) < 0)
+ if (iov_iter_count(iter) < vnet_hdr_len)
return -EINVAL;
macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
- if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
+ if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+ sizeof(vnet_hdr))
return -EFAULT;
+
+ iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
}
- total = copied = vnet_hdr_len;
+ total = vnet_hdr_len;
total += skb->len;
- if (!vlan_tx_tag_present(skb))
- len = min_t(int, skb->len, len);
- else {
- int copy;
+ if (vlan_tx_tag_present(skb)) {
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
@@ -811,86 +808,77 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
- len = min_t(int, skb->len + VLAN_HLEN, len);
total += VLAN_HLEN;
- copy = min_t(int, vlan_offset, len);
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
- len -= copy;
- copied += copy;
- if (ret || !len)
+ ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
+ if (ret || !iov_iter_count(iter))
goto done;
- copy = min_t(int, sizeof(veth), len);
- ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
- len -= copy;
- copied += copy;
- if (ret || !len)
+ ret = copy_to_iter(&veth, sizeof(veth), iter);
+ if (ret != sizeof(veth) || !iov_iter_count(iter))
goto done;
}
- ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+ ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
+ skb->len - vlan_offset);
done:
return ret ? ret : total;
}
static ssize_t macvtap_do_read(struct macvtap_queue *q,
- const struct iovec *iv, unsigned long len,
+ struct iov_iter *to,
int noblock)
{
DEFINE_WAIT(wait);
struct sk_buff *skb;
ssize_t ret = 0;
- while (len) {
+ if (!iov_iter_count(to))
+ return 0;
+
+ while (1) {
if (!noblock)
prepare_to_wait(sk_sleep(&q->sk), &wait,
TASK_INTERRUPTIBLE);
/* Read frames from the queue */
skb = skb_dequeue(&q->sk.sk_receive_queue);
- if (!skb) {
- if (noblock) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- /* Nothing to read, let's sleep */
- schedule();
- continue;
+ if (skb)
+ break;
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
}
- ret = macvtap_put_user(q, skb, iv, len);
- kfree_skb(skb);
- break;
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ /* Nothing to read, let's sleep */
+ schedule();
+ }
+ if (skb) {
+ ret = macvtap_put_user(q, skb, to);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
}
-
if (!noblock)
finish_wait(sk_sleep(&q->sk), &wait);
return ret;
}
-static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct macvtap_queue *q = file->private_data;
- ssize_t len, ret = 0;
+ ssize_t len = iov_iter_count(to), ret;
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
+ ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
-out:
return ret;
}
@@ -1091,8 +1079,10 @@ static const struct file_operations macvtap_fops = {
.owner = THIS_MODULE,
.open = macvtap_open,
.release = macvtap_release,
- .aio_read = macvtap_aio_read,
- .aio_write = macvtap_aio_write,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = macvtap_read_iter,
+ .write_iter = macvtap_write_iter,
.poll = macvtap_poll,
.llseek = no_llseek,
.unlocked_ioctl = macvtap_ioctl,
@@ -1105,8 +1095,7 @@ static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
- return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
- m->msg_flags & MSG_DONTWAIT);
+ return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}
static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
@@ -1117,8 +1106,7 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = macvtap_do_read(q, m->msg_iov, total_len,
- flags & MSG_DONTWAIT);
+ ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
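
The macvtap changes above are the template for every conversion in this series: the (iovec, offset, length) triples become a single struct iov_iter that tracks its own position, so copy_to_iter()/copy_from_iter() and skb_copy_datagram_iter() advance it implicitly and the hand-maintained copied/len bookkeeping disappears. A minimal sketch of the resulting read-side shape, with hypothetical names (emit_hdr_and_payload is not from the patch), assuming <linux/uio.h> and <linux/skbuff.h>:

static ssize_t emit_hdr_and_payload(const void *hdr, size_t hdr_len,
				    const struct sk_buff *skb,
				    struct iov_iter *iter)
{
	/* copy_to_iter() returns the number of bytes copied and advances
	 * the iterator, so a short copy is caught by comparing lengths
	 */
	if (copy_to_iter(hdr, hdr_len, iter) != hdr_len)
		return -EFAULT;
	/* returns 0 on success; the iterator continues where hdr ended */
	if (skb_copy_datagram_iter(skb, 0, iter, skb->len))
		return -EFAULT;
	return hdr_len + skb->len;
}

Truncation also falls out naturally: iov_iter_count() reports the space left, which is why the rewritten macvtap_put_user() can drop its min_t() clamping.
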
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 75472cf734de..b4b0f804e84c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -26,7 +26,7 @@ config AMD_PHY
config AMD_XGBE_PHY
tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
- depends on OF
+ depends on OF && HAS_IOMEM
---help---
Currently supports the AMD 10GbE PHY
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index c456559f6e7f..903dc3dc9ea7 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -992,7 +992,8 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
if (ret & MDIO_CTRL1_RESET)
return -ETIMEDOUT;
- return 0;
+ /* Make sure the XPCS and SerDes are in compatible states */
+ return amd_xgbe_phy_xgmii_mode(phydev);
}
static int amd_xgbe_phy_config_init(struct phy_device *phydev)
@@ -1467,20 +1468,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = {
},
};
-static int __init amd_xgbe_phy_init(void)
-{
- return phy_drivers_register(amd_xgbe_phy_driver,
- ARRAY_SIZE(amd_xgbe_phy_driver));
-}
-
-static void __exit amd_xgbe_phy_exit(void)
-{
- phy_drivers_unregister(amd_xgbe_phy_driver,
- ARRAY_SIZE(amd_xgbe_phy_driver));
-}
-
-module_init(amd_xgbe_phy_init);
-module_exit(amd_xgbe_phy_exit);
+module_phy_driver(amd_xgbe_phy_driver);
static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
{ XGBE_PHY_ID, XGBE_PHY_MASK },
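
This file and most of the PHY drivers below lose their hand-rolled init/exit pairs to module_phy_driver(). The helper introduced for this in include/linux/phy.h expands to essentially the following (quoted from memory, so treat the exact spelling as approximate):

/* Generates module_init/module_exit stubs that register an array of
 * struct phy_driver; the ARRAY_SIZE() here is why single-driver files
 * such as amd.c, et1011c.c, national.c and qsemi.c are converted to
 * one-element arrays below.
 */
#define phy_module_driver(__phy_drivers, __count)			\
static int __init phy_module_init(void)				\
{									\
	return phy_drivers_register(__phy_drivers, __count);		\
}									\
module_init(phy_module_init);						\
static void __exit phy_module_exit(void)				\
{									\
	phy_drivers_unregister(__phy_drivers, __count);			\
}									\
module_exit(phy_module_exit)

#define module_phy_driver(__phy_drivers)				\
	phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
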
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index a3fb5ceb6487..65a488f82eb8 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -61,7 +61,7 @@ static int am79c_config_intr(struct phy_device *phydev)
return err;
}
-static struct phy_driver am79c_driver = {
+static struct phy_driver am79c_driver[] = { {
.phy_id = PHY_ID_AM79C874,
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
@@ -73,20 +73,9 @@ static struct phy_driver am79c_driver = {
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static int __init am79c_init(void)
-{
- return phy_driver_register(&am79c_driver);
-}
-
-static void __exit am79c_exit(void)
-{
- phy_driver_unregister(&am79c_driver);
-}
+} };
-module_init(am79c_init);
-module_exit(am79c_exit);
+module_phy_driver(am79c_driver);
static struct mdio_device_id __maybe_unused amd_tbl[] = {
{ PHY_ID_AM79C874, 0xfffffff0 },
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fdc1b418fa6a..f80e19ac6704 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -352,19 +352,7 @@ static struct phy_driver at803x_driver[] = {
},
} };
-static int __init atheros_init(void)
-{
- return phy_drivers_register(at803x_driver,
- ARRAY_SIZE(at803x_driver));
-}
-
-static void __exit atheros_exit(void)
-{
- phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
-}
-
-module_init(atheros_init);
-module_exit(atheros_exit);
+module_phy_driver(at803x_driver);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, 0xffffffef },
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index ac55b0807853..830ec31f952f 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -100,20 +100,7 @@ static struct phy_driver bcm63xx_driver[] = {
.driver = { .owner = THIS_MODULE },
} };
-static int __init bcm63xx_phy_init(void)
-{
- return phy_drivers_register(bcm63xx_driver,
- ARRAY_SIZE(bcm63xx_driver));
-}
-
-static void __exit bcm63xx_phy_exit(void)
-{
- phy_drivers_unregister(bcm63xx_driver,
- ARRAY_SIZE(bcm63xx_driver));
-}
-
-module_init(bcm63xx_phy_init);
-module_exit(bcm63xx_phy_exit);
+module_phy_driver(bcm63xx_driver);
static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
{ 0x00406000, 0xfffffc00 },
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 1d211d369039..974ec4515269 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -39,45 +39,15 @@
#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
+#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2)
#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
+#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1)
+#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
#define CORE_EXPB0 0xb0
-static int bcm7445_config_init(struct phy_device *phydev)
-{
- int ret;
- const struct bcm7445_regs {
- int reg;
- u16 value;
- } bcm7445_regs_cfg[] = {
- /* increases ADC latency by 24ns */
- { MII_BCM54XX_EXP_SEL, 0x0038 },
- { MII_BCM54XX_EXP_DATA, 0xAB95 },
- /* increases internal 1V LDO voltage by 5% */
- { MII_BCM54XX_EXP_SEL, 0x2038 },
- { MII_BCM54XX_EXP_DATA, 0xBB22 },
- /* reduce RX low pass filter corner frequency */
- { MII_BCM54XX_EXP_SEL, 0x6038 },
- { MII_BCM54XX_EXP_DATA, 0xFFC5 },
- /* reduce RX high pass filter corner frequency */
- { MII_BCM54XX_EXP_SEL, 0x003a },
- { MII_BCM54XX_EXP_DATA, 0x2002 },
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
- ret = phy_write(phydev,
- bcm7445_regs_cfg[i].reg,
- bcm7445_regs_cfg[i].value);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static void phy_write_exp(struct phy_device *phydev,
u16 reg, u16 value)
{
@@ -102,7 +72,16 @@ static void phy_write_misc(struct phy_device *phydev,
phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
}
-static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
+static void r_rc_cal_reset(struct phy_device *phydev)
+{
+ /* Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, 0x00b0, 0x0000);
+}
+
+static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
{
/* Increase VCO range to prevent unlocking problem of PLL at low
* temp
@@ -123,11 +102,7 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
/* Switch to CORE_BASE1E */
phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
- /* Reset R_CAL/RC_CAL Engine */
- phy_write_exp(phydev, CORE_EXPB0, 0x0010);
-
- /* Disable Reset R_CAL/RC_CAL Engine */
- phy_write_exp(phydev, CORE_EXPB0, 0x0000);
+ r_rc_cal_reset(phydev);
/* write AFE_RXCONFIG_0 */
phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
@@ -147,6 +122,71 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
return 0;
}
+static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
+{
+ /* AFE_RXCONFIG_0 */
+ phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb15);
+
+ /* AFE_RXCONFIG_1 */
+ phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+
+ /* AFE_RXCONFIG_2, set rCal offset for HT=0 code and LT=-2 code */
+ phy_write_misc(phydev, AFE_RXCONFIG_2, 0x2003);
+
+ /* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
+ phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+ /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061);
+
+ /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
+ phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+
+ /* AFE_VDAC_OTHERS_0, set 1000BT Cidac=010 for all ports */
+ phy_write_misc(phydev, AFE_VDAC_OTHERS_0, 0xa020);
+
+ /* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
+ * offset for HT=0 code
+ */
+ phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+
+ /* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
+ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+
+ /* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
+ phy_write_misc(phydev, DSP_TAP10, 0x011b);
+
+ /* Reset R_CAL/RC_CAL engine */
+ r_rc_cal_reset(phydev);
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
+{
+ /* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
+ phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+
+ /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
+ phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+
+ /* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
+ * offset for HT=0 code
+ */
+ phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+
+ /* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
+ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+
+ /* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
+ phy_write_misc(phydev, DSP_TAP10, 0x011b);
+
+ /* Reset R_CAL/RC_CAL engine */
+ r_rc_cal_reset(phydev);
+
+ return 0;
+}
+
static int bcm7xxx_apd_enable(struct phy_device *phydev)
{
int val;
@@ -200,15 +240,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags);
int ret = 0;
- dev_info(&phydev->dev, "PHY revision: 0x%02x, patch: %d\n", rev, patch);
+ pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
+ dev_name(&phydev->dev), phydev->drv->name, rev, patch);
switch (rev) {
- case 0xa0:
case 0xb0:
- ret = bcm7445_config_init(phydev);
+ ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
+ break;
+ case 0xd0:
+ ret = bcm7xxx_28nm_d0_afe_config_init(phydev);
+ break;
+ case 0xe0:
+ case 0xf0:
+ /* Rev G0 introduces a roll over */
+ case 0x10:
+ ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev);
break;
default:
- ret = bcm7xxx_28nm_afe_config_init(phydev);
break;
}
@@ -336,7 +384,7 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
.features = PHY_GBIT_FEATURES | \
SUPPORTED_Pause | SUPPORTED_Asym_Pause, \
.flags = PHY_IS_INTERNAL, \
- .config_init = bcm7xxx_28nm_afe_config_init, \
+ .config_init = bcm7xxx_28nm_config_init, \
.config_aneg = genphy_config_aneg, \
.read_status = genphy_read_status, \
.resume = bcm7xxx_28nm_resume, \
@@ -416,20 +464,7 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ }
};
-static int __init bcm7xxx_phy_init(void)
-{
- return phy_drivers_register(bcm7xxx_driver,
- ARRAY_SIZE(bcm7xxx_driver));
-}
-
-static void __exit bcm7xxx_phy_exit(void)
-{
- phy_drivers_unregister(bcm7xxx_driver,
- ARRAY_SIZE(bcm7xxx_driver));
-}
-
-module_init(bcm7xxx_phy_init);
-module_exit(bcm7xxx_phy_exit);
+module_phy_driver(bcm7xxx_driver);
MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
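
The revision switch in bcm7xxx_28nm_config_init() keys off values that the platform MAC drivers (GENET, SYSTEMPORT) pass in through phydev->dev_flags. For orientation, the decoding macros in include/linux/brcmphy.h look roughly like this; they are not part of this patch:

/* dev_flags layout consumed above: bits 15..8 carry the 28nm PHY
 * revision (0xb0, 0xd0, 0xe0, ..., rolling over to 0x10 at G0),
 * bits 7..0 the patch level.
 */
#define PHY_BRCM_7XXX_REV(x)	(((x) >> 8) & 0xff)
#define PHY_BRCM_7XXX_PATCH(x)	((x) & 0xff)
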
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 799789518e87..1eca20452f03 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -216,18 +216,6 @@ static struct phy_driver bcm87xx_driver[] = {
.driver = { .owner = THIS_MODULE },
} };
-static int __init bcm87xx_init(void)
-{
- return phy_drivers_register(bcm87xx_driver,
- ARRAY_SIZE(bcm87xx_driver));
-}
-module_init(bcm87xx_init);
-
-static void __exit bcm87xx_exit(void)
-{
- phy_drivers_unregister(bcm87xx_driver,
- ARRAY_SIZE(bcm87xx_driver));
-}
-module_exit(bcm87xx_exit);
+module_phy_driver(bcm87xx_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 854f2c9a7b2b..a52afb26421b 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -654,20 +654,7 @@ static struct phy_driver broadcom_drivers[] = {
.driver = { .owner = THIS_MODULE },
} };
-static int __init broadcom_init(void)
-{
- return phy_drivers_register(broadcom_drivers,
- ARRAY_SIZE(broadcom_drivers));
-}
-
-static void __exit broadcom_exit(void)
-{
- phy_drivers_unregister(broadcom_drivers,
- ARRAY_SIZE(broadcom_drivers));
-}
-
-module_init(broadcom_init);
-module_exit(broadcom_exit);
+module_phy_driver(broadcom_drivers);
static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index b57ce0cc9657..27f5464899d4 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -129,20 +129,7 @@ static struct phy_driver cis820x_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init cicada_init(void)
-{
- return phy_drivers_register(cis820x_driver,
- ARRAY_SIZE(cis820x_driver));
-}
-
-static void __exit cicada_exit(void)
-{
- phy_drivers_unregister(cis820x_driver,
- ARRAY_SIZE(cis820x_driver));
-}
-
-module_init(cicada_init);
-module_exit(cicada_exit);
+module_phy_driver(cis820x_driver);
static struct mdio_device_id __maybe_unused cicada_tbl[] = {
{ 0x000fc410, 0x000ffff0 },
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index d2c08f625a41..0d16c7d9e1bf 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -182,20 +182,7 @@ static struct phy_driver dm91xx_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init davicom_init(void)
-{
- return phy_drivers_register(dm91xx_driver,
- ARRAY_SIZE(dm91xx_driver));
-}
-
-static void __exit davicom_exit(void)
-{
- phy_drivers_unregister(dm91xx_driver,
- ARRAY_SIZE(dm91xx_driver));
-}
-
-module_init(davicom_init);
-module_exit(davicom_exit);
+module_phy_driver(dm91xx_driver);
static struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index a8eb19ec3183..a907743816a8 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -87,7 +87,7 @@ static int et1011c_read_status(struct phy_device *phydev)
return ret;
}
-static struct phy_driver et1011c_driver = {
+static struct phy_driver et1011c_driver[] = { {
.phy_id = 0x0282f014,
.name = "ET1011C",
.phy_id_mask = 0xfffffff0,
@@ -96,20 +96,9 @@ static struct phy_driver et1011c_driver = {
.config_aneg = et1011c_config_aneg,
.read_status = et1011c_read_status,
.driver = { .owner = THIS_MODULE,},
-};
-
-static int __init et1011c_init(void)
-{
- return phy_driver_register(&et1011c_driver);
-}
-
-static void __exit et1011c_exit(void)
-{
- phy_driver_unregister(&et1011c_driver);
-}
+} };
-module_init(et1011c_init);
-module_exit(et1011c_exit);
+module_phy_driver(et1011c_driver);
static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
{ 0x0282f014, 0xfffffff0 },
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 97bf58bf4939..8644f039d922 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -253,20 +253,7 @@ static struct phy_driver icplus_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init icplus_init(void)
-{
- return phy_drivers_register(icplus_driver,
- ARRAY_SIZE(icplus_driver));
-}
-
-static void __exit icplus_exit(void)
-{
- phy_drivers_unregister(icplus_driver,
- ARRAY_SIZE(icplus_driver));
-}
-
-module_init(icplus_init);
-module_exit(icplus_exit);
+module_phy_driver(icplus_driver);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 9108f3191701..a3a5a703635b 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -312,20 +312,7 @@ static struct phy_driver lxt97x_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init lxt_init(void)
-{
- return phy_drivers_register(lxt97x_driver,
- ARRAY_SIZE(lxt97x_driver));
-}
-
-static void __exit lxt_exit(void)
-{
- phy_drivers_unregister(lxt97x_driver,
- ARRAY_SIZE(lxt97x_driver));
-}
-
-module_init(lxt_init);
-module_exit(lxt_exit);
+module_phy_driver(lxt97x_driver);
static struct mdio_device_id __maybe_unused lxt_tbl[] = {
{ 0x78100000, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 225c033b08f3..1b1698f98818 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -54,6 +54,9 @@
#define MII_M1145_PHY_EXT_CR 0x14
#define MII_M1145_RGMII_RX_DELAY 0x0080
#define MII_M1145_RGMII_TX_DELAY 0x0002
+#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4
+#define MII_M1145_HWCFG_MODE_MASK 0xf
+#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000
#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4
#define MII_M1145_HWCFG_MODE_MASK 0xf
@@ -123,6 +126,9 @@
#define MII_M1116R_CONTROL_REG_MAC 21
+#define MII_88E3016_PHY_SPEC_CTRL 0x10
+#define MII_88E3016_DISABLE_SCRAMBLER 0x0200
+#define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030
MODULE_DESCRIPTION("Marvell PHY driver");
MODULE_AUTHOR("Andy Fleming");
@@ -439,6 +445,25 @@ static int m88e1116r_config_init(struct phy_device *phydev)
return 0;
}
+static int m88e3016_config_init(struct phy_device *phydev)
+{
+ int reg;
+
+ /* Enable Scrambler and Auto-Crossover */
+ reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
+ if (reg < 0)
+ return reg;
+
+ reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
+ reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
+
+ reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
+ if (reg < 0)
+ return reg;
+
+ return 0;
+}
+
static int m88e1111_config_init(struct phy_device *phydev)
{
int err;
@@ -625,6 +650,7 @@ static int m88e1149_config_init(struct phy_device *phydev)
static int m88e1145_config_init(struct phy_device *phydev)
{
int err;
+ int temp;
/* Take care of errata E0 & E1 */
err = phy_write(phydev, 0x1d, 0x001b);
@@ -682,7 +708,7 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+ temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
if (temp < 0)
return temp;
@@ -789,6 +815,12 @@ static int marvell_read_status(struct phy_device *phydev)
return 0;
}
+static int marvell_aneg_done(struct phy_device *phydev)
+{
+ int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
+ return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
+}
+
static int m88e1121_did_interrupt(struct phy_device *phydev)
{
int imask;
@@ -1069,22 +1101,26 @@ static struct phy_driver marvell_drivers[] = {
.suspend = &genphy_suspend,
.driver = { .owner = THIS_MODULE },
},
+ {
+ .phy_id = MARVELL_PHY_ID_88E3016,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E3016",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .config_init = &m88e3016_config_init,
+ .aneg_done = &marvell_aneg_done,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .did_interrupt = &m88e1121_did_interrupt,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ .driver = { .owner = THIS_MODULE },
+ },
};
-static int __init marvell_init(void)
-{
- return phy_drivers_register(marvell_drivers,
- ARRAY_SIZE(marvell_drivers));
-}
-
-static void __exit marvell_exit(void)
-{
- phy_drivers_unregister(marvell_drivers,
- ARRAY_SIZE(marvell_drivers));
-}
-
-module_init(marvell_init);
-module_exit(marvell_exit);
+module_phy_driver(marvell_drivers);
static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
@@ -1098,6 +1134,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
{ }
};
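
The new marvell_aneg_done() is what makes the 88E3016 entry behave: it holds the PHY state machine until the PHY-specific status register reports speed and duplex as resolved, rather than trusting BMSR alone. For comparison, the generic fallback it overrides reads roughly as follows (from drivers/net/phy/phy_device.c of this era):

/* Generic test: autonegotiation is considered done once BMSR sets
 * ANEGCOMPLETE; the Marvell version above waits for the resolved bit
 * in MII_M1011_PHY_STATUS instead.
 */
int genphy_aneg_done(struct phy_device *phydev)
{
	int retval = phy_read(phydev, MII_BMSR);

	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
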
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8c2a29a9bd7f..c530de1e63f5 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -6,6 +6,7 @@
* Author: David J. Choi
*
* Copyright (c) 2010-2013 Micrel, Inc.
+ * Copyright (c) 2014 Johan Hovold <johan@kernel.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -30,30 +31,32 @@
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
-#define KSZPHY_OMSO_B_CAST_OFF (1 << 9)
-#define KSZPHY_OMSO_RMII_OVERRIDE (1 << 1)
-#define KSZPHY_OMSO_MII_OVERRIDE (1 << 0)
+#define KSZPHY_OMSO_B_CAST_OFF BIT(9)
+#define KSZPHY_OMSO_RMII_OVERRIDE BIT(1)
+#define KSZPHY_OMSO_MII_OVERRIDE BIT(0)
/* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B
-#define KSZPHY_INTCS_JABBER (1 << 15)
-#define KSZPHY_INTCS_RECEIVE_ERR (1 << 14)
-#define KSZPHY_INTCS_PAGE_RECEIVE (1 << 13)
-#define KSZPHY_INTCS_PARELLEL (1 << 12)
-#define KSZPHY_INTCS_LINK_PARTNER_ACK (1 << 11)
-#define KSZPHY_INTCS_LINK_DOWN (1 << 10)
-#define KSZPHY_INTCS_REMOTE_FAULT (1 << 9)
-#define KSZPHY_INTCS_LINK_UP (1 << 8)
+#define KSZPHY_INTCS_JABBER BIT(15)
+#define KSZPHY_INTCS_RECEIVE_ERR BIT(14)
+#define KSZPHY_INTCS_PAGE_RECEIVE BIT(13)
+#define KSZPHY_INTCS_PARELLEL BIT(12)
+#define KSZPHY_INTCS_LINK_PARTNER_ACK BIT(11)
+#define KSZPHY_INTCS_LINK_DOWN BIT(10)
+#define KSZPHY_INTCS_REMOTE_FAULT BIT(9)
+#define KSZPHY_INTCS_LINK_UP BIT(8)
#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
KSZPHY_INTCS_LINK_DOWN)
-/* general PHY control reg in vendor specific block. */
-#define MII_KSZPHY_CTRL 0x1F
+/* PHY Control 1 */
+#define MII_KSZPHY_CTRL_1 0x1e
+
+/* PHY Control 2 / PHY Control (if no PHY Control 1) */
+#define MII_KSZPHY_CTRL_2 0x1f
+#define MII_KSZPHY_CTRL MII_KSZPHY_CTRL_2
/* bitmap of PHY register to set interrupt mode */
-#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
-#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
-#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
-#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
+#define KSZPHY_CTRL_INT_ACTIVE_HIGH BIT(9)
+#define KSZPHY_RMII_REF_CLK_SEL BIT(7)
/* Write/read to/from extended registers */
#define MII_KSZPHY_EXTREG 0x0b
@@ -69,20 +72,46 @@
#define PS_TO_REG 200
-static int ksz_config_flags(struct phy_device *phydev)
-{
- int regval;
+struct kszphy_type {
+ u32 led_mode_reg;
+ u16 interrupt_level_mask;
+ bool has_broadcast_disable;
+ bool has_rmii_ref_clk_sel;
+};
- if (phydev->dev_flags & (MICREL_PHY_50MHZ_CLK | MICREL_PHY_25MHZ_CLK)) {
- regval = phy_read(phydev, MII_KSZPHY_CTRL);
- if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK)
- regval |= KSZ8051_RMII_50MHZ_CLK;
- else
- regval &= ~KSZ8051_RMII_50MHZ_CLK;
- return phy_write(phydev, MII_KSZPHY_CTRL, regval);
- }
- return 0;
-}
+struct kszphy_priv {
+ const struct kszphy_type *type;
+ int led_mode;
+ bool rmii_ref_clk_sel;
+ bool rmii_ref_clk_sel_val;
+};
+
+static const struct kszphy_type ksz8021_type = {
+ .led_mode_reg = MII_KSZPHY_CTRL_2,
+ .has_rmii_ref_clk_sel = true,
+};
+
+static const struct kszphy_type ksz8041_type = {
+ .led_mode_reg = MII_KSZPHY_CTRL_1,
+};
+
+static const struct kszphy_type ksz8051_type = {
+ .led_mode_reg = MII_KSZPHY_CTRL_2,
+};
+
+static const struct kszphy_type ksz8081_type = {
+ .led_mode_reg = MII_KSZPHY_CTRL_2,
+ .has_broadcast_disable = true,
+ .has_rmii_ref_clk_sel = true,
+};
+
+static const struct kszphy_type ks8737_type = {
+ .interrupt_level_mask = BIT(14),
+};
+
+static const struct kszphy_type ksz9021_type = {
+ .interrupt_level_mask = BIT(14),
+};
static int kszphy_extended_write(struct phy_device *phydev,
u32 regnum, u16 val)
@@ -108,112 +137,137 @@ static int kszphy_ack_interrupt(struct phy_device *phydev)
return (rc < 0) ? rc : 0;
}
-static int kszphy_set_interrupt(struct phy_device *phydev)
+static int kszphy_config_intr(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
int temp;
- temp = (PHY_INTERRUPT_ENABLED == phydev->interrupts) ?
- KSZPHY_INTCS_ALL : 0;
- return phy_write(phydev, MII_KSZPHY_INTCS, temp);
-}
+ u16 mask;
-static int kszphy_config_intr(struct phy_device *phydev)
-{
- int temp, rc;
+ if (type && type->interrupt_level_mask)
+ mask = type->interrupt_level_mask;
+ else
+ mask = KSZPHY_CTRL_INT_ACTIVE_HIGH;
/* set the interrupt pin active low */
temp = phy_read(phydev, MII_KSZPHY_CTRL);
- temp &= ~KSZPHY_CTRL_INT_ACTIVE_HIGH;
+ if (temp < 0)
+ return temp;
+ temp &= ~mask;
phy_write(phydev, MII_KSZPHY_CTRL, temp);
- rc = kszphy_set_interrupt(phydev);
- return rc < 0 ? rc : 0;
-}
-static int ksz9021_config_intr(struct phy_device *phydev)
-{
- int temp, rc;
+ /* enable / disable interrupts */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ temp = KSZPHY_INTCS_ALL;
+ else
+ temp = 0;
- /* set the interrupt pin active low */
- temp = phy_read(phydev, MII_KSZPHY_CTRL);
- temp &= ~KSZ9021_CTRL_INT_ACTIVE_HIGH;
- phy_write(phydev, MII_KSZPHY_CTRL, temp);
- rc = kszphy_set_interrupt(phydev);
- return rc < 0 ? rc : 0;
+ return phy_write(phydev, MII_KSZPHY_INTCS, temp);
}
-static int ks8737_config_intr(struct phy_device *phydev)
+static int kszphy_rmii_clk_sel(struct phy_device *phydev, bool val)
{
- int temp, rc;
+ int ctrl;
- /* set the interrupt pin active low */
- temp = phy_read(phydev, MII_KSZPHY_CTRL);
- temp &= ~KS8737_CTRL_INT_ACTIVE_HIGH;
- phy_write(phydev, MII_KSZPHY_CTRL, temp);
- rc = kszphy_set_interrupt(phydev);
- return rc < 0 ? rc : 0;
-}
-
-static int kszphy_setup_led(struct phy_device *phydev,
- unsigned int reg, unsigned int shift)
-{
+ ctrl = phy_read(phydev, MII_KSZPHY_CTRL);
+ if (ctrl < 0)
+ return ctrl;
- struct device *dev = &phydev->dev;
- struct device_node *of_node = dev->of_node;
- int rc, temp;
- u32 val;
+ if (val)
+ ctrl |= KSZPHY_RMII_REF_CLK_SEL;
+ else
+ ctrl &= ~KSZPHY_RMII_REF_CLK_SEL;
- if (!of_node && dev->parent->of_node)
- of_node = dev->parent->of_node;
+ return phy_write(phydev, MII_KSZPHY_CTRL, ctrl);
+}
- if (of_property_read_u32(of_node, "micrel,led-mode", &val))
- return 0;
+static int kszphy_setup_led(struct phy_device *phydev, u32 reg, int val)
+{
+ int rc, temp, shift;
+
+ switch (reg) {
+ case MII_KSZPHY_CTRL_1:
+ shift = 14;
+ break;
+ case MII_KSZPHY_CTRL_2:
+ shift = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
temp = phy_read(phydev, reg);
- if (temp < 0)
- return temp;
+ if (temp < 0) {
+ rc = temp;
+ goto out;
+ }
temp &= ~(3 << shift);
temp |= val << shift;
rc = phy_write(phydev, reg, temp);
+out:
+ if (rc < 0)
+ dev_err(&phydev->dev, "failed to set led mode\n");
- return rc < 0 ? rc : 0;
+ return rc;
}
-static int kszphy_config_init(struct phy_device *phydev)
+/* Disable PHY address 0 as the broadcast address, so that it can be used as a
+ * unique (non-broadcast) address on a shared bus.
+ */
+static int kszphy_broadcast_disable(struct phy_device *phydev)
{
- return 0;
-}
+ int ret;
-static int kszphy_config_init_led8041(struct phy_device *phydev)
-{
- /* single led control, register 0x1e bits 15..14 */
- return kszphy_setup_led(phydev, 0x1e, 14);
+ ret = phy_read(phydev, MII_KSZPHY_OMSO);
+ if (ret < 0)
+ goto out;
+
+ ret = phy_write(phydev, MII_KSZPHY_OMSO, ret | KSZPHY_OMSO_B_CAST_OFF);
+out:
+ if (ret)
+ dev_err(&phydev->dev, "failed to disable broadcast address\n");
+
+ return ret;
}
-static int ksz8021_config_init(struct phy_device *phydev)
+static int kszphy_config_init(struct phy_device *phydev)
{
- const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
- int rc;
+ struct kszphy_priv *priv = phydev->priv;
+ const struct kszphy_type *type;
+ int ret;
- rc = kszphy_setup_led(phydev, 0x1f, 4);
- if (rc)
- dev_err(&phydev->dev, "failed to set led mode\n");
+ if (!priv)
+ return 0;
- rc = ksz_config_flags(phydev);
- if (rc < 0)
- return rc;
- rc = phy_write(phydev, MII_KSZPHY_OMSO, val);
- return rc < 0 ? rc : 0;
+ type = priv->type;
+
+ if (type->has_broadcast_disable)
+ kszphy_broadcast_disable(phydev);
+
+ if (priv->rmii_ref_clk_sel) {
+ ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
+ if (ret) {
+ dev_err(&phydev->dev, "failed to set rmii reference clock\n");
+ return ret;
+ }
+ }
+
+ if (priv->led_mode >= 0)
+ kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
+
+ return 0;
}
-static int ks8051_config_init(struct phy_device *phydev)
+static int ksz8021_config_init(struct phy_device *phydev)
{
int rc;
- rc = kszphy_setup_led(phydev, 0x1f, 4);
+ rc = kszphy_config_init(phydev);
if (rc)
- dev_err(&phydev->dev, "failed to set led mode\n");
+ return rc;
+
+ rc = kszphy_broadcast_disable(phydev);
- rc = ksz_config_flags(phydev);
return rc < 0 ? rc : 0;
}
@@ -394,8 +448,8 @@ static int ksz9031_config_init(struct phy_device *phydev)
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
-#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
-#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
+#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6)
+#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4)
static int ksz8873mll_read_status(struct phy_device *phydev)
{
int regval;
@@ -446,24 +500,62 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
{
}
-static int ksz8021_probe(struct phy_device *phydev)
+static int kszphy_probe(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ struct device_node *np = phydev->dev.of_node;
+ struct kszphy_priv *priv;
struct clk *clk;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ priv->type = type;
+
+ if (type->led_mode_reg) {
+ ret = of_property_read_u32(np, "micrel,led-mode",
+ &priv->led_mode);
+ if (ret)
+ priv->led_mode = -1;
+
+ if (priv->led_mode > 3) {
+ dev_err(&phydev->dev, "invalid led mode: 0x%02x\n",
+ priv->led_mode);
+ priv->led_mode = -1;
+ }
+ } else {
+ priv->led_mode = -1;
+ }
clk = devm_clk_get(&phydev->dev, "rmii-ref");
if (!IS_ERR(clk)) {
unsigned long rate = clk_get_rate(clk);
+ bool rmii_ref_clk_sel_25_mhz;
+
+ priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
+ rmii_ref_clk_sel_25_mhz = of_property_read_bool(np,
+ "micrel,rmii-reference-clock-select-25-mhz");
if (rate > 24500000 && rate < 25500000) {
- phydev->dev_flags |= MICREL_PHY_25MHZ_CLK;
+ priv->rmii_ref_clk_sel_val = rmii_ref_clk_sel_25_mhz;
} else if (rate > 49500000 && rate < 50500000) {
- phydev->dev_flags |= MICREL_PHY_50MHZ_CLK;
+ priv->rmii_ref_clk_sel_val = !rmii_ref_clk_sel_25_mhz;
} else {
dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate);
return -EINVAL;
}
}
+ /* Support legacy board-file configuration */
+ if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ priv->rmii_ref_clk_sel = true;
+ priv->rmii_ref_clk_sel_val = true;
+ }
+
return 0;
}
@@ -474,11 +566,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KS8737",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
- .config_intr = ks8737_config_intr,
+ .config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
@@ -489,7 +582,8 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .probe = ksz8021_probe,
+ .driver_data = &ksz8021_type,
+ .probe = kszphy_probe,
.config_init = ksz8021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -505,7 +599,8 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .probe = ksz8021_probe,
+ .driver_data = &ksz8021_type,
+ .probe = kszphy_probe,
.config_init = ksz8021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -521,7 +616,9 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init_led8041,
+ .driver_data = &ksz8041_type,
+ .probe = kszphy_probe,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -536,7 +633,9 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init_led8041,
+ .driver_data = &ksz8041_type,
+ .probe = kszphy_probe,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -551,7 +650,9 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = ks8051_config_init,
+ .driver_data = &ksz8051_type,
+ .probe = kszphy_probe,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -565,7 +666,9 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init_led8041,
+ .driver_data = &ksz8041_type,
+ .probe = kszphy_probe,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -579,6 +682,8 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00fffff0,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .driver_data = &ksz8081_type,
+ .probe = kszphy_probe,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -607,11 +712,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .driver_data = &ksz9021_type,
.config_init = ksz9021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
- .config_intr = ksz9021_config_intr,
+ .config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_mmd_indirect = ksz9021_rd_mmd_phyreg,
@@ -623,11 +729,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ9031 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .driver_data = &ksz9021_type,
.config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
- .config_intr = ksz9021_config_intr,
+ .config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, },
@@ -657,20 +764,7 @@ static struct phy_driver ksphy_driver[] = {
.driver = { .owner = THIS_MODULE, },
} };
-static int __init ksphy_init(void)
-{
- return phy_drivers_register(ksphy_driver,
- ARRAY_SIZE(ksphy_driver));
-}
-
-static void __exit ksphy_exit(void)
-{
- phy_drivers_unregister(ksphy_driver,
- ARRAY_SIZE(ksphy_driver));
-}
-
-module_init(ksphy_init);
-module_exit(ksphy_exit);
+module_phy_driver(ksphy_driver);
MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 9a5f234d95b0..0a7b9c7f09a2 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -129,7 +129,7 @@ static int ns_config_init(struct phy_device *phydev)
return ns_ack_interrupt(phydev);
}
-static struct phy_driver dp83865_driver = {
+static struct phy_driver dp83865_driver[] = { {
.phy_id = DP83865_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83865",
@@ -141,25 +141,14 @@ static struct phy_driver dp83865_driver = {
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
.driver = {.owner = THIS_MODULE,}
-};
+} };
-static int __init ns_init(void)
-{
- return phy_driver_register(&dp83865_driver);
-}
-
-static void __exit ns_exit(void)
-{
- phy_driver_unregister(&dp83865_driver);
-}
+module_phy_driver(dp83865_driver);
MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");
-module_init(ns_init);
-module_exit(ns_exit);
-
static struct mdio_device_id __maybe_unused ns_tbl[] = {
{ DP83865_PHY_ID, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index fe0d0a15d5e1..be4c6f7c3645 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -111,7 +111,7 @@ static int qs6612_config_intr(struct phy_device *phydev)
}
-static struct phy_driver qs6612_driver = {
+static struct phy_driver qs6612_driver[] = { {
.phy_id = 0x00181440,
.name = "QS6612",
.phy_id_mask = 0xfffffff0,
@@ -123,20 +123,9 @@ static struct phy_driver qs6612_driver = {
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static int __init qs6612_init(void)
-{
- return phy_driver_register(&qs6612_driver);
-}
-
-static void __exit qs6612_exit(void)
-{
- phy_driver_unregister(&qs6612_driver);
-}
+} };
-module_init(qs6612_init);
-module_exit(qs6612_exit);
+module_phy_driver(qs6612_driver);
static struct mdio_device_id __maybe_unused qs6612_tbl[] = {
{ 0x00181440, 0xfffffff0 },
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 45483fdfbe06..96a0f0fab3ca 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -101,18 +101,7 @@ static struct phy_driver realtek_drvs[] = {
},
};
-static int __init realtek_init(void)
-{
- return phy_drivers_register(realtek_drvs, ARRAY_SIZE(realtek_drvs));
-}
-
-static void __exit realtek_exit(void)
-{
- phy_drivers_unregister(realtek_drvs, ARRAY_SIZE(realtek_drvs));
-}
-
-module_init(realtek_init);
-module_exit(realtek_exit);
+module_phy_driver(realtek_drvs);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc912, 0x001fffff },
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index a4b08198fb9f..c0f6479e19d4 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -250,24 +250,12 @@ static struct phy_driver smsc_phy_driver[] = {
.driver = { .owner = THIS_MODULE, }
} };
-static int __init smsc_init(void)
-{
- return phy_drivers_register(smsc_phy_driver,
- ARRAY_SIZE(smsc_phy_driver));
-}
-
-static void __exit smsc_exit(void)
-{
- phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
-}
+module_phy_driver(smsc_phy_driver);
MODULE_DESCRIPTION("SMSC PHY driver");
MODULE_AUTHOR("Herbert Valerio Riedel");
MODULE_LICENSE("GPL");
-module_init(smsc_init);
-module_exit(smsc_exit);
-
static struct mdio_device_id __maybe_unused smsc_tbl[] = {
{ 0x0007c0a0, 0xfffffff0 },
{ 0x0007c0b0, 0xfffffff0 },
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index eab57fc5b967..46530159256b 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -353,7 +353,9 @@ static int ks8995_probe(struct spi_device *spi)
static int ks8995_remove(struct spi_device *spi)
{
- sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+ struct ks8995_switch *ks = spi_get_drvdata(spi);
+
+ sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
return 0;
}
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 5e1eb138916f..3fc199b773e6 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -112,20 +112,7 @@ static struct phy_driver ste10xp_pdriver[] = {
.driver = {.owner = THIS_MODULE,}
} };
-static int __init ste10Xp_init(void)
-{
- return phy_drivers_register(ste10xp_pdriver,
- ARRAY_SIZE(ste10xp_pdriver));
-}
-
-static void __exit ste10Xp_exit(void)
-{
- phy_drivers_unregister(ste10xp_pdriver,
- ARRAY_SIZE(ste10xp_pdriver));
-}
-
-module_init(ste10Xp_init);
-module_exit(ste10Xp_exit);
+module_phy_driver(ste10xp_pdriver);
static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
{ STE101P_PHY_ID, 0xfffffff0 },
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 5dc0935da99c..76cad712ddb2 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -311,19 +311,7 @@ static struct phy_driver vsc82xx_driver[] = {
.driver = { .owner = THIS_MODULE,},
} };
-static int __init vsc82xx_init(void)
-{
- return phy_drivers_register(vsc82xx_driver,
- ARRAY_SIZE(vsc82xx_driver));
-}
-
-static void __exit vsc82xx_exit(void)
-{
- phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
-}
-
-module_init(vsc82xx_init);
-module_exit(vsc82xx_exit);
+module_phy_driver(vsc82xx_driver);
static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 794a47329368..af034dba9bd6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -417,6 +417,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
ssize_t ret;
struct sk_buff *skb = NULL;
struct iovec iov;
+ struct iov_iter to;
ret = count;
@@ -462,7 +463,8 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
ret = -EFAULT;
iov.iov_base = buf;
iov.iov_len = count;
- if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
+ iov_iter_init(&to, READ, &iov, 1, count);
+ if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
goto outf;
ret = skb->len;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 6c9c16d76935..d2408a5e43a6 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -869,7 +869,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
start = (char *)&ph->tag[0];
- error = memcpy_fromiovec(start, m->msg_iov, total_len);
+ error = memcpy_from_msg(start, m, total_len);
if (error < 0) {
kfree_skb(skb);
goto end;
@@ -981,7 +981,7 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
if (skb) {
total_len = min_t(size_t, total_len, skb->len);
- error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
+ error = skb_copy_datagram_msg(skb, 0, m, total_len);
if (error == 0) {
consume_skb(skb);
return total_len;
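
memcpy_from_msg() and skb_copy_datagram_msg(), used in pppoe above, are thin wrappers that keep protocol code from touching msghdr internals while those internals migrate to msg_iter. In a tree where the msg_iter conversion has landed, they reduce to roughly the following (paraphrased from include/linux/skbuff.h; the exact forms varied during the transition):

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	/* pull len bytes from the message's iterator, all or nothing */
	return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
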
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2368395d8ae5..93e224217e24 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1179,6 +1179,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_enable_netpoll;
}
+ if (!(dev->features & NETIF_F_LRO))
+ dev_disable_lro(port_dev);
+
err = netdev_rx_handler_register(port_dev, team_handle_frame,
port);
if (err) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9dd3746994a4..f3e992ed87ac 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -71,6 +71,7 @@
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
@@ -818,7 +819,7 @@ drop:
skb_tx_error(skb);
kfree_skb(skb);
rcu_read_unlock();
- return NETDEV_TX_OK;
+ return NET_XMIT_DROP;
}
static void tun_net_mclist(struct net_device *dev)
@@ -1011,28 +1012,29 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
- void *msg_control, const struct iovec *iv,
- size_t total_len, size_t count, int noblock)
+ void *msg_control, struct iov_iter *from,
+ int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
+ size_t total_len = iov_iter_count(from);
size_t len = total_len, align = NET_SKB_PAD, linear;
struct virtio_net_hdr gso = { 0 };
int good_linear;
- int offset = 0;
int copylen;
bool zerocopy = false;
int err;
u32 rxhash;
+ ssize_t n;
if (!(tun->flags & TUN_NO_PI)) {
if (len < sizeof(pi))
return -EINVAL;
len -= sizeof(pi);
- if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
+ n = copy_from_iter(&pi, sizeof(pi), from);
+ if (n != sizeof(pi))
return -EFAULT;
- offset += sizeof(pi);
}
if (tun->flags & TUN_VNET_HDR) {
@@ -1040,7 +1042,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EINVAL;
len -= tun->vnet_hdr_sz;
- if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
+ n = copy_from_iter(&gso, sizeof(gso), from);
+ if (n != sizeof(gso))
return -EFAULT;
if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -1049,7 +1052,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (gso.hdr_len > len)
return -EINVAL;
- offset += tun->vnet_hdr_sz;
+ iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -1062,6 +1065,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
good_linear = SKB_MAX_HEAD(align);
if (msg_control) {
+ struct iov_iter i = *from;
+
/* There are 256 bytes to be copied in skb, so there is
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
@@ -1070,7 +1075,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (copylen > good_linear)
copylen = good_linear;
linear = copylen;
- if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+ iov_iter_advance(&i, copylen);
+ if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
zerocopy = true;
}
@@ -1090,9 +1096,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (zerocopy)
- err = zerocopy_sg_from_iovec(skb, iv, offset, count);
+ err = zerocopy_sg_from_iter(skb, from);
else {
- err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+ err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (!err && msg_control) {
struct ubuf_info *uarg = msg_control;
uarg->callback(uarg, false);
@@ -1206,8 +1212,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return total_len;
}
-static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct tun_struct *tun = tun_get(file);
@@ -1217,10 +1222,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
if (!tun)
return -EBADFD;
- tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
-
- result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
- count, file->f_flags & O_NONBLOCK);
+ result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
tun_put(tun);
return result;
@@ -1230,11 +1232,11 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
static ssize_t tun_put_user(struct tun_struct *tun,
struct tun_file *tfile,
struct sk_buff *skb,
- const struct iovec *iv, int len)
+ struct iov_iter *iter)
{
struct tun_pi pi = { 0, skb->protocol };
- ssize_t total = 0;
- int vlan_offset = 0, copied;
+ ssize_t total;
+ int vlan_offset = 0;
int vlan_hlen = 0;
int vnet_hdr_sz = 0;
@@ -1244,23 +1246,25 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (tun->flags & TUN_VNET_HDR)
vnet_hdr_sz = tun->vnet_hdr_sz;
+ total = skb->len + vlan_hlen + vnet_hdr_sz;
+
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) < 0)
+ if (iov_iter_count(iter) < sizeof(pi))
return -EINVAL;
- if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
+ total += sizeof(pi);
+ if (iov_iter_count(iter) < total) {
/* Packet will be stripped */
pi.flags |= TUN_PKT_STRIP;
}
- if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
+ if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
return -EFAULT;
- total += sizeof(pi);
}
if (vnet_hdr_sz) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= vnet_hdr_sz) < 0)
+ if (iov_iter_count(iter) < vnet_hdr_sz)
return -EINVAL;
if (skb_is_gso(skb)) {
@@ -1299,17 +1303,14 @@ static ssize_t tun_put_user(struct tun_struct *tun,
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
- if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
- sizeof(gso))))
+ if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
return -EFAULT;
- total += vnet_hdr_sz;
+
+ iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
}
- copied = total;
- len = min_t(int, skb->len + vlan_hlen, len);
- total += skb->len + vlan_hlen;
if (vlan_hlen) {
- int copy, ret;
+ int ret;
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
@@ -1320,41 +1321,36 @@ static ssize_t tun_put_user(struct tun_struct *tun,
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
- copy = min_t(int, vlan_offset, len);
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
- len -= copy;
- copied += copy;
- if (ret || !len)
+ ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
+ if (ret || !iov_iter_count(iter))
goto done;
- copy = min_t(int, sizeof(veth), len);
- ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
- len -= copy;
- copied += copy;
- if (ret || !len)
+ ret = copy_to_iter(&veth, sizeof(veth), iter);
+ if (ret != sizeof(veth) || !iov_iter_count(iter))
goto done;
}
- skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+ skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
done:
tun->dev->stats.tx_packets++;
- tun->dev->stats.tx_bytes += len;
+ tun->dev->stats.tx_bytes += skb->len + vlan_hlen;
return total;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
- const struct iovec *iv, ssize_t len, int noblock)
+ struct iov_iter *to,
+ int noblock)
{
struct sk_buff *skb;
- ssize_t ret = 0;
+ ssize_t ret;
int peeked, err, off = 0;
tun_debug(KERN_INFO, tun, "tun_do_read\n");
- if (!len)
- return ret;
+ if (!iov_iter_count(to))
+ return 0;
if (tun->dev->reg_state != NETREG_REGISTERED)
return -EIO;
@@ -1362,37 +1358,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
/* Read frames from queue */
skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
&peeked, &off, &err);
- if (skb) {
- ret = tun_put_user(tun, tfile, skb, iv, len);
+ if (!skb)
+ return 0;
+
+ ret = tun_put_user(tun, tfile, skb, to);
+ if (unlikely(ret < 0))
kfree_skb(skb);
- } else
- ret = err;
+ else
+ consume_skb(skb);
return ret;
}
-static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
- ssize_t len, ret;
+ ssize_t len = iov_iter_count(to), ret;
if (!tun)
return -EBADFD;
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = tun_do_read(tun, tfile, iv, len,
- file->f_flags & O_NONBLOCK);
+ ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
-out:
tun_put(tun);
return ret;
}
@@ -1462,8 +1452,9 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
if (!tun)
return -EBADFD;
- ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
- m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+
+ ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
+ m->msg_flags & MSG_DONTWAIT);
tun_put(tun);
return ret;
}
@@ -1488,8 +1479,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
- flags & MSG_DONTWAIT);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2245,10 +2235,10 @@ static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
static const struct file_operations tun_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .read = do_sync_read,
- .aio_read = tun_chr_aio_read,
- .write = do_sync_write,
- .aio_write = tun_chr_aio_write,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = tun_chr_read_iter,
+ .write_iter = tun_chr_write_iter,
.poll = tun_chr_poll,
.unlocked_ioctl = tun_chr_ioctl,
#ifdef CONFIG_COMPAT
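
The .read/.write slots switching to new_sync_read/new_sync_write (here and in macvtap earlier) is what keeps plain read(2)/write(2) working after the aio entry points are gone: the VFS helper wraps the user buffer in a one-element iov_iter and forwards to ->read_iter. Simplified from fs/read_write.c of this period (async completion and some error handling omitted, so treat it as a sketch):

ssize_t new_sync_read(struct file *filp, char __user *buf,
		      size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, READ, &iov, 1, len);

	/* tun_chr_read_iter() ends up being called here */
	ret = filp->f_op->read_iter(&kiocb, &iter);
	*ppos = kiocb.ki_pos;
	return ret;
}
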
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 816d511e34d3..bf49792062a2 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -487,8 +487,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
- if (dev->driver_priv)
- kfree(dev->driver_priv);
+ kfree(dev->driver_priv);
}
static const struct ethtool_ops ax88178_ethtool_ops = {
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 2ec1500d0077..415ce8b882c6 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL);
+ page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC);
if (!page)
return -ENOMEM;
@@ -212,7 +212,7 @@ resubmit:
if (page)
put_page(page);
if (req)
- rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
+ rx_submit(pnd, req, GFP_ATOMIC);
}
static int usbpn_close(struct net_device *dev);
@@ -231,7 +231,7 @@ static int usbpn_open(struct net_device *dev)
for (i = 0; i < rxq_size; i++) {
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
- if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
+ if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
usb_free_urb(req);
usbpn_close(dev);
return -ENOMEM;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index d3920b54a92c..9311a08565be 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -71,18 +71,19 @@ static void usbnet_cdc_update_filter(struct usbnet *dev)
{
struct cdc_state *info = (void *) &dev->data;
struct usb_interface *intf = info->control;
+ struct net_device *net = dev->net;
- u16 cdc_filter =
- USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED |
- USB_CDC_PACKET_TYPE_BROADCAST;
+ u16 cdc_filter = USB_CDC_PACKET_TYPE_DIRECTED
+ | USB_CDC_PACKET_TYPE_BROADCAST;
- if (dev->net->flags & IFF_PROMISC)
- cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
-
- /* FIXME cdc-ether has some multicast code too, though it complains
- * in routine cases. info->ether describes the multicast support.
- * Implement that here, manipulating the cdc filter as needed.
+ /* filtering on the device is an optional feature and not worth
+ * the hassle, so we only roughly care about snooping: if any
+ * multicast is requested, we take every multicast
+ */
+ if (net->flags & IFF_PROMISC)
+ cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
+ if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI))
+ cdc_filter |= USB_CDC_PACKET_TYPE_ALL_MULTICAST;
usb_control_msg(dev->udev,
usb_sndctrlpipe(dev->udev, 0),
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 5ee7a1dbc023..96fc8a5bde84 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -402,7 +402,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
/* map MBIM session to VLAN */
if (tci)
- vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
err:
return skb;
}
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index babda7d8693e..9c5aa922a9f4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2746,8 +2746,7 @@ exit:
tty_unregister_device(tty_drv, serial->minor);
kfree(serial);
}
- if (hso_dev)
- kfree(hso_dev);
+ kfree(hso_dev);
return NULL;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c6554c7a8147..2d1c77e81836 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,6 +24,7 @@
#include <net/ip6_checksum.h>
#include <uapi/linux/mdio.h>
#include <linux/mdio.h>
+#include <linux/usb/cdc.h>
/* Version Information */
#define DRIVER_VERSION "v1.07.0 (2014/10/09)"
@@ -461,18 +462,11 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
-#define PRODUCT_ID_RTL8152 0x8152
-#define PRODUCT_ID_RTL8153 0x8153
-
#define VENDOR_ID_SAMSUNG 0x04e8
-#define PRODUCT_ID_SAMSUNG 0xa101
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
-#define REALTEK_USB_DEVICE(vend, prod) \
- USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
-
struct tally_counter {
__le64 tx_packets;
__le64 rx_packets;
@@ -486,7 +480,7 @@ struct tally_counter {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
};
struct rx_desc {
@@ -690,6 +684,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
}
}
+ if (ret == -ENODEV)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
return ret;
}
@@ -757,6 +754,9 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
}
error1:
+ if (ret == -ENODEV)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
return ret;
}
@@ -1030,7 +1030,6 @@ static void read_bulk_callback(struct urb *urb)
int status = urb->status;
struct rx_agg *agg;
struct r8152 *tp;
- int result;
agg = urb->context;
if (!agg)
@@ -1081,15 +1080,7 @@ static void read_bulk_callback(struct urb *urb)
break;
}
- result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
- if (result == -ENODEV) {
- netif_device_detach(tp->netdev);
- } else if (result) {
- spin_lock(&tp->rx_lock);
- list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock(&tp->rx_lock);
- tasklet_schedule(&tp->tl);
- }
+ r8152_submit_rx(tp, agg, GFP_ATOMIC);
}
static void write_bulk_callback(struct urb *urb)
@@ -1190,11 +1181,13 @@ static void intr_callback(struct urb *urb)
resubmit:
res = usb_submit_urb(urb, GFP_ATOMIC);
- if (res == -ENODEV)
+ if (res == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
- else if (res)
+ } else if (res) {
netif_err(tp, intr, tp->netdev,
"can't resubmit intr, status %d\n", res);
+ }
}
static inline void *rx_agg_align(void *data)
@@ -1250,7 +1243,6 @@ static int alloc_all_mem(struct r8152 *tp)
spin_lock_init(&tp->rx_lock);
spin_lock_init(&tp->tx_lock);
- INIT_LIST_HEAD(&tp->rx_done);
INIT_LIST_HEAD(&tp->tx_free);
skb_queue_head_init(&tp->tx_queue);
@@ -1676,7 +1668,6 @@ static void rx_bottom(struct r8152 *tp)
int len_used = 0;
struct urb *urb;
u8 *rx_data;
- int ret;
list_del_init(cursor);
@@ -1729,13 +1720,7 @@ find_next_rx:
}
submit:
- ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
- if (ret && ret != -ENODEV) {
- spin_lock_irqsave(&tp->rx_lock, flags);
- list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
- tasklet_schedule(&tp->tl);
- }
+ r8152_submit_rx(tp, agg, GFP_ATOMIC);
}
}
@@ -1758,6 +1743,7 @@ static void tx_bottom(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
if (res == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(netdev);
} else {
struct net_device_stats *stats = &netdev->stats;
@@ -1792,6 +1778,8 @@ static void bottom_half(unsigned long data)
if (!netif_carrier_ok(tp->netdev))
return;
+ clear_bit(SCHEDULE_TASKLET, &tp->flags);
+
rx_bottom(tp);
tx_bottom(tp);
}
@@ -1799,11 +1787,28 @@ static void bottom_half(unsigned long data)
static
int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
{
+ int ret;
+
usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
agg->head, agg_buf_sz,
(usb_complete_t)read_bulk_callback, agg);
- return usb_submit_urb(agg->urb, mem_flags);
+ ret = usb_submit_urb(agg->urb, mem_flags);
+ if (ret == -ENODEV) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+ netif_device_detach(tp->netdev);
+ } else if (ret) {
+ struct urb *urb = agg->urb;
+ unsigned long flags;
+
+ urb->actual_length = 0;
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ tasklet_schedule(&tp->tl);
+ }
+
+ return ret;
}
static void rtl_drop_queued_tx(struct r8152 *tp)
@@ -1994,6 +1999,25 @@ static int rtl_start_rx(struct r8152 *tp)
break;
}
+ if (ret && ++i < RTL8152_MAX_RX) {
+ struct list_head rx_queue;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&rx_queue);
+
+ do {
+ struct rx_agg *agg = &tp->rx_info[i++];
+ struct urb *urb = agg->urb;
+
+ urb->actual_length = 0;
+ list_add_tail(&agg->list, &rx_queue);
+ } while (i < RTL8152_MAX_RX);
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_splice_tail(&rx_queue, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ }
+
return ret;
}
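With r8152_submit_rx() now handling its own failures (previous hunk), rtl_start_rx() only has to park the aggregation buffers that were never submitted. It uses a common batching idiom: collect them on a private list with no locking, then splice the whole batch into tp->rx_done inside one short critical section. A generic, self-contained sketch (hypothetical item type):

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct item { struct list_head list; };

        static void requeue_batch(struct item *items, int first, int count,
                                  struct list_head *done, spinlock_t *lock)
        {
                LIST_HEAD(pending);     /* private staging list, lock-free */
                unsigned long flags;
                int i;

                for (i = first; i < count; i++)
                        list_add_tail(&items[i].list, &pending);

                spin_lock_irqsave(lock, flags); /* one short locked region */
                list_splice_tail(&pending, done);
                spin_unlock_irqrestore(lock, flags);
        }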
@@ -2850,15 +2874,18 @@ static void rtl_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, schedule.work);
+ /* If the device is unplugged or !netif_running(), the workqueue
+ * doesn't need to wake the device, and can return directly.
+ */
+ if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
+ return;
+
if (usb_autopm_get_interface(tp->intf) < 0)
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
goto out1;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
- goto out1;
-
if (!mutex_trylock(&tp->control)) {
schedule_delayed_work(&tp->schedule, 0);
goto out1;
@@ -2933,6 +2960,8 @@ static int rtl8152_open(struct net_device *netdev)
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
free_all_mem(tp);
+ } else {
+ tasklet_enable(&tp->tl);
}
mutex_unlock(&tp->control);
@@ -2948,6 +2977,7 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ tasklet_disable(&tp->tl);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
@@ -2965,9 +2995,7 @@ static int rtl8152_close(struct net_device *netdev)
*/
rtl_runtime_suspend_enable(tp, false);
- tasklet_disable(&tp->tl);
tp->rtl_ops.down(tp);
- tasklet_enable(&tp->tl);
mutex_unlock(&tp->control);
@@ -3427,7 +3455,7 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
data[9] = le64_to_cpu(tally.rx_broadcast);
data[10] = le32_to_cpu(tally.rx_multicast);
data[11] = le16_to_cpu(tally.tx_aborted);
- data[12] = le16_to_cpu(tally.tx_underun);
+ data[12] = le16_to_cpu(tally.tx_underrun);
}
static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -3565,11 +3593,33 @@ out:
return ret;
}
+static int rtl8152_nway_reset(struct net_device *dev)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ mutex_lock(&tp->control);
+
+ ret = mii_nway_restart(&tp->mii);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
+}
+
static struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
+ .nway_reset = rtl8152_nway_reset,
.get_msglevel = rtl8152_get_msglevel,
.set_msglevel = rtl8152_set_msglevel,
.get_wol = rtl8152_get_wol,
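The new hook gives `ethtool -r <dev>` a way to restart autonegotiation. Like every ethtool callback on an autosuspending USB NIC, it has to take a runtime-PM reference before touching registers so a sleeping device is resumed first. The bracket, reduced to its skeleton (control mutex omitted):

        ret = usb_autopm_get_interface(intf);   /* resume + pin the device */
        if (ret < 0)
                return ret;

        ret = mii_nway_restart(&tp->mii);       /* safe: device is awake */

        usb_autopm_put_interface(intf);         /* allow autosuspend again */
        return ret;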
@@ -3709,66 +3759,43 @@ static void rtl8153_unload(struct r8152 *tp)
r8153_power_cut_en(tp, false);
}
-static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
+static int rtl_ops_init(struct r8152 *tp)
{
struct rtl_ops *ops = &tp->rtl_ops;
- int ret = -ENODEV;
-
- switch (id->idVendor) {
- case VENDOR_ID_REALTEK:
- switch (id->idProduct) {
- case PRODUCT_ID_RTL8152:
- ops->init = r8152b_init;
- ops->enable = rtl8152_enable;
- ops->disable = rtl8152_disable;
- ops->up = rtl8152_up;
- ops->down = rtl8152_down;
- ops->unload = rtl8152_unload;
- ops->eee_get = r8152_get_eee;
- ops->eee_set = r8152_set_eee;
- ret = 0;
- break;
- case PRODUCT_ID_RTL8153:
- ops->init = r8153_init;
- ops->enable = rtl8153_enable;
- ops->disable = rtl8153_disable;
- ops->up = rtl8153_up;
- ops->down = rtl8153_down;
- ops->unload = rtl8153_unload;
- ops->eee_get = r8153_get_eee;
- ops->eee_set = r8153_set_eee;
- ret = 0;
- break;
- default:
- break;
- }
+ int ret = 0;
+
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ ops->init = r8152b_init;
+ ops->enable = rtl8152_enable;
+ ops->disable = rtl8152_disable;
+ ops->up = rtl8152_up;
+ ops->down = rtl8152_down;
+ ops->unload = rtl8152_unload;
+ ops->eee_get = r8152_get_eee;
+ ops->eee_set = r8152_set_eee;
break;
- case VENDOR_ID_SAMSUNG:
- switch (id->idProduct) {
- case PRODUCT_ID_SAMSUNG:
- ops->init = r8153_init;
- ops->enable = rtl8153_enable;
- ops->disable = rtl8153_disable;
- ops->up = rtl8153_up;
- ops->down = rtl8153_down;
- ops->unload = rtl8153_unload;
- ops->eee_get = r8153_get_eee;
- ops->eee_set = r8153_set_eee;
- ret = 0;
- break;
- default:
- break;
- }
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ ops->init = r8153_init;
+ ops->enable = rtl8153_enable;
+ ops->disable = rtl8153_disable;
+ ops->up = rtl8153_up;
+ ops->down = rtl8153_down;
+ ops->unload = rtl8153_unload;
+ ops->eee_get = r8153_get_eee;
+ ops->eee_set = r8153_set_eee;
break;
default:
+ ret = -ENODEV;
+ netif_err(tp, probe, tp->netdev, "Unknown Device\n");
break;
}
- if (ret)
- netif_err(tp, probe, tp->netdev, "Unknown Device\n");
-
return ret;
}
@@ -3800,7 +3827,8 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->netdev = netdev;
tp->intf = intf;
- ret = rtl_ops_init(tp, id);
+ r8152b_get_version(tp);
+ ret = rtl_ops_init(tp);
if (ret)
goto out;
@@ -3833,11 +3861,9 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.phy_id_mask = 0x3f;
tp->mii.reg_num_mask = 0x1f;
tp->mii.phy_id = R8152_PHY_ID;
- tp->mii.supports_gmii = 0;
intf->needs_remote_wakeup = 1;
- r8152b_get_version(tp);
tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
@@ -3855,12 +3881,15 @@ static int rtl8152_probe(struct usb_interface *intf,
else
device_set_wakeup_enable(&udev->dev, false);
+ tasklet_disable(&tp->tl);
+
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
out1:
usb_set_intfdata(intf, NULL);
+ tasklet_kill(&tp->tl);
out:
free_netdev(netdev);
return ret;
@@ -3884,11 +3913,27 @@ static void rtl8152_disconnect(struct usb_interface *intf)
}
}
+#define REALTEK_USB_DEVICE(vend, prod) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_CLASS, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC \
+}, \
+{ \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | \
+ USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
- {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
- {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
- {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{}
};
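REALTEK_USB_DEVICE deliberately leaves its braces unbalanced (note the bare `}, {` in the middle) so that a single {REALTEK_USB_DEVICE(v, p)} table entry expands into two usb_device_id records: the classic vendor-specific interface binding plus a CDC ECM-style interface match on the same VID:PID, which lets r8152 win over the generic cdc_ether driver. Expanded by hand for 0x0bda:0x8152:

        { .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
                         USB_DEVICE_ID_MATCH_INT_CLASS,
          .idVendor = 0x0bda, .idProduct = 0x8152,
          .bInterfaceClass = USB_CLASS_VENDOR_SPEC },
        { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO |
                         USB_DEVICE_ID_MATCH_DEVICE,
          .idVendor = 0x0bda, .idProduct = 0x8152,
          .bInterfaceClass = USB_CLASS_COMM,
          .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
          .bInterfaceProtocol = USB_CDC_PROTO_NONE },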
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 6e87e5710048..d37b7dce2d40 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -753,14 +753,13 @@ static int rtl8150_open(struct net_device *netdev)
static int rtl8150_close(struct net_device *netdev)
{
rtl8150_t *dev = netdev_priv(netdev);
- int res = 0;
netif_stop_queue(netdev);
if (!test_bit(RTL8150_UNPLUG, &dev->flags))
disable_net_traffic(dev);
unlink_all_urbs(dev);
- return res;
+ return 0;
}
static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d07bf4cb893f..26423adc35ee 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1670,12 +1670,14 @@ done:
static int smsc95xx_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- u8 suspend_flags = pdata->suspend_flags;
+ struct smsc95xx_priv *pdata;
+ u8 suspend_flags;
int ret;
u32 val;
BUG_ON(!dev);
+ pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ suspend_flags = pdata->suspend_flags;
netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
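The smsc95xx change is about ordering, not logic: initializing pdata from dev->data[0] in its declaration dereferences dev before BUG_ON(!dev) can fire, so a NULL dev would oops with a less useful report than the intended BUG. The rule, sketched with a hypothetical pointer p:

        /* wrong: the initializer runs before the sanity check
         *      int v = p->field;
         *      BUG_ON(!p);
         */
        int v;

        BUG_ON(!p);             /* validate first ... */
        v = p->field;           /* ... dereference after */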
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6dfcbf523936..afd295348ddb 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2199,13 +2199,6 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
if (adapter->rss) {
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
- 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
- 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
- 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
- 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
- 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
- };
devRead->misc.uptFeatures |= UPT1_F_RSS;
devRead->misc.numRxQueues = adapter->num_rx_queues;
@@ -2216,7 +2209,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
- memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));
+ netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = ethtool_rxfh_indir_default(
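Dropping the baked-in key matters because a public, constant Toeplitz key lets an off-host attacker predict RSS queue placement. netdev_rss_key_fill() instead hands every caller the same per-boot random key, so all NICs in the system hash consistently. Roughly what the helper does (paraphrased from net/core/dev.c of this era):

        static u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];

        void netdev_rss_key_fill(void *buffer, size_t len)
        {
                BUG_ON(len > sizeof(netdev_rss_key));
                /* initialized once, on first use, with random bytes */
                net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key));
                memcpy(buffer, netdev_rss_key, len);
        }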
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b725fd9e7803..b7b53329d575 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -583,12 +583,16 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
}
static int
-vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key)
+vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
unsigned int n = rssConf->indTableSize;
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!p)
+ return 0;
while (n--)
p[n] = rssConf->indTable[n];
return 0;
@@ -596,13 +600,20 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key)
}
static int
-vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key)
+vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
+ const u8 hfunc)
{
unsigned int i;
unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!p)
+ return 0;
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = p[i];
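These two hunks adapt vmxnet3 to the extended ethtool RSS API: get_rxfh gained an hfunc out-parameter (report ETH_RSS_HASH_TOP, i.e. Toeplitz), and set_rxfh gained a const u8 hfunc the driver must vet. The contract a driver is expected to implement, condensed into a hypothetical example:

        static int drv_set_rxfh(struct net_device *ndev, const u32 *indir,
                                const u8 *key, const u8 hfunc)
        {
                /* accept only "no change" or the one function we support */
                if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
                    hfunc != ETH_RSS_HASH_TOP)
                        return -EOPNOTSUPP;
                if (key)                /* this device's key is fixed */
                        return -EOPNOTSUPP;
                if (indir) {
                        /* apply the new indirection table here */
                }
                return 0;
        }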
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index be4649a49c5e..31ecb03368c6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -849,7 +849,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr, u16 flags)
+ const unsigned char *addr, u16 vid, u16 flags)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */
@@ -885,7 +885,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- const unsigned char *addr)
+ const unsigned char *addr, u16 vid)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
@@ -1593,14 +1593,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
if (unlikely(err))
return err;
- if (vlan_tx_tag_present(skb)) {
- if (WARN_ON(!__vlan_put_tag(skb,
- skb->vlan_proto,
- vlan_tx_tag_get(skb))))
- return -ENOMEM;
-
- skb->vlan_tci = 0;
- }
+ skb = vlan_hwaccel_push_inside(skb);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1637,14 +1632,9 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
if (unlikely(err))
return err;
- if (vlan_tx_tag_present(skb)) {
- if (WARN_ON(!__vlan_put_tag(skb,
- skb->vlan_proto,
- vlan_tx_tag_get(skb))))
- return -ENOMEM;
-
- skb->vlan_tci = 0;
- }
+ skb = vlan_hwaccel_push_inside(skb);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -2236,6 +2226,9 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
+ [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
+ [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
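Two independent API shifts run through the vxlan hunks: the FDB add/delete ndo hooks grow a u16 vid argument for VLAN-aware bridging, and both encap paths replace the open-coded "materialize a pending hw-accel VLAN tag before pushing the tunnel header" block with vlan_hwaccel_push_inside(), which frees the skb itself on failure and returns NULL. The helper, paraphrased from include/linux/if_vlan.h of this era:

        static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
        {
                if (vlan_tx_tag_present(skb)) {
                        /* write the tag into the payload; frees skb on error */
                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
                                                        vlan_tx_tag_get(skb));
                        if (skb)
                                skb->vlan_tci = 0;  /* tag is in payload now */
                }
                return skb;
        }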
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 86907e5ba6ca..ccba4fea7269 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -135,6 +135,11 @@ struct ath_ops {
struct ath_common;
struct ath_bus_ops;
+struct ath_ps_ops {
+ void (*wakeup)(struct ath_common *common);
+ void (*restore)(struct ath_common *common);
+};
+
struct ath_common {
void *ah;
void *priv;
@@ -148,7 +153,7 @@ struct ath_common {
u16 cachelsz;
u16 curaid;
u8 macaddr[ETH_ALEN];
- u8 curbssid[ETH_ALEN];
+ u8 curbssid[ETH_ALEN] __aligned(2);
u8 bssidmask[ETH_ALEN];
u32 rx_bufsize;
@@ -169,6 +174,7 @@ struct ath_common {
struct ath_regulatory reg_world_copy;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
+ const struct ath_ps_ops *ps_ops;
bool btcoex_enabled;
bool disable_ani;
@@ -178,6 +184,11 @@ struct ath_common {
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
};
+static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
+{
+ return common->ps_ops;
+}
+
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
u32 len,
gfp_t gfp_mask);
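ath_ps_ops is a tiny per-driver vtable that lets shared ath code force a chip awake and later restore its power-save state without knowing which driver (ath9k, ath9k_htc, ...) owns the hardware. How a driver would wire it up, sketched with hypothetical callbacks:

        static void drv_ps_wakeup(struct ath_common *common)
        {
                /* driver-specific: grab the PS lock, force the chip awake */
        }

        static void drv_ps_restore(struct ath_common *common)
        {
                /* driver-specific: drop the reference, allow sleep again */
        }

        static const struct ath_ps_ops drv_ps_ops = {
                .wakeup  = drv_ps_wakeup,
                .restore = drv_ps_restore,
        };

        /* at init:      common->ps_ops = &drv_ps_ops;
         * shared code:  ath_ps_ops(common)->wakeup(common);
         */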
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 101cadb6e4ba..a156e6e48708 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -443,12 +443,12 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -558,6 +558,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
/* sanity */
dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -576,11 +577,11 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp)
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
@@ -817,7 +818,10 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id;
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ /* Skip the last copy engine, CE7 (the diagnostic window), as it
+ * uses polling and isn't initialized for interrupts.
+ */
+ for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}
@@ -832,8 +836,8 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->src_nentries);
- memset(src_ring->per_transfer_context, 0,
- nentries * sizeof(*src_ring->per_transfer_context));
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -869,8 +873,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->dest_nentries);
- memset(dest_ring->per_transfer_context, 0,
- nentries * sizeof(*dest_ring->per_transfer_context));
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
@@ -1020,37 +1024,10 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
* initialized by software/firmware.
*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
- const struct ce_attr *attr,
- void (*send_cb)(struct ath10k_ce_pipe *),
- void (*recv_cb)(struct ath10k_ce_pipe *))
+ const struct ce_attr *attr)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
- /*
- * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
- * additional TX locking checks.
- *
- * For the lack of a better place do the check here.
- */
- BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-
- spin_lock_bh(&ar_pci->ce_lock);
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
- if (attr->src_nentries)
- ce_state->send_cb = send_cb;
- if (attr->dest_nentries)
- ce_state->recv_cb = recv_cb;
- spin_unlock_bh(&ar_pci->ce_lock);
-
if (attr->src_nentries) {
ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
@@ -1098,12 +1075,37 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
- const struct ce_attr *attr)
+ const struct ce_attr *attr,
+ void (*send_cb)(struct ath10k_ce_pipe *),
+ void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
+ /*
+ * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+ * additional TX locking checks.
+ *
+ * For the lack of a better place do the check here.
+ */
+ BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+
+ if (attr->src_nentries)
+ ce_state->send_cb = send_cb;
+
+ if (attr->dest_nentries)
+ ce_state->recv_cb = recv_cb;
+
if (attr->src_nentries) {
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
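The ce.c reshuffle separates one-time from per-boot work: the completion callbacks and the HTT ring-size BUILD_BUG_ON checks move into ath10k_ce_alloc_pipe(), which runs once at probe, while ath10k_ce_init_pipe() shrinks to ring (re)initialization and now zeroes the shared hardware descriptors rather than the software context array, so a firmware restart cannot replay stale descriptor state. The resulting pipe lifecycle, using the signatures from this patch:

        /* probe: allocate rings once, wire up callbacks */
        ath10k_ce_alloc_pipe(ar, ce_id, attr, send_cb, recv_cb);

        /* every (re)start: reset ring and descriptor state only */
        ath10k_ce_init_pipe(ar, ce_id, attr);

        /* every stop */
        ath10k_ce_deinit_pipe(ar, ce_id);

        /* remove */
        ath10k_ce_free_pipe(ar, ce_id);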
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 329b7340fa72..617a151e8ce4 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -192,15 +192,21 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
unsigned int *nbytesp,
unsigned int *transfer_idp);
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
/*==================CE Engine Initialization=======================*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
- const struct ce_attr *attr,
- void (*send_cb)(struct ath10k_ce_pipe *),
- void (*recv_cb)(struct ath10k_ce_pipe *));
+ const struct ce_attr *attr);
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
- const struct ce_attr *attr);
+ const struct ce_attr *attr,
+ void (*send_cb)(struct ath10k_ce_pipe *),
+ void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
@@ -213,6 +219,13 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp);
+
/*
* Support clean shutdown by allowing the caller to cancel
* pending sends. Target DMA must be stopped before using
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index cee18c89d7f2..7762061a1944 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -31,12 +31,17 @@
unsigned int ath10k_debug_mask;
static bool uart_print;
static unsigned int ath10k_p2p;
+static bool skip_otp;
+
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param(uart_print, bool, 0644);
module_param_named(p2p, ath10k_p2p, uint, 0644);
+module_param(skip_otp, bool, 0644);
+
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
+MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@@ -138,7 +143,8 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
return fw;
}
-static int ath10k_push_board_ext_data(struct ath10k *ar)
+static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
+ size_t data_len)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
@@ -159,14 +165,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
if (board_ext_data_addr == 0)
return 0;
- if (ar->board_len != (board_data_size + board_ext_data_size)) {
+ if (data_len != (board_data_size + board_ext_data_size)) {
ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
- ar->board_len, board_data_size, board_ext_data_size);
+ data_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
- ar->board_data + board_data_size,
+ data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err(ar, "could not write board ext data (%d)\n", ret);
@@ -184,13 +190,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
return 0;
}
-static int ath10k_download_board_data(struct ath10k *ar)
+static int ath10k_download_board_data(struct ath10k *ar, const void *data,
+ size_t data_len)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
int ret;
- ret = ath10k_push_board_ext_data(ar);
+ ret = ath10k_push_board_ext_data(ar, data, data_len);
if (ret) {
ath10k_err(ar, "could not push board ext data (%d)\n", ret);
goto exit;
@@ -202,9 +209,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
goto exit;
}
- ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
+ ret = ath10k_bmi_write_memory(ar, address, data,
min_t(u32, board_data_size,
- ar->board_len));
+ data_len));
if (ret) {
ath10k_err(ar, "could not write board data (%d)\n", ret);
goto exit;
@@ -220,11 +227,39 @@ exit:
return ret;
}
+static int ath10k_download_cal_file(struct ath10k *ar)
+{
+ int ret;
+
+ if (!ar->cal_file)
+ return -ENOENT;
+
+ if (IS_ERR(ar->cal_file))
+ return PTR_ERR(ar->cal_file);
+
+ ret = ath10k_download_board_data(ar, ar->cal_file->data,
+ ar->cal_file->size);
+ if (ret) {
+ ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
+
+ return 0;
+}
+
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
int ret;
+ ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+ if (ret) {
+ ath10k_err(ar, "failed to download board data: %d\n", ret);
+ return ret;
+ }
+
/* OTP is optional */
if (!ar->otp_data || !ar->otp_len) {
@@ -250,7 +285,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
- if (result != 0) {
+ if (!skip_otp && result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
@@ -308,6 +343,9 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (ar->firmware && !IS_ERR(ar->firmware))
release_firmware(ar->firmware);
+ if (ar->cal_file && !IS_ERR(ar->cal_file))
+ release_firmware(ar->cal_file);
+
ar->board = NULL;
ar->board_data = NULL;
ar->board_len = 0;
@@ -319,6 +357,27 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
ar->firmware = NULL;
ar->firmware_data = NULL;
ar->firmware_len = 0;
+
+ ar->cal_file = NULL;
+}
+
+static int ath10k_fetch_cal_file(struct ath10k *ar)
+{
+ char filename[100];
+
+ /* cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (IS_ERR(ar->cal_file))
+ /* calibration file is optional, don't print any warnings */
+ return PTR_ERR(ar->cal_file);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
+ ATH10K_FW_DIR, filename);
+
+ return 0;
}
static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
@@ -562,6 +621,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
+ /* calibration file is optional, don't check for any errors */
+ ath10k_fetch_cal_file(ar);
+
ar->fw_api = 3;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
@@ -589,30 +651,32 @@ success:
return 0;
}
-static int ath10k_init_download_firmware(struct ath10k *ar,
- enum ath10k_firmware_mode mode)
+static int ath10k_download_cal_data(struct ath10k *ar)
{
int ret;
- ret = ath10k_download_board_data(ar);
- if (ret) {
- ath10k_err(ar, "failed to download board data: %d\n", ret);
- return ret;
+ ret = ath10k_download_cal_file(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_FILE;
+ goto done;
}
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a calibration file, try OTP next: %d\n",
+ ret);
+
ret = ath10k_download_and_run_otp(ar);
if (ret) {
ath10k_err(ar, "failed to run otp: %d\n", ret);
return ret;
}
- ret = ath10k_download_fw(ar, mode);
- if (ret) {
- ath10k_err(ar, "failed to download firmware: %d\n", ret);
- return ret;
- }
+ ar->cal_mode = ATH10K_CAL_MODE_OTP;
- return ret;
+done:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+ return 0;
}
static int ath10k_init_uart(struct ath10k *ar)
@@ -685,6 +749,25 @@ static void ath10k_core_restart(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+ /* Place a barrier to make sure the compiler doesn't reorder
+ * the CRASH_FLUSH store with the function calls below.
+ */
+ barrier();
+
+ ieee80211_stop_queues(ar->hw);
+ ath10k_drain_tx(ar);
+ complete_all(&ar->scan.started);
+ complete_all(&ar->scan.completed);
+ complete_all(&ar->scan.on_channel);
+ complete_all(&ar->offchan_tx_completed);
+ complete_all(&ar->install_key_done);
+ complete_all(&ar->vdev_setup_done);
+ wake_up(&ar->htt.empty_tx_wq);
+ wake_up(&ar->wmi.tx_credits_wq);
+ wake_up(&ar->peer_mapping_wq);
+
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@@ -716,12 +799,25 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
}
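On a firmware crash the restart worker first publishes ATH10K_FLAG_CRASH_FLUSH and then completes every completion and wakes every waitqueue a caller might be sleeping on, so blocked threads bail out at once instead of riding out their timeouts (ath10k_core_start() clears the flag on the next boot). The crash-flush idiom in miniature, with hypothetical names:

        set_bit(HW_CRASHED, &flags);    /* 1. publish the crash ...       */
        barrier();                      /* ... before waking anyone       */
        complete_all(&cmd_done);        /* 2. release completion waiters  */
        wake_up(&tx_empty_wq);          /* 3. release waitqueue sleepers  */

        /* woken waiters re-check the flag:
         *      if (test_bit(HW_CRASHED, &flags))
         *              return -ESHUTDOWN;
         */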
+static void ath10k_core_init_max_sta_count(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ar->max_num_peers = TARGET_10X_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ } else {
+ ar->max_num_peers = TARGET_NUM_PEERS;
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ }
+}
+
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
int status;
lockdep_assert_held(&ar->conf_mutex);
+ clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -729,7 +825,11 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err;
}
- status = ath10k_init_download_firmware(ar, mode);
+ status = ath10k_download_cal_data(ar);
+ if (status)
+ goto err;
+
+ status = ath10k_download_fw(ar, mode);
if (status)
goto err;
@@ -846,9 +946,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err_hif_stop;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
+ ar->free_vdev_map = (1LL << TARGET_10X_NUM_VDEVS) - 1;
else
- ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+ ar->free_vdev_map = (1LL << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
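The 1 -> 1LL change pairs with free_vdev_map widening to unsigned long long in core.h below: `1 << n` is computed in 32-bit int, so the mask breaks (undefined behaviour, in fact) once n reaches 31, while `1LL << n` builds it in 64 bits. Illustrated with n = 32:

        /* (1 << 32) - 1:  shift count >= width of int, undefined  */
        /* (1LL << 32) - 1 == 0xffffffff, the intended 32-bit mask */
        u64 map = (1LL << 32) - 1;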
@@ -946,6 +1046,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
+ ath10k_core_init_max_sta_count(ar);
+
mutex_lock(&ar->conf_mutex);
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -1084,6 +1186,7 @@ void ath10k_core_unregister(struct ath10k *ar)
EXPORT_SYMBOL(ath10k_core_unregister);
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops)
{
struct ath10k *ar;
@@ -1100,6 +1203,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->dev = dev;
ar->hif.ops = hif_ops;
+ ar->hif.bus = bus;
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
@@ -1120,6 +1224,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
+ init_waitqueue_head(&ar->htt.empty_tx_wq);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
init_completion(&ar->offchan_tx_completed);
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
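Board-data download moves inside ath10k_download_and_run_otp(), and the new ath10k_download_cal_data() establishes a calibration-source priority: first a per-device file (cal-<bus>-<device>.bin, fetched earlier as an optional firmware file), then OTP as the fallback. Condensed:

        static int ath10k_download_cal_data(struct ath10k *ar)
        {
                int ret;

                ret = ath10k_download_cal_file(ar); /* cal-<bus>-<dev>.bin */
                if (ret == 0) {
                        ar->cal_mode = ATH10K_CAL_MODE_FILE;
                        return 0;
                }

                ret = ath10k_download_and_run_otp(ar);  /* fall back to OTP */
                if (ret)
                        return ret;

                ar->cal_mode = ATH10K_CAL_MODE_OTP;
                return 0;
        }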
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index fe531ea6926c..514c219263a5 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -63,12 +63,28 @@
struct ath10k;
+enum ath10k_bus {
+ ATH10K_BUS_PCI,
+};
+
+static inline const char *ath10k_bus_str(enum ath10k_bus bus)
+{
+ switch (bus) {
+ case ATH10K_BUS_PCI:
+ return "pci";
+ }
+
+ return "unknown";
+}
+
struct ath10k_skb_cb {
dma_addr_t paddr;
+ u8 eid;
u8 vdev_id;
struct {
u8 tid;
+ u16 freq;
bool is_offchan;
struct ath10k_htt_txbuf *txbuf;
u32 txbuf_paddr;
@@ -96,8 +112,6 @@ struct ath10k_bmi {
bool done_sent;
};
-#define ATH10K_MAX_MEM_REQS 16
-
struct ath10k_mem_chunk {
void *vaddr;
dma_addr_t paddr;
@@ -110,22 +124,27 @@ struct ath10k_wmi {
struct completion service_ready;
struct completion unified_ready;
wait_queue_head_t tx_credits_wq;
+ DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
u32 num_mem_chunks;
- struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
+ struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
};
-struct ath10k_peer_stat {
+struct ath10k_fw_stats_peer {
+ struct list_head list;
+
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
};
-struct ath10k_target_stats {
+struct ath10k_fw_stats_pdev {
+ struct list_head list;
+
/* PDEV stats */
s32 ch_noise_floor;
u32 tx_frame_count;
@@ -180,15 +199,11 @@ struct ath10k_target_stats {
s32 phy_errs;
s32 phy_err_drop;
s32 mpdu_errs;
+};
- /* VDEV STATS */
-
- /* PEER STATS */
- u8 peers;
- struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
-
- /* TODO: Beacon filter stats */
-
+struct ath10k_fw_stats {
+ struct list_head pdevs;
+ struct list_head peers;
};
struct ath10k_dfs_stats {
@@ -206,6 +221,8 @@ struct ath10k_peer {
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+
+ /* protected by ar->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
@@ -234,6 +251,8 @@ struct ath10k_vif {
struct sk_buff *beacon;
/* protected by data_lock */
bool beacon_sent;
+ void *beacon_buf;
+ dma_addr_t beacon_paddr;
struct ath10k *ar;
struct ieee80211_vif *vif;
@@ -273,6 +292,7 @@ struct ath10k_vif {
u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
+ int txpower;
};
struct ath10k_vif_iter {
@@ -292,17 +312,19 @@ struct ath10k_fw_crash_data {
struct ath10k_debug {
struct dentry *debugfs_phy;
- struct ath10k_target_stats target_stats;
- DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_MAX);
-
- struct completion event_stats_compl;
+ struct ath10k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
struct ath10k_dfs_stats dfs_stats;
struct ath_dfs_pool_stats dfs_pool_stats;
+ /* protected by conf_mutex */
u32 fw_dbglog_mask;
+ u32 pktlog_filter;
+ u32 reg_addr;
u8 htt_max_amsdu;
u8 htt_max_ampdu;
@@ -321,7 +343,7 @@ enum ath10k_state {
* stopped in ath10k_core_restart() work holding conf_mutex. The state
* RESTARTED means that the device is up and mac80211 has started hw
* reconfiguration. Once mac80211 is done with the reconfiguration we
- * set the state to STATE_ON in restart_complete(). */
+ * set the state to STATE_ON in reconfig_complete(). */
ATH10K_STATE_RESTARTING,
ATH10K_STATE_RESTARTED,
@@ -369,8 +391,30 @@ enum ath10k_dev_flags {
/* Indicates that ath10k device is during CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_CORE_REGISTERED,
+
+ /* Device has crashed and needs to restart. This indicates any pending
+ * waiters should cancel immediately instead of waiting for a timeout.
+ */
+ ATH10K_FLAG_CRASH_FLUSH,
};
+enum ath10k_cal_mode {
+ ATH10K_CAL_MODE_FILE,
+ ATH10K_CAL_MODE_OTP,
+};
+
+static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
+{
+ switch (mode) {
+ case ATH10K_CAL_MODE_FILE:
+ return "file";
+ case ATH10K_CAL_MODE_OTP:
+ return "otp";
+ }
+
+ return "unknown";
+}
+
enum ath10k_scan_state {
ATH10K_SCAN_IDLE,
ATH10K_SCAN_STARTING,
@@ -421,6 +465,7 @@ struct ath10k {
bool p2p;
struct {
+ enum ath10k_bus bus;
const struct ath10k_hif_ops *ops;
} hif;
@@ -456,7 +501,10 @@ struct ath10k {
const void *firmware_data;
size_t firmware_len;
+ const struct firmware *cal_file;
+
int fw_api;
+ enum ath10k_cal_mode cal_mode;
struct {
struct completion started;
@@ -482,7 +530,7 @@ struct ath10k {
/* current operating channel definition */
struct cfg80211_chan_def chandef;
- int free_vdev_map;
+ unsigned long long free_vdev_map;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
@@ -517,8 +565,12 @@ struct ath10k {
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
- /* number of created peers; protected by data_lock */
+ /* protected by conf_mutex */
int num_peers;
+ int num_stations;
+
+ int max_num_peers;
+ int max_num_stations;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@@ -563,11 +615,19 @@ struct ath10k {
bool utf_monitor;
} testmode;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ u32 fw_warm_reset_counter;
+ u32 fw_cold_reset_counter;
+ } stats;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
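ath10k_bus_str() above and ath10k_cal_mode_str() both switch over their enum with no default case on purpose: GCC's -Wswitch then warns in every such helper whenever a new enumerator is added, while the return placed after the switch keeps the function well defined if an out-of-range value ever arrives. The idiom:

        static inline const char *cal_mode_str(enum ath10k_cal_mode mode)
        {
                switch (mode) { /* no default: -Wswitch flags missing cases */
                case ATH10K_CAL_MODE_FILE:
                        return "file";
                case ATH10K_CAL_MODE_OTP:
                        return "otp";
                }

                return "unknown";       /* reached only for bogus values */
        }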
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3756feba3223..a716758f14b0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -17,12 +17,12 @@
#include <linux/module.h>
#include <linux/debugfs.h>
-#include <linux/version.h>
-#include <linux/vermagic.h>
#include <linux/vmalloc.h>
+#include <linux/utsname.h>
#include "core.h"
#include "debug.h"
+#include "hif.h"
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
@@ -106,34 +106,37 @@ struct ath10k_dump_file_data {
u8 data[0];
} __packed;
-int ath10k_info(struct ath10k *ar, const char *fmt, ...)
+void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = dev_info(ar->dev, "%pV", &vaf);
+ dev_info(ar->dev, "%pV", &vaf);
trace_ath10k_log_info(ar, &vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
+ ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d.%d.%d.%d cal %s max_sta %d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
- ar->htt.target_version_minor);
+ ar->htt.target_version_minor,
+ ar->fw_version_major,
+ ar->fw_version_minor,
+ ar->fw_version_release,
+ ar->fw_version_build,
+ ath10k_cal_mode_str(ar->cal_mode),
+ ar->max_num_stations);
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),
@@ -143,25 +146,22 @@ void ath10k_print_driver_info(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_print_driver_info);
-int ath10k_err(struct ath10k *ar, const char *fmt, ...)
+void ath10k_err(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = dev_err(ar->dev, "%pV", &vaf);
+ dev_err(ar->dev, "%pV", &vaf);
trace_ath10k_log_err(ar, &vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath10k_err);
-int ath10k_warn(struct ath10k *ar, const char *fmt, ...)
+void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -174,20 +174,11 @@ int ath10k_warn(struct ath10k *ar, const char *fmt, ...)
trace_ath10k_log_warn(ar, &vaf);
va_end(args);
-
- return 0;
}
EXPORT_SYMBOL(ath10k_warn);
#ifdef CONFIG_ATH10K_DEBUGFS
-void ath10k_debug_read_service_map(struct ath10k *ar,
- void *service_map,
- size_t map_size)
-{
- memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
-}
-
static ssize_t ath10k_read_wmi_services(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -209,8 +200,9 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
if (len > buf_len)
len = buf_len;
+ spin_lock_bh(&ar->data_lock);
for (i = 0; i < WMI_SERVICE_MAX; i++) {
- enabled = test_bit(i, ar->debug.wmi_service_bitmap);
+ enabled = test_bit(i, ar->wmi.svc_map);
name = wmi_service_name(i);
if (!name) {
@@ -226,6 +218,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
"%-40s %s\n",
name, enabled ? "enabled" : "-");
}
+ spin_unlock_bh(&ar->data_lock);
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -242,169 +235,182 @@ static const struct file_operations fops_wmi_services = {
.llseek = default_llseek,
};
-void ath10k_debug_read_target_stats(struct ath10k *ar,
- struct wmi_stats_event *ev)
+static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
{
- u8 *tmp = ev->data;
- struct ath10k_target_stats *stats;
- int num_pdev_stats, num_vdev_stats, num_peer_stats;
- struct wmi_pdev_stats_10x *ps;
- int i;
+ struct ath10k_fw_stats_pdev *i, *tmp;
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
+{
spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ spin_unlock_bh(&ar->data_lock);
+}
- stats = &ar->debug.target_stats;
-
- num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
- num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
- num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
-
- if (num_pdev_stats) {
- ps = (struct wmi_pdev_stats_10x *)tmp;
-
- stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
- stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
- stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
- stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
- stats->cycle_count = __le32_to_cpu(ps->cycle_count);
- stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
- stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
-
- stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
- stats->comp_delivered =
- __le32_to_cpu(ps->wal.tx.comp_delivered);
- stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
- stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
- stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
- stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
- stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
- stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
- stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
- stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
- stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
- stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
- stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
- stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
- stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
- stats->sw_retry_failure =
- __le32_to_cpu(ps->wal.tx.sw_retry_failure);
- stats->illgl_rate_phy_err =
- __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
- stats->pdev_cont_xretry =
- __le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
- stats->pdev_tx_timeout =
- __le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
- stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
- stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
- stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
-
- stats->mid_ppdu_route_change =
- __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
- stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
- stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
- stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
- stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
- stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
- stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
- stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
- stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
- stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
- stats->oversize_amsdu =
- __le32_to_cpu(ps->wal.rx.oversize_amsdu);
- stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
- stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
- stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
-
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
- ar->fw_features)) {
- stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
- stats->rts_bad = __le32_to_cpu(ps->rts_bad);
- stats->rts_good = __le32_to_cpu(ps->rts_good);
- stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
- stats->no_beacons = __le32_to_cpu(ps->no_beacons);
- stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
- tmp += sizeof(struct wmi_pdev_stats_10x);
- } else {
- tmp += sizeof(struct wmi_pdev_stats_old);
- }
+static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_fw_stats stats = {};
+ bool is_start, is_started, is_end;
+ size_t num_peers;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.peers);
+
+ spin_lock_bh(&ar->data_lock);
+ ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
+ if (ret) {
+ ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
+ goto unlock;
}
- /* 0 or max vdevs */
- /* Currently firmware does not support VDEV stats */
- if (num_vdev_stats) {
- struct wmi_vdev_stats *vdev_stats;
+ /* Stat data may exceed the htc-wmi buffer limit. In that case the
+ * firmware splits the stats data and delivers it in a ping-pong
+ * fashion of request cmd / update event exchanges.
+ *
+ * However there is no explicit end-of-data. Instead start-of-data is
+ * used as an implicit one. This works as follows:
+ * a) discard stat update events until one with pdev stats is
+ * delivered - this skips session started at end of (b)
+ * b) consume stat update events until another one with pdev stats is
+ * delivered which is treated as end-of-data and is itself discarded
+ */
- for (i = 0; i < num_vdev_stats; i++) {
- vdev_stats = (struct wmi_vdev_stats *)tmp;
- tmp += sizeof(struct wmi_vdev_stats);
- }
+ if (ar->debug.fw_stats_done) {
+ ath10k_warn(ar, "received unsolicited stats update event\n");
+ goto free;
}
- if (num_peer_stats) {
- struct wmi_peer_stats_10x *peer_stats;
- struct ath10k_peer_stat *s;
-
- stats->peers = num_peer_stats;
-
- for (i = 0; i < num_peer_stats; i++) {
- peer_stats = (struct wmi_peer_stats_10x *)tmp;
- s = &stats->peer_stat[i];
-
- memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
- ETH_ALEN);
- s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
- s->peer_tx_rate =
- __le32_to_cpu(peer_stats->peer_tx_rate);
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
- ar->fw_features)) {
- s->peer_rx_rate =
- __le32_to_cpu(peer_stats->peer_rx_rate);
- tmp += sizeof(struct wmi_peer_stats_10x);
-
- } else {
- tmp += sizeof(struct wmi_peer_stats_old);
- }
+ num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+ is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+ is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+
+ if (is_start)
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+
+ if (is_end)
+ ar->debug.fw_stats_done = true;
+
+ is_started = !list_empty(&ar->debug.fw_stats.pdevs);
+
+ if (is_started && !is_end) {
+ if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
+ /* Although this is unlikely, impose a sane limit to
+ * prevent the firmware from DoS-ing the host.
+ */
+ ath10k_warn(ar, "dropping fw peer stats\n");
+ goto free;
}
+
+ list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
}
+ complete(&ar->debug.fw_stats_complete);
+
+free:
+ /* In some cases lists have been spliced and cleared. Free up
+ * resources if that is not the case.
+ */
+ ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
+ ath10k_debug_fw_stats_peers_free(&stats.peers);
+
+unlock:
spin_unlock_bh(&ar->data_lock);
- complete(&ar->debug.event_stats_compl);
}
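Because the firmware marks neither chunk boundaries nor completion explicitly, ath10k_debug_fw_stats_process() treats the second event carrying pdev stats as end-of-data, and the requester has to keep re-issuing the WMI stats command until fw_stats_done flips, bounded by an overall one-second deadline. The polling loop of ath10k_debug_fw_stats_request() (next hunk), condensed:

        unsigned long timeout = jiffies + msecs_to_jiffies(1 * HZ);
        int ret;

        ath10k_debug_fw_stats_reset(ar);

        for (;;) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;

                reinit_completion(&ar->debug.fw_stats_complete);

                /* ask firmware for the next stats chunk */
                ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
                if (ret)
                        return ret;

                ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
                                                  1 * HZ);
                if (ret <= 0)
                        return -ETIMEDOUT;

                spin_lock_bh(&ar->data_lock);
                if (ar->debug.fw_stats_done) {  /* end marker observed */
                        spin_unlock_bh(&ar->data_lock);
                        break;
                }
                spin_unlock_bh(&ar->data_lock);
        }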
-static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
- struct ath10k *ar = file->private_data;
- struct ath10k_target_stats *fw_stats;
- char *buf = NULL;
- unsigned int len = 0, buf_len = 8000;
- ssize_t ret_cnt = 0;
- long left;
- int i;
+ unsigned long timeout;
int ret;
- fw_stats = &ar->debug.target_stats;
+ lockdep_assert_held(&ar->conf_mutex);
- mutex_lock(&ar->conf_mutex);
+ timeout = jiffies + msecs_to_jiffies(1*HZ);
- if (ar->state != ATH10K_STATE_ON)
- goto exit;
+ ath10k_debug_fw_stats_reset(ar);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- goto exit;
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
- ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
- if (ret) {
- ath10k_warn(ar, "could not request stats (%d)\n", ret);
- goto exit;
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+ if (ret) {
+ ath10k_warn(ar, "could not request stats (%d)\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1*HZ);
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
}
- left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
- if (left <= 0)
- goto exit;
+ return 0;
+}
+
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+
+static void ath10k_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ unsigned int len = 0;
+ unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
+
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PDEV stats");
@@ -412,29 +418,29 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Channel noise floor", fw_stats->ch_noise_floor);
+ "Channel noise floor", pdev->ch_noise_floor);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Channel TX power", fw_stats->chan_tx_power);
+ "Channel TX power", pdev->chan_tx_power);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "TX frame count", fw_stats->tx_frame_count);
+ "TX frame count", pdev->tx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RX frame count", fw_stats->rx_frame_count);
+ "RX frame count", pdev->rx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RX clear count", fw_stats->rx_clear_count);
+ "RX clear count", pdev->rx_clear_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Cycle count", fw_stats->cycle_count);
+ "Cycle count", pdev->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "PHY error count", fw_stats->phy_err_count);
+ "PHY error count", pdev->phy_err_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RTS bad count", fw_stats->rts_bad);
+ "RTS bad count", pdev->rts_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "RTS good count", fw_stats->rts_good);
+ "RTS good count", pdev->rts_good);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "FCS bad count", fw_stats->fcs_bad);
+ "FCS bad count", pdev->fcs_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "No beacon count", fw_stats->no_beacons);
+ "No beacon count", pdev->no_beacons);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "MIB int count", fw_stats->mib_int_count);
+ "MIB int count", pdev->mib_int_count);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -443,51 +449,51 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HTT cookies queued", fw_stats->comp_queued);
+ "HTT cookies queued", pdev->comp_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HTT cookies disp.", fw_stats->comp_delivered);
+ "HTT cookies disp.", pdev->comp_delivered);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDU queued", fw_stats->msdu_enqued);
+ "MSDU queued", pdev->msdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU queued", fw_stats->mpdu_enqued);
+ "MPDU queued", pdev->mpdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs dropped", fw_stats->wmm_drop);
+ "MSDUs dropped", pdev->wmm_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Local enqued", fw_stats->local_enqued);
+ "Local enqued", pdev->local_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Local freed", fw_stats->local_freed);
+ "Local freed", pdev->local_freed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HW queued", fw_stats->hw_queued);
+ "HW queued", pdev->hw_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PPDUs reaped", fw_stats->hw_reaped);
+ "PPDUs reaped", pdev->hw_reaped);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Num underruns", fw_stats->underrun);
+ "Num underruns", pdev->underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PPDUs cleaned", fw_stats->tx_abort);
+ "PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs requed", fw_stats->mpdus_requed);
+ "MPDUs requed", pdev->mpdus_requed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Excessive retries", fw_stats->tx_ko);
+ "Excessive retries", pdev->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "HW rate", fw_stats->data_rc);
+ "HW rate", pdev->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Sched self tiggers", fw_stats->self_triggers);
+ "Sched self tiggers", pdev->self_triggers);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Dropped due to SW retries",
- fw_stats->sw_retry_failure);
+ pdev->sw_retry_failure);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Illegal rate phy errors",
- fw_stats->illgl_rate_phy_err);
+ pdev->illgl_rate_phy_err);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Pdev continous xretry", fw_stats->pdev_cont_xretry);
+ "Pdev continous xretry", pdev->pdev_cont_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "TX timeout", fw_stats->pdev_tx_timeout);
+ "TX timeout", pdev->pdev_tx_timeout);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PDEV resets", fw_stats->pdev_resets);
+ "PDEV resets", pdev->pdev_resets);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY underrun", fw_stats->phy_underrun);
+ "PHY underrun", pdev->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU is more than txop limit", fw_stats->txop_ovf);
+ "MPDU is more than txop limit", pdev->txop_ovf);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -497,70 +503,161 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Mid PPDU route change",
- fw_stats->mid_ppdu_route_change);
+ pdev->mid_ppdu_route_change);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Tot. number of statuses", fw_stats->status_rcvd);
+ "Tot. number of statuses", pdev->status_rcvd);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 0", fw_stats->r0_frags);
+ "Extra frags on rings 0", pdev->r0_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 1", fw_stats->r1_frags);
+ "Extra frags on rings 1", pdev->r1_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 2", fw_stats->r2_frags);
+ "Extra frags on rings 2", pdev->r2_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Extra frags on rings 3", fw_stats->r3_frags);
+ "Extra frags on rings 3", pdev->r3_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs delivered to HTT", fw_stats->htt_msdus);
+ "MSDUs delivered to HTT", pdev->htt_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs delivered to HTT", fw_stats->htt_mpdus);
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MSDUs delivered to stack", fw_stats->loc_msdus);
+ "MSDUs delivered to stack", pdev->loc_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs delivered to stack", fw_stats->loc_mpdus);
+ "MPDUs delivered to stack", pdev->loc_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Oversized AMSUs", fw_stats->oversize_amsdu);
+ "Oversized AMSUs", pdev->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY errors", fw_stats->phy_errs);
+ "PHY errors", pdev->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "PHY errors drops", fw_stats->phy_err_drop);
+ "PHY errors drops", pdev->phy_err_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
- "ath10k PEER stats", fw_stats->peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
- for (i = 0; i < fw_stats->peers; i++) {
+ list_for_each_entry(peer, &fw_stats->peers, list) {
len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
- "Peer MAC address",
- fw_stats->peer_stat[i].peer_macaddr);
+ "Peer MAC address", peer->peer_macaddr);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
+ "Peer RSSI", peer->peer_rssi);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer TX rate",
- fw_stats->peer_stat[i].peer_tx_rate);
+ "Peer TX rate", peer->peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
- "Peer RX rate",
- fw_stats->peer_stat[i].peer_rx_rate);
+ "Peer RX rate", peer->peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
+
+unlock:
spin_unlock_bh(&ar->data_lock);
- if (len > buf_len)
- len = buf_len;
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
- ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+ file->private_data = buf;
-exit:
mutex_unlock(&ar->conf_mutex);
- kfree(buf);
- return ret_cnt;
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ unsigned int len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_fw_stats = {
- .read = ath10k_read_fw_stats,
+ .open = ath10k_fw_stats_open,
+ .release = ath10k_fw_stats_release,
+ .read = ath10k_fw_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
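
The rework above moves fw_stats from a format-on-every-read handler to an
open/read/release triplet: the snapshot is formatted once at open time, so
later reads and seeks consume a stable, NUL-terminated buffer. A minimal
userspace sketch of the same lifecycle, with purely illustrative names
(nothing below is ath10k API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct snap_file {
	char *buf;			/* filled once at open time */
};

static struct snap_file *snap_open(void)
{
	struct snap_file *f = malloc(sizeof(*f));

	if (!f)
		return NULL;
	f->buf = malloc(4096);
	if (!f->buf) {
		free(f);
		return NULL;
	}
	/* format once; reads only ever consume the frozen snapshot */
	snprintf(f->buf, 4096, "stat_a %d\nstat_b %d\n", 1, 2);
	return f;
}

static size_t snap_read(struct snap_file *f, char *dst, size_t n, size_t *pos)
{
	size_t len = strlen(f->buf);
	size_t avail = *pos < len ? len - *pos : 0;
	size_t c = n < avail ? n : avail;

	memcpy(dst, f->buf + *pos, c);
	*pos += c;			/* mimics simple_read_from_buffer() */
	return c;
}

static void snap_release(struct snap_file *f)
{
	free(f->buf);
	free(f);
}

int main(void)
{
	struct snap_file *f = snap_open();
	char out[64];
	size_t pos = 0, n;

	if (!f)
		return 1;
	while ((n = snap_read(f, out, sizeof(out) - 1, &pos)) > 0) {
		out[n] = '\0';
		fputs(out, stdout);
	}
	snap_release(f);
	return 0;
}
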
+
+static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret, len, buf_len;
+ char *buf;
+
+ buf_len = 500;
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = 0;
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_warm_reset_counter\t\t%d\n",
+ ar->stats.fw_warm_reset_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_cold_reset_counter\t\t%d\n",
+ ar->stats.fw_cold_reset_counter);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_fw_reset_stats = {
.open = simple_open,
+ .read = ath10k_debug_fw_reset_stats_read,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
@@ -593,7 +690,8 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
"To simulate firmware crash write one of the keywords to this file:\n"
"`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
"`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
- "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n";
+ "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
@@ -646,6 +744,10 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
} else if (!strcmp(buf, "assert")) {
ath10k_info(ar, "simulating firmware assert crash\n");
ret = ath10k_debug_fw_assert(ar);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath10k_info(ar, "user requested hw restart\n");
+ queue_work(ar->workqueue, &ar->restart_work);
+ ret = 0;
} else {
ret = -EINVAL;
goto exit;
@@ -759,8 +861,8 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
sizeof(dump_data->fw_ver));
- dump_data->kernel_ver_code = cpu_to_le32(LINUX_VERSION_CODE);
- strlcpy(dump_data->kernel_ver, VERMAGIC_STRING,
+ dump_data->kernel_ver_code = 0;
+ strlcpy(dump_data->kernel_ver, init_utsname()->release,
sizeof(dump_data->kernel_ver));
dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
@@ -822,6 +924,236 @@ static const struct file_operations fops_fw_crash_dump = {
.llseek = default_llseek,
};
+static ssize_t ath10k_reg_addr_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[32];
+ unsigned int len = 0;
+ u32 reg_addr;
+
+ mutex_lock(&ar->conf_mutex);
+ reg_addr = ar->debug.reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_reg_addr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(reg_addr, 4))
+ return -EFAULT;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.reg_addr = reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_reg_addr = {
+ .read = ath10k_reg_addr_read,
+ .write = ath10k_reg_addr_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[48];
+ unsigned int len;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ reg_val = ath10k_hif_read32(ar, reg_addr);
+ len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_reg_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
+ if (ret)
+ goto exit;
+
+ ath10k_hif_write32(ar, reg_addr, reg_val);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_reg_value = {
+ .read = ath10k_reg_value_read,
+ .write = ath10k_reg_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
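
reg_addr and reg_value act as a pair: one file latches a 4-byte-aligned
target address under conf_mutex, the other reads or writes through it. A
toy single-threaded analogue of the pairing, with a plain array standing
in for the device registers (names and sizes are illustrative):

#include <assert.h>
#include <stdint.h>

static uint32_t fake_regs[64];		/* stands in for device registers */
static uint32_t dbg_reg_addr;		/* the ar->debug.reg_addr analogue */

static int reg_addr_store(uint32_t addr)
{
	/* mirrors the IS_ALIGNED(reg_addr, 4) check above */
	if (addr & 3 || addr >= sizeof(fake_regs))
		return -1;
	dbg_reg_addr = addr;
	return 0;
}

static uint32_t reg_value_load(void)
{
	return fake_regs[dbg_reg_addr / 4];
}

static void reg_value_store(uint32_t val)
{
	fake_regs[dbg_reg_addr / 4] = val;
}

int main(void)
{
	assert(reg_addr_store(0x10) == 0);
	reg_value_store(0xdeadbeef);
	assert(reg_value_load() == 0xdeadbeef);
	return 0;
}
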
+
+static ssize_t ath10k_mem_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ ret = copy_to_user(user_buf, buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ count -= ret;
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_mem_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_mem_value = {
+ .read = ath10k_mem_value_read,
+ .write = ath10k_mem_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
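
mem_value treats the file offset as the target address: each read or
write moves count bytes through the diag window at *ppos and then
advances the offset, which gives userspace plain pread()/pwrite()
semantics. A sketch of a caller, assuming the usual debugfs location
(the path, address and length here are illustrative):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t buf[256];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/ieee80211/phy0/ath10k/mem_value",
		  O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* read 256 bytes of target memory starting at address 0x00400000 */
	n = pread(fd, buf, sizeof(buf), 0x00400000);
	if (n < 0)
		perror("pread");
	else
		printf("read %zd bytes via the diag window\n", n);

	close(fd);
	return 0;
}
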
+
static int ath10k_debug_htt_stats_req(struct ath10k *ar)
{
u64 cookie;
@@ -1029,6 +1361,166 @@ exit:
return ret;
}
+/* TODO: It would be nice to always support ethtool stats; that would
+ * require moving the stats storage out of ath10k_debug, or always having
+ * the ath10k_debug struct available.
+ */
+
+/* This generally corresponds to the debugfs fw_stats file */
+static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ "d_noise_floor",
+ "d_cycle_count",
+ "d_phy_error",
+ "d_rts_bad",
+ "d_rts_good",
+ "d_tx_power", /* in .5 dbM I think */
+ "d_rx_crc_err", /* fcs_bad */
+ "d_no_beacon",
+ "d_tx_mpdus_queued",
+ "d_tx_msdu_queued",
+ "d_tx_msdu_dropped",
+ "d_local_enqued",
+ "d_local_freed",
+ "d_tx_ppdu_hw_queued",
+ "d_tx_ppdu_reaped",
+ "d_tx_fifo_underrun",
+ "d_tx_ppdu_abort",
+ "d_tx_mpdu_requed",
+ "d_tx_excessive_retries",
+ "d_tx_hw_rate",
+ "d_tx_dropped_sw_retries",
+ "d_tx_illegal_rate",
+ "d_tx_continuous_xretries",
+ "d_tx_timeout",
+ "d_tx_mpdu_txop_limit",
+ "d_pdev_resets",
+ "d_rx_mid_ppdu_route_change",
+ "d_rx_status",
+ "d_rx_extra_frags_ring0",
+ "d_rx_extra_frags_ring1",
+ "d_rx_extra_frags_ring2",
+ "d_rx_extra_frags_ring3",
+ "d_rx_msdu_htt",
+ "d_rx_mpdu_htt",
+ "d_rx_msdu_stack",
+ "d_rx_mpdu_stack",
+ "d_rx_phy_err",
+ "d_rx_phy_err_drops",
+ "d_rx_mpdu_errors", /* FCS, MIC, ENC */
+ "d_fw_crash_count",
+ "d_fw_warm_reset_count",
+ "d_fw_cold_reset_count",
+};
+
+#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath10k_gstrings_stats,
+ sizeof(ath10k_gstrings_stats));
+}
+
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH10K_SSTATS_LEN;
+
+ return 0;
+}
+
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath10k *ar = hw->priv;
+ static const struct ath10k_fw_stats_pdev zero_stats = {};
+ const struct ath10k_fw_stats_pdev *pdev_stats;
+ int i = 0, ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ /* just print a warning and try to use older results */
+ ath10k_warn(ar,
+ "failed to get fw stats for ethtool: %d\n",
+ ret);
+ }
+ }
+
+ pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
+ struct ath10k_fw_stats_pdev,
+ list);
+ if (!pdev_stats) {
+ /* no results available so just return zeroes */
+ pdev_stats = &zero_stats;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
+ data[i++] = 0; /* tx bytes */
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = 0; /* rx bytes */
+ data[i++] = pdev_stats->ch_noise_floor;
+ data[i++] = pdev_stats->cycle_count;
+ data[i++] = pdev_stats->phy_err_count;
+ data[i++] = pdev_stats->rts_bad;
+ data[i++] = pdev_stats->rts_good;
+ data[i++] = pdev_stats->chan_tx_power;
+ data[i++] = pdev_stats->fcs_bad;
+ data[i++] = pdev_stats->no_beacons;
+ data[i++] = pdev_stats->mpdu_enqued;
+ data[i++] = pdev_stats->msdu_enqued;
+ data[i++] = pdev_stats->wmm_drop;
+ data[i++] = pdev_stats->local_enqued;
+ data[i++] = pdev_stats->local_freed;
+ data[i++] = pdev_stats->hw_queued;
+ data[i++] = pdev_stats->hw_reaped;
+ data[i++] = pdev_stats->underrun;
+ data[i++] = pdev_stats->tx_abort;
+ data[i++] = pdev_stats->mpdus_requed;
+ data[i++] = pdev_stats->tx_ko;
+ data[i++] = pdev_stats->data_rc;
+ data[i++] = pdev_stats->sw_retry_failure;
+ data[i++] = pdev_stats->illgl_rate_phy_err;
+ data[i++] = pdev_stats->pdev_cont_xretry;
+ data[i++] = pdev_stats->pdev_tx_timeout;
+ data[i++] = pdev_stats->txop_ovf;
+ data[i++] = pdev_stats->pdev_resets;
+ data[i++] = pdev_stats->mid_ppdu_route_change;
+ data[i++] = pdev_stats->status_rcvd;
+ data[i++] = pdev_stats->r0_frags;
+ data[i++] = pdev_stats->r1_frags;
+ data[i++] = pdev_stats->r2_frags;
+ data[i++] = pdev_stats->r3_frags;
+ data[i++] = pdev_stats->htt_msdus;
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = pdev_stats->loc_msdus;
+ data[i++] = pdev_stats->loc_mpdus;
+ data[i++] = pdev_stats->phy_errs;
+ data[i++] = pdev_stats->phy_err_drop;
+ data[i++] = pdev_stats->mpdu_errs;
+ data[i++] = ar->stats.fw_crash_counter;
+ data[i++] = ar->stats.fw_warm_reset_counter;
+ data[i++] = ar->stats.fw_cold_reset_counter;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ WARN_ON(i != ATH10K_SSTATS_LEN);
+}
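
ath10k_gstrings_stats and the data[] fills above have to stay in
lockstep, which the driver only verifies at runtime via
WARN_ON(i != ATH10K_SSTATS_LEN). The same invariant can be enforced at
compile time with an X-macro so each stat is declared exactly once; a
sketch of that alternative (illustrative only, not how ath10k does it):

#include <assert.h>
#include <stdint.h>

#define STAT_LIST(X)	\
	X(tx_pkts_nic)	\
	X(rx_pkts_nic)	\
	X(d_noise_floor)

#define AS_STRING(n)	#n,
#define AS_FIELD(n)	uint64_t n;

static const char *stat_names[] = { STAT_LIST(AS_STRING) };
struct stat_values { STAT_LIST(AS_FIELD) };

/* one name per value, checked by the compiler rather than at runtime */
static_assert(sizeof(stat_names) / sizeof(stat_names[0]) ==
	      sizeof(struct stat_values) / sizeof(uint64_t),
	      "stat names and values out of sync");
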
+
static const struct file_operations fops_fw_dbglog = {
.read = ath10k_read_fw_dbglog,
.write = ath10k_write_fw_dbglog,
@@ -1037,6 +1529,84 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf;
+ u32 hi_addr;
+ __le32 addr;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto err;
+ }
+
+ buf = vmalloc(QCA988X_CAL_DATA_LEN);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
+
+ ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+ if (ret) {
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
+ goto err_vfree;
+ }
+
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ QCA988X_CAL_DATA_LEN);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
+ goto err_vfree;
+ }
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_vfree:
+ vfree(buf);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_debug_cal_data_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ void *buf = file->private_data;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, QCA988X_CAL_DATA_LEN);
+}
+
+static int ath10k_debug_cal_data_release(struct inode *inode,
+ struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static const struct file_operations fops_cal_data = {
+ .open = ath10k_debug_cal_data_open,
+ .read = ath10k_debug_cal_data_read,
+ .release = ath10k_debug_cal_data_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
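
cal_data is fetched indirectly: the first diag read pulls the
little-endian board-data pointer out of the host-interest area, the
second reads the calibration blob that pointer addresses. A sketch of
the two-step dereference against a stubbed transport (diag_read here is
a hypothetical stand-in, not the ath10k API):

#include <stdint.h>
#include <string.h>

/* stands in for ath10k_hif_diag_read(): copy out of "target" memory */
static int diag_read(uint32_t addr, void *buf, size_t len)
{
	(void)addr;			/* a real version would hit the bus */
	memset(buf, 0, len);
	return 0;
}

static int read_cal_data(uint32_t hi_board_data_addr, void *out, size_t len)
{
	uint32_t ptr;
	int ret;

	/* step 1: fetch the pointer itself (little-endian on the wire;
	 * assume a little-endian host for this sketch) */
	ret = diag_read(hi_board_data_addr, &ptr, sizeof(ptr));
	if (ret)
		return ret;

	/* step 2: fetch the calibration blob it points at */
	return diag_read(ptr, out, len);
}

int main(void)
{
	uint8_t cal[32];

	return read_cal_data(0x00400800, cal, sizeof(cal));
}
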
+
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
@@ -1057,7 +1627,22 @@ int ath10k_debug_start(struct ath10k *ar)
ret);
}
- return 0;
+ if (ar->debug.pktlog_filter) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->debug.pktlog_filter);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ }
+
+ return ret;
}
void ath10k_debug_stop(struct ath10k *ar)
@@ -1072,6 +1657,8 @@ void ath10k_debug_stop(struct ath10k *ar)
ar->debug.htt_max_amsdu = 0;
ar->debug.htt_max_ampdu = 0;
+
+ ath10k_wmi_pdev_pktlog_disable(ar);
}
static ssize_t ath10k_write_simulate_radar(struct file *file,
@@ -1154,12 +1741,78 @@ static const struct file_operations fops_dfs_stats = {
.llseek = default_llseek,
};
+static ssize_t ath10k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ar->debug.pktlog_filter = filter;
+ ret = count;
+ goto out;
+ }
+
+ if (filter && (filter != ar->debug.pktlog_filter)) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+ ar->debug.pktlog_filter = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.pktlog_filter);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath10k_read_pktlog_filter,
+ .write = ath10k_write_pktlog_filter,
+ .open = simple_open
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+
return 0;
}
@@ -1167,6 +1820,8 @@ void ath10k_debug_destroy(struct ath10k *ar)
{
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+
+ ath10k_debug_fw_stats_reset(ar);
}
int ath10k_debug_register(struct ath10k *ar)
@@ -1183,11 +1838,14 @@ int ath10k_debug_register(struct ath10k *ar)
INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
ath10k_debug_htt_stats_dwork);
- init_completion(&ar->debug.event_stats_compl);
+ init_completion(&ar->debug.fw_stats_complete);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_fw_stats);
+ debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_fw_reset_stats);
+
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
@@ -1197,6 +1855,15 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_crash_dump);
+ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_reg_addr);
+
+ debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_reg_value);
+
+ debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_mem_value);
+
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_chip_id);
@@ -1210,6 +1877,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_dbglog);
+ debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_cal_data);
+
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
@@ -1224,6 +1894,9 @@ int ath10k_debug_register(struct ath10k *ar)
&fops_dfs_stats);
}
+ debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
+
return 0;
}
@@ -1260,11 +1933,26 @@ void ath10k_dbg_dump(struct ath10k *ar,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
+ char linebuf[256];
+ unsigned int linebuflen;
+ const void *ptr;
+
if (ath10k_debug_mask & mask) {
if (msg)
ath10k_dbg(ar, mask, "%s\n", msg);
- print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
+ }
}
/* tracing code doesn't like null strings :/ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index b3774f7f492c..1b87a5dbec53 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -35,14 +35,24 @@ enum ath10k_debug_mask {
ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_TESTMODE = 0x00001000,
+ ATH10K_DBG_WMI_PRINT = 0x00002000,
ATH10K_DBG_ANY = 0xffffffff,
};
+enum ath10k_pktlog_filter {
+ ATH10K_PKTLOG_RX = 0x000000001,
+ ATH10K_PKTLOG_TX = 0x000000002,
+ ATH10K_PKTLOG_RCFIND = 0x000000004,
+ ATH10K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
+ ATH10K_PKTLOG_ANY = 0x00000001f,
+};
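
The pktlog filter is a plain bitmask: OR together the event classes of
interest and write the value to the pktlog_filter file; writing 0
disables logging. A small sketch composing RX, TX and rate-control
updates (constants mirror the enum above; the names are illustrative):

#include <stdint.h>
#include <stdio.h>

enum {
	PKTLOG_RX	= 0x1,
	PKTLOG_TX	= 0x2,
	PKTLOG_RCFIND	= 0x4,
	PKTLOG_RCUPDATE	= 0x8,
};

int main(void)
{
	uint32_t filter = PKTLOG_RX | PKTLOG_TX | PKTLOG_RCUPDATE;

	/* value to write into .../ath10k/pktlog_filter; prints 0xb */
	printf("0x%x\n", filter);
	return 0;
}
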
+
extern unsigned int ath10k_debug_mask;
-__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...);
-__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...);
-__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
void ath10k_print_driver_info(struct ath10k *ar);
#ifdef CONFIG_ATH10K_DEBUGFS
@@ -52,18 +62,22 @@ int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_destroy(struct ath10k *ar);
int ath10k_debug_register(struct ath10k *ar);
void ath10k_debug_unregister(struct ath10k *ar);
-void ath10k_debug_read_service_map(struct ath10k *ar,
- void *service_map,
- size_t map_size);
-void ath10k_debug_read_target_stats(struct ath10k *ar,
- struct wmi_stats_event *ev);
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
-
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
@@ -92,14 +106,8 @@ static inline void ath10k_debug_unregister(struct ath10k *ar)
{
}
-static inline void ath10k_debug_read_service_map(struct ath10k *ar,
- void *service_map,
- size_t map_size)
-{
-}
-
-static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
- struct wmi_stats_event *ev)
+static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
+ struct sk_buff *skb)
{
}
@@ -116,6 +124,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+#define ath10k_debug_get_et_strings NULL
+#define ath10k_debug_get_et_sset_count NULL
+#define ath10k_debug_get_et_stats NULL
+
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 62323fea27e1..0c92e0251e84 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include "core.h"
+#include "debug.h"
struct ath10k_hif_sg_item {
u16 transfer_id;
@@ -31,11 +32,9 @@ struct ath10k_hif_sg_item {
struct ath10k_hif_cb {
int (*tx_completion)(struct ath10k *ar,
- struct sk_buff *wbuf,
- unsigned transfer_id);
+ struct sk_buff *wbuf);
int (*rx_completion)(struct ath10k *ar,
- struct sk_buff *wbuf,
- u8 pipe_id);
+ struct sk_buff *wbuf);
};
struct ath10k_hif_ops {
@@ -43,6 +42,12 @@ struct ath10k_hif_ops {
int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items);
+ /* read firmware memory through the diagnose interface */
+ int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+
+ int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
+ int nbytes);
/*
* API to handle HIF-specific BMI message exchanges, this API is
* synchronous and only allowed to be called from a context that
@@ -80,6 +85,10 @@ struct ath10k_hif_ops {
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+ u32 (*read32)(struct ath10k *ar, u32 address);
+
+ void (*write32)(struct ath10k *ar, u32 address, u32 value);
+
/* Power up the device and enter BMI transfer mode for FW download */
int (*power_up)(struct ath10k *ar);
@@ -98,6 +107,21 @@ static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}
+static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ar->hif.ops->diag_read(ar, address, buf, buf_len);
+}
+
+static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ if (!ar->hif.ops->diag_write)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->diag_write(ar, address, data, nbytes);
+}
+
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len)
@@ -177,4 +201,25 @@ static inline int ath10k_hif_resume(struct ath10k *ar)
return ar->hif.ops->resume(ar);
}
+static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
+{
+ if (!ar->hif.ops->read32) {
+ ath10k_warn(ar, "hif read32 not supported\n");
+ return 0xdeaddead;
+ }
+
+ return ar->hif.ops->read32(ar, address);
+}
+
+static inline void ath10k_hif_write32(struct ath10k *ar,
+ u32 address, u32 data)
+{
+ if (!ar->hif.ops->write32) {
+ ath10k_warn(ar, "hif write32 not supported\n");
+ return;
+ }
+
+ ar->hif.ops->write32(ar, address, data);
+}
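
read32/write32 (and diag_write above) are optional HIF ops, so these
wrappers centralize the NULL-method check instead of making every caller
test the ops table. The same guard idiom in generic form (illustrative
names; the 0xdeaddead poison value is borrowed from the wrapper above):

#include <stdio.h>

struct bus_ops {
	unsigned int (*read32)(unsigned int addr);	/* may be NULL */
};

static unsigned int bus_read32(const struct bus_ops *ops, unsigned int addr)
{
	if (!ops->read32) {
		fprintf(stderr, "read32 not supported\n");
		return 0xdeaddead;	/* recognizable poison, not a fault */
	}
	return ops->read32(addr);
}

int main(void)
{
	struct bus_ops ops = { .read32 = NULL };	/* op not provided */

	return bus_read32(&ops, 0x10) == 0xdeaddead ? 0 : 1;
}
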
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 676bd4ed969b..f1946a6be442 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -160,6 +160,7 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
+ skb_cb->eid = eid;
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret)
@@ -197,15 +198,18 @@ err_pull:
}
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
- struct sk_buff *skb,
- unsigned int eid)
+ struct sk_buff *skb)
{
struct ath10k_htc *htc = &ar->htc;
- struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k_skb_cb *skb_cb;
+ struct ath10k_htc_ep *ep;
if (WARN_ON_ONCE(!skb))
return 0;
+ skb_cb = ATH10K_SKB_CB(skb);
+ ep = &htc->endpoint[skb_cb->eid];
+
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
@@ -317,8 +321,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
}
static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
- struct sk_buff *skb,
- u8 pipe_id)
+ struct sk_buff *skb)
{
int status = 0;
struct ath10k_htc *htc = &ar->htc;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 3b44217a6c19..1bd5545af903 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -126,6 +126,7 @@ enum htt_data_tx_ext_tid {
* (HL hosts manage queues on the host )
* more_in_batch: only for HL hosts. indicates if more packets are
* pending. this allows target to wait and aggregate
+ * freq: 0 means the home channel of the given vdev. Intended for offchannel tx.
*/
struct htt_data_tx_desc {
u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
@@ -133,7 +134,8 @@ struct htt_data_tx_desc {
__le16 len;
__le16 id;
__le32 frags_paddr;
- __le32 peerid;
+ __le16 peerid;
+ __le16 freq;
u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
@@ -156,6 +158,9 @@ enum htt_rx_ring_flags {
HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
};
+#define HTT_RX_RING_SIZE_MIN 128
+#define HTT_RX_RING_SIZE_MAX 2048
+
struct htt_rx_ring_setup_ring {
__le32 fw_idx_shadow_reg_paddr;
__le32 rx_ring_base_paddr;
@@ -725,7 +730,7 @@ static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
*/
struct htt_pktlog_msg {
u8 pad[3];
- __le32 payload[1 /* or more */];
+ u8 payload[0];
} __packed;
struct htt_dbg_stats_rx_reorder_stats {
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 60d40a04508b..9c782a42665e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -25,19 +25,8 @@
#include <linux/log2.h>
-/* slightly larger than one large A-MPDU */
-#define HTT_RX_RING_SIZE_MIN 128
-
-/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
-#define HTT_RX_RING_SIZE_MAX 2048
-
-#define HTT_RX_AVG_FRM_BYTES 1000
-
-/* ms, very conservative */
-#define HTT_RX_HOST_LATENCY_MAX_MS 20
-
-/* ms, conservative */
-#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
+#define HTT_RX_RING_SIZE 1024
+#define HTT_RX_RING_FILL_LEVEL 1000
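
Fixing the ring size at a power of two is what makes
size_mask = size - 1 valid: wrapping an index becomes a single AND
instead of a modulo, and it is why ath10k_htt_rx_alloc() below keeps the
is_power_of_2() sanity check. A standalone sketch of the mask trick:

#include <assert.h>

#define RING_SIZE	1024			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

static unsigned int ring_next(unsigned int idx)
{
	return (idx + 1) & RING_MASK;		/* cheap wrap-around */
}

int main(void)
{
	assert((RING_SIZE & (RING_SIZE - 1)) == 0);	/* is_power_of_2 */
	assert(ring_next(RING_SIZE - 1) == 0);		/* wraps to zero */
	return 0;
}
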
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -45,68 +34,6 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
-static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
-{
- int size;
-
- /*
- * It is expected that the host CPU will typically be able to
- * service the rx indication from one A-MPDU before the rx
- * indication from the subsequent A-MPDU happens, roughly 1-2 ms
- * later. However, the rx ring should be sized very conservatively,
- * to accomodate the worst reasonable delay before the host CPU
- * services a rx indication interrupt.
- *
- * The rx ring need not be kept full of empty buffers. In theory,
- * the htt host SW can dynamically track the low-water mark in the
- * rx ring, and dynamically adjust the level to which the rx ring
- * is filled with empty buffers, to dynamically meet the desired
- * low-water mark.
- *
- * In contrast, it's difficult to resize the rx ring itself, once
- * it's in use. Thus, the ring itself should be sized very
- * conservatively, while the degree to which the ring is filled
- * with empty buffers should be sized moderately conservatively.
- */
-
- /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
- size =
- htt->max_throughput_mbps +
- 1000 /
- (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
-
- if (size < HTT_RX_RING_SIZE_MIN)
- size = HTT_RX_RING_SIZE_MIN;
-
- if (size > HTT_RX_RING_SIZE_MAX)
- size = HTT_RX_RING_SIZE_MAX;
-
- size = roundup_pow_of_two(size);
-
- return size;
-}
-
-static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
-{
- int size;
-
- /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
- size =
- htt->max_throughput_mbps *
- 1000 /
- (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
-
- /*
- * Make sure the fill level is at least 1 less than the ring size.
- * Leaving 1 element empty allows the SW to easily distinguish
- * between a full ring vs. an empty ring.
- */
- if (size >= htt->rx_ring.size)
- size = htt->rx_ring.size - 1;
-
- return size;
-}
-
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
struct sk_buff *skb;
@@ -291,50 +218,38 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = idx;
htt->rx_ring.fill_cnt--;
- return msdu;
-}
+ dma_unmap_single(htt->ar->dev,
+ ATH10K_SKB_CB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
-static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
-{
- struct sk_buff *next;
-
- while (skb) {
- next = skb->next;
- dev_kfree_skb_any(skb);
- skb = next;
- }
+ return msdu;
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
u8 **fw_desc, int *fw_desc_len,
- struct sk_buff **head_msdu,
- struct sk_buff **tail_msdu,
- u32 *attention)
+ struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
int msdu_len, msdu_chaining = 0;
- struct sk_buff *msdu, *next;
+ struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
lockdep_assert_held(&htt->rx_ring.lock);
- if (htt->rx_confused) {
- ath10k_warn(ar, "htt is confused. refusing rx\n");
- return -1;
- }
-
- msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
- while (msdu) {
+ for (;;) {
int last_msdu, msdu_len_invalid, msdu_chained;
- dma_unmap_single(htt->ar->dev,
- ATH10K_SKB_CB(msdu)->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
- ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
- msdu->data, msdu->len + skb_tailroom(msdu));
+ __skb_queue_tail(amsdu, msdu);
rx_desc = (struct htt_rx_desc *)msdu->data;
@@ -353,19 +268,10 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
*/
if (!(__le32_to_cpu(rx_desc->attention.flags)
& RX_ATTENTION_FLAGS_MSDU_DONE)) {
- ath10k_htt_rx_free_msdu_chain(*head_msdu);
- *head_msdu = NULL;
- msdu = NULL;
- ath10k_err(ar, "htt rx stopped. cannot recover\n");
- htt->rx_confused = true;
- break;
+ __skb_queue_purge(amsdu);
+ return -EIO;
}
- *attention |= __le32_to_cpu(rx_desc->attention.flags) &
- (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
- RX_ATTENTION_FLAGS_DECRYPT_ERR |
- RX_ATTENTION_FLAGS_FCS_ERR |
- RX_ATTENTION_FLAGS_MGMT_TYPE);
/*
* Copy the FW rx descriptor for this MSDU from the rx
* indication message into the MSDU's netbuf. HL uses the
@@ -422,43 +328,32 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
msdu_len -= msdu->len;
- /* FIXME: Do chained buffers include htt_rx_desc or not? */
+ /* Note: Chained buffers do not contain rx descriptor */
while (msdu_chained--) {
- struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
-
- dma_unmap_single(htt->ar->dev,
- ATH10K_SKB_CB(next)->paddr,
- next->len + skb_tailroom(next),
- DMA_FROM_DEVICE);
-
- ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
- "htt rx chained: ", next->data,
- next->len + skb_tailroom(next));
-
- skb_trim(next, 0);
- skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
- msdu_len -= next->len;
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
- msdu->next = next;
- msdu = next;
+ __skb_queue_tail(amsdu, msdu);
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
+ msdu_len -= msdu->len;
msdu_chaining = 1;
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
- if (last_msdu) {
- msdu->next = NULL;
- break;
- }
+ trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
+ sizeof(*rx_desc) - sizeof(u32));
- next = ath10k_htt_rx_netbuf_pop(htt);
- msdu->next = next;
- msdu = next;
+ if (last_msdu)
+ break;
}
- *tail_msdu = msdu;
- if (*head_msdu == NULL)
+ if (skb_queue_empty(amsdu))
msdu_chaining = -1;
/*
@@ -492,25 +387,20 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
- htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
+ htt->rx_confused = false;
+
+ /* XXX: The fill level could be changed during runtime in response to
+ * the host processing latency. Is this really worth it?
+ */
+ htt->rx_ring.size = HTT_RX_RING_SIZE;
+ htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+ htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn(ar, "htt rx ring size is not power of 2\n");
return -EINVAL;
}
- htt->rx_ring.size_mask = htt->rx_ring.size - 1;
-
- /*
- * Set the initial value for the level to which the rx ring
- * should be filled, based on the max throughput and the
- * worst likely latency for the host to fill the rx ring
- * with new buffers. In theory, this fill level can be
- * dynamically adjusted from the initial value set here, to
- * reflect the actual host latency rather than a
- * conservative assumption about the host latency.
- */
- htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
-
htt->rx_ring.netbufs_ring =
kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
GFP_KERNEL);
@@ -581,73 +471,50 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
- return 4;
+ return IEEE80211_WEP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
- case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
- case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
+ return IEEE80211_TKIP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
- return 8;
- case HTT_RX_MPDU_ENCRYPT_NONE:
- return 0;
+ return IEEE80211_CCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
}
- ath10k_warn(ar, "unknown encryption type %d\n", type);
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
+#define MICHAEL_MIC_LEN 8
+
static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
- case HTT_RX_MPDU_ENCRYPT_WEP128:
- case HTT_RX_MPDU_ENCRYPT_WAPI:
- return 0;
+ return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
- return 4;
+ return IEEE80211_TKIP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
- return 8;
+ return IEEE80211_CCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
}
- ath10k_warn(ar, "unknown encryption type %d\n", type);
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
-/* Applies for first msdu in chain, before altering it. */
-static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- enum rx_msdu_decap_format fmt;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
-
- if (fmt == RX_MSDU_DECAP_RAW)
- return (void *)skb->data;
-
- return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
-}
-
-/* This function only applies for first msdu in an msdu chain */
-static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
-{
- u8 *qc;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qc = ieee80211_get_qos_ctl(hdr);
- if (qc[0] & 0x80)
- return true;
- }
- return false;
-}
-
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
@@ -682,23 +549,34 @@ static const u8 rx_legacy_rate_idx[] = {
};
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
- enum ieee80211_band band,
- u8 info0, u32 info1, u32 info2,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
{
+ enum ieee80211_band band;
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
u8 preamble = 0;
+ u32 info1, info2, info3;
- /* Check if valid fields */
- if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+ /* Band value can't be set as undefined but freq can be 0 - use that to
+ * determine whether band is provided.
+ *
+ * FIXME: Perhaps this can go away if CCK rate reporting is a little
+ * reworked?
+ */
+ if (!status->freq)
return;
- preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+ band = status->band;
+ info1 = __le32_to_cpu(rxd->ppdu_start.info1);
+ info2 = __le32_to_cpu(rxd->ppdu_start.info2);
+ info3 = __le32_to_cpu(rxd->ppdu_start.info3);
+
+ preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
switch (preamble) {
case HTT_RX_LEGACY:
- cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
- rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+ cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
+ rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
rate_idx = 0;
if (rate < 0x08 || rate > 0x0F)
@@ -725,11 +603,11 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
- /* HT-SIG - Table 20-11 in info1 and info2 */
- mcs = info1 & 0x1F;
+ /* HT-SIG - Table 20-11 in info2 and info3 */
+ mcs = info2 & 0x1F;
nss = mcs >> 3;
- bw = (info1 >> 7) & 1;
- sgi = (info2 >> 7) & 1;
+ bw = (info2 >> 7) & 1;
+ sgi = (info3 >> 7) & 1;
status->rate_idx = mcs;
status->flag |= RX_FLAG_HT;
@@ -740,12 +618,12 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
- /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
+ /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
TODO check this */
- mcs = (info2 >> 4) & 0x0F;
- nss = ((info1 >> 10) & 0x07) + 1;
- bw = info1 & 3;
- sgi = info2 & 1;
+ mcs = (info3 >> 4) & 0x0F;
+ nss = ((info2 >> 10) & 0x07) + 1;
+ bw = info2 & 3;
+ sgi = info3 & 1;
status->rate_idx = mcs;
status->vht_nss = nss;
@@ -773,41 +651,6 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
}
}
-static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
- struct ieee80211_rx_status *rx_status,
- struct sk_buff *skb,
- enum htt_rx_mpdu_encrypt_type enctype,
- enum rx_msdu_decap_format fmt,
- bool dot11frag)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-
- rx_status->flag &= ~(RX_FLAG_DECRYPTED |
- RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED);
-
- if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
- return;
-
- /*
- * There's no explicit rx descriptor flag to indicate whether a given
- * frame has been decrypted or not. We're forced to use the decap
- * format as an implicit indication. However fragmentation rx is always
- * raw and it probably never reports undecrypted raws.
- *
- * This makes sure sniffed frames are reported as-is without stripping
- * the protected flag.
- */
- if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
- return;
-
- rx_status->flag |= RX_FLAG_DECRYPTED |
- RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
- hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
- ~IEEE80211_FCTL_PROTECTED);
-}
-
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
struct ieee80211_rx_status *status)
{
@@ -828,6 +671,72 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
return true;
}
+static void ath10k_htt_rx_h_signal(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ /* FIXME: Get real NF */
+ status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rxd->ppdu_start.rssi_comb;
+ status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+}
+
+static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
+ * means all prior MSDUs in a PPDU are reported to mac80211 without the
+ * TSF. Is it worth holding frames until end of PPDU is known?
+ *
+ * FIXME: Can we get/compute 64bit TSF?
+ */
+ status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+}
+
+static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *first;
+ struct htt_rx_desc *rxd;
+ bool is_first_ppdu;
+ bool is_last_ppdu;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+
+ is_first_ppdu = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
+ is_last_ppdu = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
+
+ if (is_first_ppdu) {
+ /* New PPDU starts so clear out the old per-PPDU status. */
+ status->freq = 0;
+ status->rate_idx = 0;
+ status->vht_nss = 0;
+ status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
+ status->flag &= ~(RX_FLAG_HT |
+ RX_FLAG_VHT |
+ RX_FLAG_SHORT_GI |
+ RX_FLAG_40MHZ |
+ RX_FLAG_MACTIME_END);
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ ath10k_htt_rx_h_signal(ar, status, rxd);
+ ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_rates(ar, status, rxd);
+ }
+
+ if (is_last_ppdu)
+ ath10k_htt_rx_h_mactime(ar, status, rxd);
+}
+
static const char * const tid_to_ac[] = {
"BE",
"BK",
@@ -892,6 +801,8 @@ static void ath10k_process_rx(struct ath10k *ar,
!!(status->flag & RX_FLAG_AMSDU_MORE));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
+ trace_ath10k_rx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_rx_payload(ar, skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
}
@@ -902,187 +813,263 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}
-static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct ieee80211_rx_status *rx_status,
- struct sk_buff *skb_in)
+static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
{
- struct ath10k *ar = htt->ar;
+ struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
- struct sk_buff *skb = skb_in;
- struct sk_buff *first;
- enum rx_msdu_decap_format fmt;
- enum htt_rx_mpdu_encrypt_type enctype;
+ size_t hdr_len;
+ size_t crypto_len;
+ bool is_first;
+ bool is_last;
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ is_first = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Delivered decapped frame:
+ * [802.11 header]
+ * [crypto param] <-- can be trimmed if !fcs_err &&
+ * !decrypt_err && !peer_idx_invalid
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ * [payload]
+ * [FCS] <-- at end, needs to be trimmed
+ */
+
+ /* This probably shouldn't happen but warn just in case */
+ if (unlikely(WARN_ON_ONCE(!is_first)))
+ return;
+
+ /* This probably shouldn't happen but warn just in case */
+ if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
+ return;
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ /* In most cases this will be true for sniffed frames. It makes sense
+ * to deliver them as-is without stripping the crypto param. This would
+ * also make sense for software based decryption (which is not
+ * implemented in ath10k).
+ *
+ * If there's no error then the frame is decrypted. At least that is
+ * the case for frames that come in via fragmented rx indication.
+ */
+ if (!is_decrypted)
+ return;
+
+ /* The payload is decrypted so strip crypto params. Start from tail
+ * since hdr is used to compute some stuff.
+ */
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+
+ /* MMIC */
+ if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ skb_trim(msdu, msdu->len - 8);
+
+ /* Head */
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+}
+
+static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64])
+{
struct ieee80211_hdr *hdr;
- u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos;
- unsigned int hdr_len;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
- rxd = (void *)skb->data - sizeof(*rxd);
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ /* Delivered decapped frame:
+ * [nwifi 802.11 header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ *
+ * Note: The nwifi header doesn't have QoS Control and is
+ * (always?) a 3addr frame.
+ *
+ * Note2: There's no A-MSDU subframe header. Even if it's part
+ * of an A-MSDU.
+ */
+
+ /* pull decapped header and copy SA & DA */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, hdr_len);
- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(hdr_buf, hdr, hdr_len);
- hdr = (struct ieee80211_hdr *)hdr_buf;
-
- first = skb;
- while (skb) {
- void *decap_hdr;
- int len;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
- decap_hdr = (void *)rxd->rx_hdr_status;
-
- skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
-
- /* First frame in an A-MSDU chain has more decapped data. */
- if (skb == first) {
- len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
- len += round_up(ath10k_htt_rx_crypto_param_len(ar,
- enctype), 4);
- decap_hdr += len;
- }
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
- switch (fmt) {
- case RX_MSDU_DECAP_RAW:
- /* remove trailing FCS */
- skb_trim(skb, skb->len - FCS_LEN);
- break;
- case RX_MSDU_DECAP_NATIVE_WIFI:
- /* pull decapped header and copy SA & DA */
- hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
- ether_addr_copy(da, ieee80211_get_DA(hdr));
- ether_addr_copy(sa, ieee80211_get_SA(hdr));
- skb_pull(skb, hdr_len);
-
- /* push original 802.11 header */
- hdr = (struct ieee80211_hdr *)hdr_buf;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
-
- /* original A-MSDU header has the bit set but we're
- * not including A-MSDU subframe header */
- hdr = (struct ieee80211_hdr *)skb->data;
- qos = ieee80211_get_qos_ctl(hdr);
- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
-
- /* original 802.11 header has a different DA and in
- * case of 4addr it may also have different SA
- */
- ether_addr_copy(ieee80211_get_DA(hdr), da);
- ether_addr_copy(ieee80211_get_SA(hdr), sa);
- break;
- case RX_MSDU_DECAP_ETHERNET2_DIX:
- /* strip ethernet header and insert decapped 802.11
- * header, amsdu subframe header and rfc1042 header */
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
- len = 0;
- len += sizeof(struct rfc1042_hdr);
- len += sizeof(struct amsdu_subframe_hdr);
+static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
+ struct sk_buff *msdu,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_first, is_last, is_amsdu;
- skb_pull(skb, sizeof(struct ethhdr));
- memcpy(skb_push(skb, len), decap_hdr, len);
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
- break;
- case RX_MSDU_DECAP_8023_SNAP_LLC:
- /* insert decapped 802.11 header making a singly
- * A-MSDU */
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
- break;
- }
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
- skb_in = skb;
- ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
- false);
- skb = skb->next;
- skb_in->next = NULL;
+ is_first = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+ is_amsdu = !(is_first && is_last);
- if (skb)
- rx_status->flag |= RX_FLAG_AMSDU_MORE;
- else
- rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
+ rfc1042 = hdr;
+
+ if (is_first) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- ath10k_process_rx(htt->ar, rx_status, skb_in);
+ rfc1042 += round_up(hdr_len, 4) +
+ round_up(crypto_len, 4);
}
- /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
- * monitor interface active for sniffing purposes. */
+ if (is_amsdu)
+ rfc1042 += sizeof(struct amsdu_subframe_hdr);
+
+ return rfc1042;
}
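A worked example of the offset arithmetic above, assuming a QoS data frame (26-byte header), CCMP (8-byte crypto parameter) and a 14-byte struct amsdu_subframe_hdr:

	rfc1042 = hdr + round_up(26, 4) + round_up(8, 4) + 14
	        = hdr + 28 + 8 + 14
	        = hdr + 50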
-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
- struct ieee80211_rx_status *rx_status,
- struct sk_buff *skb)
+static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
{
- struct ath10k *ar = htt->ar;
- struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
- enum rx_msdu_decap_format fmt;
- enum htt_rx_mpdu_encrypt_type enctype;
- int hdr_len;
+ struct ethhdr *eth;
+ size_t hdr_len;
void *rfc1042;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
- /* This shouldn't happen. If it does than it may be a FW bug. */
- if (skb->next) {
- ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n");
- ath10k_htt_rx_free_msdu_chain(skb->next);
- skb->next = NULL;
- }
+ /* Delivered decapped frame:
+ * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
+ * [payload]
+ */
- rxd = (void *)skb->data - sizeof(*rxd);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+ rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
+ sizeof(struct rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
- skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
- switch (fmt) {
+static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64])
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+
+ /* Delivered decapped frame:
+ * [amsdu header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ * [payload]
+ */
+
+ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+}
+
+static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
+{
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format decap;
+ struct ieee80211_hdr *hdr;
+
+ /* First msdu's decapped header:
+ * [802.11 header] <-- padded to 4 bytes long
+ * [crypto param] <-- padded to 4 bytes long
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ *
+ * Other (2nd, 3rd, ..) msdu's decapped header:
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ */
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ switch (decap) {
case RX_MSDU_DECAP_RAW:
- /* remove trailing FCS */
- skb_trim(skb, skb->len - FCS_LEN);
+ ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
+ is_decrypted);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- /* Pull decapped header */
- hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
- skb_pull(skb, hdr_len);
-
- /* Push original header */
- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
- /* strip ethernet header and insert decapped 802.11 header and
- * rfc1042 header */
-
- rfc1042 = hdr;
- rfc1042 += roundup(hdr_len, 4);
- rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
- enctype), 4);
-
- skb_pull(skb, sizeof(struct ethhdr));
- memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
- rfc1042, sizeof(struct rfc1042_hdr));
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
- /* remove A-MSDU subframe header and insert
- * decapped 802.11 header. rfc1042 header is already there */
-
- skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
break;
}
-
- ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
-
- ath10k_process_rx(htt->ar, rx_status, skb);
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -1116,10 +1103,128 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
return CHECKSUM_UNNECESSARY;
}
-static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
+static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+{
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+}
+
+static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
{
- struct sk_buff *next = msdu_head->next;
- struct sk_buff *to_free = next;
+ struct sk_buff *first;
+ struct sk_buff *last;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ u8 first_hdr[64];
+ u8 *qos;
+ size_t hdr_len;
+ bool has_fcs_err;
+ bool has_crypto_err;
+ bool has_tkip_err;
+ bool has_peer_idx_invalid;
+ bool is_decrypted;
+ u32 attention;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ /* The first MSDU's Rx descriptor in an A-MSDU contains the full
+ * 802.11 decapped header. It'll be used to undecap each MSDU.
+ */
+ hdr = (void *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(first_hdr, hdr, hdr_len);
+
+ /* Each A-MSDU subframe will use the original header as the base and be
+ * reported as a separate MSDU, so strip the A-MSDU bit from QoS Ctl.
+ */
+ hdr = (void *)first_hdr;
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ /* Some attention flags are valid only in the last MSDU. */
+ last = skb_peek_tail(amsdu);
+ rxd = (void *)last->data - sizeof(*rxd);
+ attention = __le32_to_cpu(rxd->attention.flags);
+
+ has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
+ has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
+ has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+ has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
+
+ /* Note: If hardware captures an encrypted frame that it can't decrypt,
+ * e.g. due to fcs error, missing peer or invalid key data, it will
+ * report the frame as raw.
+ */
+ is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
+ !has_fcs_err &&
+ !has_crypto_err &&
+ !has_peer_idx_invalid);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (has_fcs_err)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (has_tkip_err)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted)
+ status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+
+ skb_queue_walk(amsdu, msdu) {
+ ath10k_htt_rx_h_csum_offload(msdu);
+ ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
+ is_decrypted);
+
+ /* Undecapping involves copying the original 802.11 header back
+ * to sk_buff. If frame is protected and hardware has decrypted
+ * it then remove the protected bit.
+ */
+ if (!is_decrypted)
+ continue;
+
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *msdu;
+
+ while ((msdu = __skb_dequeue(amsdu))) {
+ /* Setup per-MSDU flags */
+ if (skb_queue_empty(amsdu))
+ status->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ status->flag |= RX_FLAG_AMSDU_MORE;
+
+ ath10k_process_rx(ar, status, msdu);
+ }
+}
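Because the flag is set from what is still queued after each dequeue, a 3-subframe A-MSDU reaches mac80211 as:

	msdu 1: RX_FLAG_AMSDU_MORE set   (2 frames still queued)
	msdu 2: RX_FLAG_AMSDU_MORE set   (1 frame still queued)
	msdu 3: RX_FLAG_AMSDU_MORE clear (queue empty)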
+
+static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
+{
+ struct sk_buff *skb, *first;
int space;
int total_len = 0;
@@ -1130,113 +1235,142 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
* skb?
*/
- msdu_head->next = NULL;
+ first = __skb_dequeue(amsdu);
/* Allocate total length all at once. */
- while (next) {
- total_len += next->len;
- next = next->next;
- }
+ skb_queue_walk(amsdu, skb)
+ total_len += skb->len;
- space = total_len - skb_tailroom(msdu_head);
+ space = total_len - skb_tailroom(first);
if ((space > 0) &&
- (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
+ (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
/* TODO: bump some rx-oom error stat */
/* put it back together so we can free the
* whole list at once.
*/
- msdu_head->next = to_free;
+ __skb_queue_head(amsdu, first);
return -1;
}
/* Walk list again, copying contents into
* msdu_head
*/
- next = to_free;
- while (next) {
- skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
- next->len);
- next = next->next;
+ while ((skb = __skb_dequeue(amsdu))) {
+ skb_copy_from_linear_data(skb, skb_put(first, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
}
- /* If here, we have consolidated skb. Free the
- * fragments and pass the main skb on up the
- * stack.
- */
- ath10k_htt_rx_free_msdu_chain(to_free);
+ __skb_queue_head(amsdu, first);
return 0;
}
-static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
- struct sk_buff *head,
- enum htt_rx_mpdu_status status,
- bool channel_set,
- u32 attention)
+static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ bool chained)
{
- struct ath10k *ar = htt->ar;
+ struct sk_buff *first;
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format decap;
- if (head->len == 0) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx dropping due to zero-len\n");
- return false;
- }
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
- if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx dropping due to decrypt-err\n");
- return false;
- }
+ if (!chained)
+ return;
- if (!channel_set) {
- ath10k_warn(ar, "no channel configured; ignoring frame!\n");
- return false;
+ /* FIXME: The current unchaining logic can only handle the simple
+ * case of raw msdu chaining. If decapping is anything other than raw
+ * the chaining may be more complex and isn't handled by the current
+ * code. Don't even try reconstructing such frames - the result would
+ * be pretty much garbage.
+ */
+ if (decap != RX_MSDU_DECAP_RAW ||
+ skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+ __skb_queue_purge(amsdu);
+ return;
}
- /* Skip mgmt frames while we handle this in WMI */
- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
- attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ ath10k_unchain_msdu(amsdu);
+}
+
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+ bool is_mgmt;
+ bool has_fcs_err;
+
+ msdu = skb_peek(amsdu);
+ rxd = (void *)msdu->data - sizeof(*rxd);
+
+ /* FIXME: It might be a good idea to do some fuzzy-testing to drop
+ * invalid/dangerous frames.
+ */
+
+ if (!rx_status->freq) {
+ ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
return false;
}
- if (status != HTT_RX_IND_MPDU_STATUS_OK &&
- status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
- status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
- !htt->ar->monitor_started) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx ignoring frame w/ status %d\n",
- status);
+ is_mgmt = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+ has_fcs_err = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
+
+ /* Management frames are handled via WMI events. The advantage of
+ * this approach is that the channel is explicitly provided in WMI
+ * events, whereas HTT doesn't provide channel information for Rxed
+ * frames.
+ *
+ * However some firmware revisions don't report corrupted frames via
+ * WMI, so don't drop them.
+ */
+ if (is_mgmt && !has_fcs_err) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
return false;
}
- if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt rx CAC running\n");
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
return false;
}
return true;
}
+static void ath10k_htt_rx_h_filter(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ if (skb_queue_empty(amsdu))
+ return;
+
+ if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
+ return;
+
+ __skb_queue_purge(amsdu);
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
- struct htt_rx_desc *rxd;
- enum htt_rx_mpdu_status status;
- struct ieee80211_hdr *hdr;
+ struct sk_buff_head amsdu;
int num_mpdu_ranges;
- u32 attention;
int fw_desc_len;
u8 *fw_desc;
- bool channel_set;
- int i, j;
- int ret;
+ int i, ret, mpdu_count = 0;
lockdep_assert_held(&htt->rx_ring.lock);
+ if (htt->rx_confused)
+ return;
+
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
fw_desc = (u8 *)&rx->fw_desc;
@@ -1244,92 +1378,33 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
- /* Fill this once, while this is per-ppdu */
- if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
- memset(rx_status, 0, sizeof(*rx_status));
- rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
- rx->ppdu.combined_rssi;
- }
-
- if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
- /* TSF available only in 32-bit */
- rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
- rx_status->flag |= RX_FLAG_MACTIME_END;
- }
-
- channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
-
- if (channel_set) {
- ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
- rx->ppdu.info0,
- __le32_to_cpu(rx->ppdu.info1),
- __le32_to_cpu(rx->ppdu.info2),
- rx_status);
- }
-
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
- for (i = 0; i < num_mpdu_ranges; i++) {
- status = mpdu_ranges[i].mpdu_range_status;
-
- for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
- struct sk_buff *msdu_head, *msdu_tail;
-
- attention = 0;
- msdu_head = NULL;
- msdu_tail = NULL;
- ret = ath10k_htt_rx_amsdu_pop(htt,
- &fw_desc,
- &fw_desc_len,
- &msdu_head,
- &msdu_tail,
- &attention);
-
- if (ret < 0) {
- ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n",
- ret);
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- rxd = container_of((void *)msdu_head->data,
- struct htt_rx_desc,
- msdu_payload);
-
- if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
- status,
- channel_set,
- attention)) {
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- if (ret > 0 &&
- ath10k_unchain_msdu(msdu_head) < 0) {
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- else
- rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
-
- if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
- else
- rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
-
- hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
-
- if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
- else
- ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
+ for (i = 0; i < num_mpdu_ranges; i++)
+ mpdu_count += mpdu_ranges[i].mpdu_count;
+
+ while (mpdu_count--) {
+ __skb_queue_head_init(&amsdu);
+ ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
+ &fw_desc_len, &amsdu);
+ if (ret < 0) {
+ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+ __skb_queue_purge(&amsdu);
+ /* FIXME: It's probably a good idea to reboot the
+ * device instead of leaving it inoperable.
+ */
+ htt->rx_confused = true;
+ break;
}
+
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
}
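Taken together, the rewritten handler pushes every MPDU through a fixed pipeline; a sketch of the stages (names from the calls above, the h_ppdu role inferred from the per-PPDU code removed earlier in this hunk):

	amsdu_pop -> take one MPDU's MSDUs off the rx ring
	h_ppdu    -> fill per-PPDU rx status (channel, rates, TSF)
	h_unchain -> consolidate raw-decap skb chains into one skb
	h_filter  -> purge frames that must not reach mac80211
	h_mpdu    -> per-MPDU crypto flags, csum offload, undecap
	h_deliver -> hand each MSDU to mac80211 in order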
tasklet_schedule(&htt->rx_replenish_task);
@@ -1339,108 +1414,44 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag)
{
struct ath10k *ar = htt->ar;
- struct sk_buff *msdu_head, *msdu_tail;
- enum htt_rx_mpdu_encrypt_type enctype;
- struct htt_rx_desc *rxd;
- enum rx_msdu_decap_format fmt;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
- struct ieee80211_hdr *hdr;
+ struct sk_buff_head amsdu;
int ret;
- bool tkip_mic_err;
- bool decrypt_err;
u8 *fw_desc;
- int fw_desc_len, hdrlen, paramlen;
- int trim;
- u32 attention = 0;
+ int fw_desc_len;
fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
fw_desc = (u8 *)frag->fw_msdu_rx_desc;
- msdu_head = NULL;
- msdu_tail = NULL;
+ __skb_queue_head_init(&amsdu);
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &msdu_head, &msdu_tail,
- &attention);
+ &amsdu);
spin_unlock_bh(&htt->rx_ring.lock);
+ tasklet_schedule(&htt->rx_replenish_task);
+
ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
if (ret) {
ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
ret);
- ath10k_htt_rx_free_msdu_chain(msdu_head);
+ __skb_queue_purge(&amsdu);
return;
}
- /* FIXME: implement signal strength */
- rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
-
- hdr = (struct ieee80211_hdr *)msdu_head->data;
- rxd = (void *)msdu_head->data - sizeof(*rxd);
- tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
- decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
- RX_MSDU_START_INFO1_DECAP_FORMAT);
-
- if (fmt != RX_MSDU_DECAP_RAW) {
- ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n");
- dev_kfree_skb_any(msdu_head);
- goto end;
- }
-
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
- true);
- msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-
- if (tkip_mic_err)
- ath10k_warn(ar, "tkip mic error\n");
-
- if (decrypt_err) {
- ath10k_warn(ar, "decryption err in fragmented rx\n");
- dev_kfree_skb_any(msdu_head);
- goto end;
- }
-
- if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);
-
- /* It is more efficient to move the header than the payload */
- memmove((void *)msdu_head->data + paramlen,
- (void *)msdu_head->data,
- hdrlen);
- skb_pull(msdu_head, paramlen);
- hdr = (struct ieee80211_hdr *)msdu_head->data;
- }
-
- /* remove trailing FCS */
- trim = 4;
-
- /* remove crypto trailer */
- trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);
-
- /* last fragment of TKIP frags has MIC */
- if (!ieee80211_has_morefrags(hdr->frame_control) &&
- enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
- trim += 8;
-
- if (trim > msdu_head->len) {
- ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
- dev_kfree_skb_any(msdu_head);
- goto end;
+ if (skb_queue_len(&amsdu) != 1) {
+ ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
+ __skb_queue_purge(&amsdu);
+ return;
}
- skb_trim(msdu_head, msdu_head->len - trim);
-
- ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
- msdu_head->data, msdu_head->len);
- ath10k_process_rx(htt->ar, rx_status, msdu_head);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-end:
if (fw_desc_len > 0) {
ath10k_dbg(ar, ATH10K_DBG_HTT,
"expecting more fragmented rx in one indication %d\n",
@@ -1674,6 +1685,15 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_DELBA:
ath10k_htt_rx_delba(ar, resp);
break;
+ case HTT_T2H_MSG_TYPE_PKTLOG: {
+ struct ath10k_pktlog_hdr *hdr =
+ (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
+
+ trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
+ sizeof(*hdr) +
+ __le16_to_cpu(hdr->size));
+ break;
+ }
case HTT_T2H_MSG_TYPE_RX_FLUSH: {
/* Ignore this event because mac80211 takes care of Rx
* aggregation reordering.
@@ -1681,8 +1701,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
default:
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n",
- resp->hdr.msg_type);
+ ath10k_warn(ar, "htt event (%d) not handled\n",
+ resp->hdr.msg_type);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index bd87a35201d8..4bc51d8a14a3 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -92,7 +92,6 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
spin_lock_init(&htt->tx_lock);
- init_waitqueue_head(&htt->empty_tx_wq);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
@@ -555,14 +554,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
- skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+ skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
+ skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
- (u32)skb_cb->paddr, vdev_id, tid);
+ (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 3cf5702c1e7e..dfedfd0e0f34 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -20,15 +20,16 @@
#include "targaddrs.h"
+#define ATH10K_FW_DIR "ath10k"
+
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
-#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
-#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
@@ -43,6 +44,8 @@
#define REG_DUMP_COUNT_QCA988X 60
+#define QCA988X_CAL_DATA_LEN 2116
+
struct ath10k_fw_ie {
__le32 id;
__le32 len;
@@ -78,6 +81,15 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
+struct ath10k_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+ __le16 log_type;
+ __le16 size;
+ __le32 timestamp;
+ u8 payload[0];
+} __packed;
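A minimal sketch of how a consumer might size one pktlog event from this header, mirroring the trace length computation in the T2H handler above (hypothetical helper, not part of the patch):

	static size_t ath10k_pktlog_event_len(const struct ath10k_pktlog_hdr *hdr)
	{
		/* fixed header followed by `size` bytes of log payload */
		return sizeof(*hdr) + __le16_to_cpu(hdr->size);
	}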
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -85,11 +97,13 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_DMA_BURST_SIZE 0
#define TARGET_MAC_AGGR_DELIM 0
#define TARGET_AST_SKID_LIMIT 16
-#define TARGET_NUM_PEERS 16
+#define TARGET_NUM_STATIONS 16
+#define TARGET_NUM_PEERS ((TARGET_NUM_STATIONS) + \
+ (TARGET_NUM_VDEVS))
#define TARGET_NUM_OFFLOAD_PEERS 0
#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_NUM_PEER_KEYS 2
-#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
+#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2)
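With the defines above, the worked numbers for the main target are:

	TARGET_NUM_PEERS = 16 + 8 = 24
	TARGET_NUM_TIDS  = 24 * 2 = 48

i.e. the same 48 TIDs the old 2 * ((PEERS) + (VDEVS)) formula produced, just expressed through the new peer count.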
#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
@@ -120,12 +134,15 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
-#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
-#define TARGET_10X_NUM_PEERS_MAX 128
+#define TARGET_10X_NUM_STATIONS 128
+#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
-#define TARGET_10X_NUM_TIDS 256
+#define TARGET_10X_NUM_TIDS_MAX 256
+#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_NUM_PEERS) * 2)
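Assuming TARGET_10X_NUM_VDEVS is 16 (defined earlier in this header, outside the hunk), the 10X numbers work out to:

	TARGET_10X_NUM_PEERS = 128 + 16      = 144
	TARGET_10X_NUM_TIDS  = min(256, 288) = 256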
#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
@@ -279,6 +296,7 @@ enum ath10k_mcast2ucast_mode {
#define SI_RX_DATA1_OFFSET 0x00000014
#define CORE_CTRL_CPU_INTR_MASK 0x00002000
+#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 46709301a51e..c4005670cba2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -136,7 +136,9 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
if (ret)
return ret;
+ spin_lock_bh(&ar->data_lock);
peer->keys[i] = arvif->wep_keys[i];
+ spin_unlock_bh(&ar->data_lock);
}
return 0;
@@ -173,12 +175,39 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
i, ret);
+ spin_lock_bh(&ar->data_lock);
peer->keys[i] = NULL;
+ spin_unlock_bh(&ar->data_lock);
}
return first_errno;
}
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx)
+{
+ struct ath10k_peer *peer;
+ int i;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ /* We don't know which vdev this peer belongs to,
+ * since WMI doesn't give us that information.
+ *
+ * FIXME: multi-bss needs to be handled.
+ */
+ peer = ath10k_peer_find(ar, 0, addr);
+ if (!peer)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
+ return true;
+ }
+
+ return false;
+}
+
static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key)
{
@@ -326,6 +355,9 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
lockdep_assert_held(&ar->conf_mutex);
+ if (ar->num_peers >= ar->max_num_peers)
+ return -ENOBUFS;
+
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
@@ -339,9 +371,8 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
addr, vdev_id, ret);
return ret;
}
- spin_lock_bh(&ar->data_lock);
+
ar->num_peers++;
- spin_unlock_bh(&ar->data_lock);
return 0;
}
@@ -391,15 +422,11 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
return 0;
}
-static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
- if (value != 0xFFFFFFFF)
- value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
- ATH10K_RTS_MAX);
-
vdev_param = ar->wmi.vdev_param->rts_threshold;
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
@@ -432,9 +459,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
if (ret)
return ret;
- spin_lock_bh(&ar->data_lock);
ar->num_peers--;
- spin_unlock_bh(&ar->data_lock);
return 0;
}
@@ -471,20 +496,59 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
list_del(&peer->list);
kfree(peer);
}
- ar->num_peers = 0;
spin_unlock_bh(&ar->data_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
}
/************************/
/* Interface management */
/************************/
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!arvif->beacon)
+ return;
+
+ if (!arvif->beacon_buf)
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(arvif->beacon);
+
+ arvif->beacon = NULL;
+ arvif->beacon_sent = false;
+}
+
+static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_mac_vif_beacon_free(arvif);
+
+ if (arvif->beacon_buf) {
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf, arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+}
+
static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
ret = wait_for_completion_timeout(&ar->vdev_setup_done,
ATH10K_VDEV_SETUP_TIMEOUT_HZ);
if (ret == 0)
@@ -517,6 +581,8 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
arg.channel.max_reg_power = channel->max_reg_power * 2;
arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+ reinit_completion(&ar->vdev_setup_done);
+
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
@@ -564,6 +630,8 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
+ reinit_completion(&ar->vdev_setup_done);
+
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
@@ -590,9 +658,9 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar)
return -ENOMEM;
}
- bit = ffs(ar->free_vdev_map);
+ bit = __ffs64(ar->free_vdev_map);
- ar->monitor_vdev_id = bit - 1;
+ ar->monitor_vdev_id = bit;
ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
WMI_VDEV_TYPE_MONITOR,
@@ -603,7 +671,7 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar)
return ret;
}
- ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
+ ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
@@ -623,7 +691,7 @@ static int ath10k_monitor_vdev_delete(struct ath10k *ar)
return ret;
}
- ar->free_vdev_map |= 1 << ar->monitor_vdev_id;
+ ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
@@ -909,15 +977,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->is_up = false;
spin_lock_bh(&arvif->ar->data_lock);
- if (arvif->beacon) {
- dma_unmap_single(arvif->ar->dev,
- ATH10K_SKB_CB(arvif->beacon)->paddr,
- arvif->beacon->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(arvif->beacon);
-
- arvif->beacon = NULL;
- arvif->beacon_sent = false;
- }
+ ath10k_mac_vif_beacon_free(arvif);
spin_unlock_bh(&arvif->ar->data_lock);
return;
@@ -966,14 +1026,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
if (is_zero_ether_addr(arvif->bssid))
return;
- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
- arvif->bssid);
- if (ret) {
- ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
- arvif->bssid, arvif->vdev_id, ret);
- return;
- }
-
memset(arvif->bssid, 0, ETH_ALEN);
return;
@@ -1042,51 +1094,45 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
/* Station management */
/**********************/
+static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
+ struct ieee80211_vif *vif)
+{
+ /* Some firmware revisions have unstable STA powersave when listen
+ * interval is set too high (e.g. 5). The symptoms are firmware doesn't
+ * generate NullFunc frames properly even if buffered frames have been
+ * indicated in Beacon TIM. Firmware would seldom wake up to pull
+ * buffered frames. Often pinging the device from AP would simply fail.
+ *
+ * As a workaround set it to 1.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return 1;
+
+ return ar->hw->conf.listen_interval;
+}
+
static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
lockdep_assert_held(&ar->conf_mutex);
ether_addr_copy(arg->addr, sta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_aid = sta->aid;
arg->peer_flags |= WMI_PEER_AUTH;
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
- /*
- * Seems FW have problems with Power Save in STA
- * mode when we setup this parameter to high (eg. 5).
- * Often we see that FW don't send NULL (with clean P flags)
- * frame even there is info about buffered frames in beacons.
- * Sometimes we have to wait more than 10 seconds before FW
- * will wakeup. Often sending one ping from AP to our device
- * just fail (more than 50%).
- *
- * Seems setting this FW parameter to 1 couse FW
- * will check every beacon and will wakup immediately
- * after detection buffered data.
- */
- arg->peer_listen_intval = 1;
- else
- arg->peer_listen_intval = ar->hw->conf.listen_interval;
-
+ arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
arg->peer_num_spatial_streams = 1;
-
- /*
- * The assoc capabilities are available only in managed mode.
- */
- if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
- arg->peer_caps = bss_conf->assoc_capability;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
}
static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct wmi_peer_assoc_complete_arg *arg)
{
- struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_bss_conf *info = &vif->bss_conf;
struct cfg80211_bss *bss;
const u8 *rsnie = NULL;
@@ -1343,11 +1389,12 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
if (sta->wme)
@@ -1359,7 +1406,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
break;
case WMI_VDEV_TYPE_STA:
- if (bss_conf->qos)
+ if (vif->bss_conf.qos)
arg->peer_flags |= WMI_PEER_QOS;
break;
default:
@@ -1368,7 +1415,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
@@ -1419,22 +1466,21 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
}
static int ath10k_peer_assoc_prepare(struct ath10k *ar,
- struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
lockdep_assert_held(&ar->conf_mutex);
memset(arg, 0, sizeof(*arg));
- ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
- ath10k_peer_assoc_h_crypto(ar, arvif, arg);
+ ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_crypto(ar, vif, arg);
ath10k_peer_assoc_h_rates(ar, sta, arg);
ath10k_peer_assoc_h_ht(ar, sta, arg);
ath10k_peer_assoc_h_vht(ar, sta, arg);
- ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
- ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
+ ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
return 0;
}
@@ -1480,6 +1526,9 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
rcu_read_lock();
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
@@ -1494,8 +1543,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
* before calling ath10k_setup_peer_smps() which might sleep. */
ht_cap = ap_sta->ht_cap;
- ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
- bss_conf, &peer_arg);
+ ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
@@ -1523,6 +1571,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+ WARN_ON(arvif->is_up);
+
arvif->aid = bss_conf->aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
@@ -1536,9 +1586,6 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
arvif->is_up = true;
}
-/*
- * FIXME: flush TIDs
- */
static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1548,45 +1595,30 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
lockdep_assert_held(&ar->conf_mutex);
- /*
- * For some reason, calling VDEV-DOWN before VDEV-STOP
- * makes the FW to send frames via HTT after disassociation.
- * No idea why this happens, even though VDEV-DOWN is supposed
- * to be analogous to link down, so just stop the VDEV.
- */
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
- arvif->vdev_id);
-
- /* FIXME: check return value */
- ret = ath10k_vdev_stop(arvif);
-
- /*
- * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
- * report beacons from previously associated network through HTT.
- * This in turn would spam mac80211 WARN_ON if we bring down all
- * interfaces as it expects there is no rx when no interface is
- * running.
- */
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
- /* FIXME: why don't we print error if wmi call fails? */
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "faield to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
arvif->def_wep_key_idx = 0;
-
- arvif->is_started = false;
arvif->is_up = false;
}
-static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
- struct ieee80211_sta *sta, bool reassoc)
+static int ath10k_station_assoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
+ ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -1601,43 +1633,51 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
return ret;
}
- ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
- if (ret) {
- ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- if (!sta->wme && !reassoc) {
- arvif->num_legacy_stations++;
- ret = ath10k_recalc_rtscts_prot(arvif);
+ /* Re-assoc is run only to update supported rates for given station. It
+ * doesn't make much sense to reconfigure the peer completely.
+ */
+ if (!reassoc) {
+ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->ht_cap);
if (ret) {
- ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
- }
- ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
- if (ret) {
- ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
+ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
- ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
- if (ret) {
- ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
- return ret;
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
}
return ret;
}
-static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
+static int ath10k_station_disassoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
@@ -1729,6 +1769,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
ch->passive = passive;
ch->freq = channel->center_freq;
+ ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;
ch->max_power = channel->max_power * 2;
ch->max_reg_power = channel->max_reg_power * 2;
@@ -1983,6 +2024,18 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
}
}
+static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
+{
+ /* FIXME: It isn't clear exactly when the behaviour changed. At some
+ * point new firmware stopped requiring creation of peer entries for
+ * offchannel tx (and actually creating them causes issues with wmi-htc
+ * tx credit replenishment and reliability). Assume it's at least 3.4
+ * because that's when the `freq` field was introduced to the TX_FRM
+ * HTT command.
+ */
+ return !(ar->htt.target_version_major >= 3 &&
+ ar->htt.target_version_minor >= 4);
+}
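Worked cases for the version check (note the test is only meaningful while the major version is 3; a hypothetical htt 4.0 would make minor >= 4 fail and return true again):

	htt 2.1: !(2 >= 3 && 1 >= 4) = true  (offchan tx work needed)
	htt 3.4: !(3 >= 3 && 4 >= 4) = false (firmware handles it)
	htt 4.0: !(4 >= 3 && 0 >= 4) = true  (caveat above)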
+
static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -2158,10 +2211,10 @@ void __ath10k_scan_finish(struct ath10k *ar)
case ATH10K_SCAN_IDLE:
break;
case ATH10K_SCAN_RUNNING:
- case ATH10K_SCAN_ABORTING:
if (ar->scan.is_roc)
ieee80211_remain_on_channel_expired(ar->hw);
- else
+ case ATH10K_SCAN_ABORTING:
+ if (!ar->scan.is_roc)
ieee80211_scan_completed(ar->hw,
(ar->scan.state ==
ATH10K_SCAN_ABORTING));
@@ -2327,23 +2380,28 @@ static void ath10k_tx(struct ieee80211_hw *hw,
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
spin_lock_bh(&ar->data_lock);
- ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+ ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq;
ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
spin_unlock_bh(&ar->data_lock);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
- skb);
+ if (ath10k_mac_need_offchan_tx_work(ar)) {
+ ATH10K_SKB_CB(skb)->htt.freq = 0;
+ ATH10K_SKB_CB(skb)->htt.is_offchan = true;
- skb_queue_tail(&ar->offchan_tx_queue, skb);
- ieee80211_queue_work(hw, &ar->offchan_tx_work);
- return;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+ skb);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return;
+ }
}
ath10k_tx_htt(ar, skb);
}
/* Must not be called with conf_mutex held as workers can use that also. */
-static void ath10k_drain_tx(struct ath10k *ar)
+void ath10k_drain_tx(struct ath10k *ar)
{
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
@@ -2376,16 +2434,8 @@ void ath10k_halt(struct ath10k *ar)
ath10k_hif_power_down(ar);
spin_lock_bh(&ar->data_lock);
- list_for_each_entry(arvif, &ar->arvifs, list) {
- if (!arvif->beacon)
- continue;
-
- dma_unmap_single(arvif->ar->dev,
- ATH10K_SKB_CB(arvif->beacon)->paddr,
- arvif->beacon->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(arvif->beacon);
- arvif->beacon = NULL;
- }
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ ath10k_mac_vif_beacon_cleanup(arvif);
spin_unlock_bh(&ar->data_lock);
}
@@ -2408,12 +2458,28 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
+static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+{
+ /* It is not clear that allowing gaps in the chainmask
+ * is helpful. It probably will not do what the user
+ * is hoping for, so warn in that case.
+ */
+ if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
+ return;
+
+ ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
+ dbg, cm);
+}
+
static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_check_chain_mask(ar, tx_ant, "tx");
+ ath10k_check_chain_mask(ar, rx_ant, "rx");
+
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
@@ -2677,12 +2743,68 @@ static void ath10k_config_chan(struct ath10k *ar)
ath10k_monitor_recalc(ar);
}
+static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
+{
+ int ret;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
+
+ param = ar->wmi.pdev_param->txpower_limit2g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ param = ar->wmi.pdev_param->txpower_limit5g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
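The * 2 scaling matches the channel power fields elsewhere in this patch: the WMI txpower limit parameters appear to take half-dBm units while mac80211 hands the driver whole dBm, e.g. txpower = 17 dBm becomes a parameter value of 34.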
+
+static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret, txpower = -1;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ WARN_ON(arvif->txpower < 0);
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (WARN_ON(txpower == -1))
+ return -EINVAL;
+
+ ret = ath10k_mac_txpower_setup(ar, txpower);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup tx power %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
- u32 param;
mutex_lock(&ar->conf_mutex);
@@ -2706,25 +2828,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n",
- hw->conf.power_level);
-
- param = ar->wmi.pdev_param->txpower_limit2g;
- ret = ath10k_wmi_pdev_set_param(ar, param,
- hw->conf.power_level * 2);
- if (ret)
- ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
- hw->conf.power_level, ret);
-
- param = ar->wmi.pdev_param->txpower_limit5g;
- ret = ath10k_wmi_pdev_set_param(ar, param,
- hw->conf.power_level * 2);
- if (ret)
- ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
- hw->conf.power_level, ret);
- }
-
if (changed & IEEE80211_CONF_CHANGE_PS)
ath10k_config_ps(ar);
@@ -2739,6 +2842,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
return ret;
}
+static u32 get_nss_from_chainmask(u16 chain_mask)
+{
+ if ((chain_mask & 0x15) == 0x15)
+ return 4;
+ else if ((chain_mask & 0x7) == 0x7)
+ return 3;
+ else if ((chain_mask & 0x3) == 0x3)
+ return 2;
+ return 1;
+}
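Worked examples with the corrected 4-chain test (0xf; the original 0x15 checked the non-contiguous pattern 10101b and would have returned 3 for a full 4-chain mask):

	chain_mask 0xf (1111b) -> 4
	chain_mask 0x7 (0111b) -> 3
	chain_mask 0x5 (0101b) -> 1  (gap; see ath10k_check_chain_mask)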
+
/*
* TODO:
* Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@@ -2772,9 +2886,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = -EBUSY;
goto err;
}
- bit = ffs(ar->free_vdev_map);
+ bit = __ffs64(ar->free_vdev_map);
- arvif->vdev_id = bit - 1;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
+ bit, ar->free_vdev_map);
+
+ arvif->vdev_id = bit;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
if (ar->p2p)
@@ -2804,8 +2921,39 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
- arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
+ /* Some firmware revisions don't wait for beacon tx completion before
+ * sending another SWBA event. This could lead to hardware using old
+ * (freed) beacon data in some cases, e.g. tx credit starvation
+ * combined with missed TBTT. This is very, very rare.
+ *
+ * On non-IOMMU-enabled hosts this could be a possible security issue
+ * because hw could beacon some random data on the air. On
+ * IOMMU-enabled hosts DMAR faults would occur in most cases and target
+ * device would crash.
+ *
+ * Since there are no beacon tx completions (implicit or explicit)
+ * propagated to the host, the only workaround for this is to allocate
+ * a DMA-coherent buffer for the lifetime of a vif and use it for all
+ * beacon tx commands. The worst case for this approach is that some
+ * beacons may become corrupted, e.g. have garbled IEs or an
+ * out-of-date TIM bitmap.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
+ if (!arvif->beacon_buf) {
+ ret = -ENOMEM;
+ ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ arvif->beacon_buf ? "single-buf" : "per-skb");
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
@@ -2815,7 +2963,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err;
}
- ar->free_vdev_map &= ~(1 << arvif->vdev_id);
+ ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
list_add(&arvif->list, &ar->arvifs);
vdev_param = ar->wmi.vdev_param->def_keyid;
@@ -2837,6 +2985,20 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_vdev_delete;
}
+ if (ar->cfg_tx_chainmask) {
+ u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss,
+ ret);
+ goto err_vdev_delete;
+ }
+ }
+
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
@@ -2899,6 +3061,13 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_peer_delete;
}
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ goto err_peer_delete;
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -2908,10 +3077,16 @@ err_peer_delete:
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
- ar->free_vdev_map |= 1 << arvif->vdev_id;
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
err:
+ if (arvif->beacon_buf) {
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf, arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -2924,19 +3099,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret;
- mutex_lock(&ar->conf_mutex);
-
cancel_work_sync(&arvif->wep_key_work);
- spin_lock_bh(&ar->data_lock);
- if (arvif->beacon) {
- dma_unmap_single(arvif->ar->dev,
- ATH10K_SKB_CB(arvif->beacon)->paddr,
- arvif->beacon->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(arvif->beacon);
- arvif->beacon = NULL;
- }
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_vif_beacon_cleanup(arvif);
spin_unlock_bh(&ar->data_lock);
ret = ath10k_spectral_vif_stop(arvif);
@@ -2944,7 +3112,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
arvif->vdev_id, ret);
- ar->free_vdev_map |= 1 << arvif->vdev_id;
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -3068,54 +3236,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
- /*
- * Firmware manages AP self-peer internally so make sure to not create
- * it in driver. Otherwise AP self-peer deletion may timeout later.
- */
- if (changed & BSS_CHANGED_BSSID &&
- vif->type != NL80211_IFTYPE_AP) {
- if (!is_zero_ether_addr(info->bssid)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d create peer %pM\n",
- arvif->vdev_id, info->bssid);
-
- ret = ath10k_peer_create(ar, arvif->vdev_id,
- info->bssid);
- if (ret)
- ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n",
- info->bssid, arvif->vdev_id, ret);
-
- if (vif->type == NL80211_IFTYPE_STATION) {
- /*
- * this is never erased as we it for crypto key
- * clearing; this is FW requirement
- */
- ether_addr_copy(arvif->bssid, info->bssid);
-
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d start %pM\n",
- arvif->vdev_id, info->bssid);
-
- ret = ath10k_vdev_start(arvif);
- if (ret) {
- ath10k_warn(ar, "failed to start vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto exit;
- }
-
- arvif->is_started = true;
- }
-
- /*
- * Mac80211 does not keep IBSS bssid when leaving IBSS,
- * so driver need to store it. It is needed when leaving
- * IBSS in order to remove BSSID peer.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC)
- memcpy(arvif->bssid, info->bssid,
- ETH_ALEN);
- }
- }
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED)
ath10k_control_beaconing(arvif, info);
@@ -3177,10 +3299,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_monitor_stop(ar);
ath10k_bss_assoc(hw, vif, info);
ath10k_monitor_recalc(ar);
+ } else {
+ ath10k_bss_disassoc(hw, vif);
}
}
-exit:
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3266,9 +3399,10 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
- cancel_delayed_work_sync(&ar->scan.timeout);
ath10k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
}
static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
@@ -3453,7 +3587,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
- err = ath10k_station_assoc(ar, arvif, sta, true);
+ err = ath10k_station_assoc(ar, arvif->vif, sta, true);
if (err)
ath10k_warn(ar, "failed to reassociate station: %pM\n",
sta->addr);
@@ -3462,6 +3596,37 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
mutex_unlock(&ar->conf_mutex);
}
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return;
+
+ ar->num_stations--;
+}
+
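The two helpers above enforce a station budget; the sta_state path below pairs them in a reserve-then-roll-back pattern. A compact sketch of that pattern under invented names (peer_create here merely simulates a failure):

#include <errno.h>
#include <stdio.h>

struct radio { int num_stations, max_num_stations; };

static int inc_num_stations(struct radio *ar)
{
	if (ar->num_stations >= ar->max_num_stations)
		return -ENOBUFS;
	ar->num_stations++;
	return 0;
}

static void dec_num_stations(struct radio *ar)
{
	ar->num_stations--;
}

static int peer_create(struct radio *ar)
{
	(void)ar;
	return -EIO;	/* pretend the firmware call failed */
}

static int add_station(struct radio *ar)
{
	int ret = inc_num_stations(ar);

	if (ret)
		return ret;		/* too many stations already */

	ret = peer_create(ar);
	if (ret) {
		dec_num_stations(ar);	/* roll back the reservation */
		return ret;
	}
	return 0;
}

int main(void)
{
	struct radio ar = { .num_stations = 0, .max_num_stations = 2 };

	printf("add_station: %d, count after rollback: %d\n",
	       add_station(&ar), ar.num_stations);
	return 0;
}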
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3471,7 +3636,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
- int max_num_peers;
int ret = 0;
if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -3489,31 +3653,46 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE &&
- vif->type != NL80211_IFTYPE_STATION) {
+ new_state == IEEE80211_STA_NONE) {
/*
* New station addition.
*/
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1;
- else
- max_num_peers = TARGET_NUM_PEERS;
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
+ arvif->vdev_id, sta->addr,
+ ar->num_stations + 1, ar->max_num_stations,
+ ar->num_peers + 1, ar->max_num_peers);
- if (ar->num_peers >= max_num_peers) {
- ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n",
- ar->num_peers, max_num_peers);
- ret = -ENOBUFS;
+ ret = ath10k_mac_inc_num_stations(arvif);
+ if (ret) {
+ ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
goto exit;
}
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d peer create %pM (new sta) num_peers %d\n",
- arvif->vdev_id, sta->addr, ar->num_peers);
-
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
- if (ret)
+ if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
+ ath10k_mac_dec_num_stations(arvif);
+ goto exit;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ WARN_ON(arvif->is_started);
+
+ ret = ath10k_vdev_start(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to start vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr));
+ ath10k_mac_dec_num_stations(arvif);
+ goto exit;
+ }
+
+ arvif->is_started = true;
+ }
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
@@ -3522,13 +3701,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ WARN_ON(!arvif->is_started);
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+ }
+
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
- if (vif->type == NL80211_IFTYPE_STATION)
- ath10k_bss_disassoc(hw, vif);
+ ath10k_mac_dec_num_stations(arvif);
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -3539,7 +3729,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
- ret = ath10k_station_assoc(ar, arvif, sta, false);
+ ret = ath10k_station_assoc(ar, vif, sta, false);
if (ret)
ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -3553,7 +3743,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
sta->addr);
- ret = ath10k_station_disassoc(ar, arvif, sta);
+ ret = ath10k_station_disassoc(ar, vif, sta);
if (ret)
ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -3717,6 +3907,8 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
if (ret)
goto exit;
+ duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
+
memset(&arg, 0, sizeof(arg));
ath10k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
@@ -3761,10 +3953,11 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
mutex_lock(&ar->conf_mutex);
- cancel_delayed_work_sync(&ar->scan.timeout);
ath10k_scan_abort(ar);
mutex_unlock(&ar->conf_mutex);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
return 0;
}
@@ -3807,7 +4000,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
arvif->vdev_id, value);
- ret = ath10k_mac_set_rts(arvif, value);
+ ret = ath10k_mac_set_frag(arvif, value);
if (ret) {
ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3843,7 +4036,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
- skip = (ar->state == ATH10K_STATE_WEDGED);
+ skip = (ar->state == ATH10K_STATE_WEDGED) ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH,
+ &ar->dev_flags);
(empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
@@ -3929,10 +4124,14 @@ exit:
}
#endif
-static void ath10k_restart_complete(struct ieee80211_hw *hw)
+static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
{
struct ath10k *ar = hw->priv;
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
mutex_lock(&ar->conf_mutex);
/* If device failed to restart it will be in a different state, e.g.
@@ -3940,6 +4139,7 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw)
if (ar->state == ATH10K_STATE_RESTARTED) {
ath10k_info(ar, "device successfully recovered\n");
ar->state = ATH10K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
}
mutex_unlock(&ar->conf_mutex);
@@ -3975,6 +4175,9 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
survey->channel = &sband->channels[idx];
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -4022,6 +4225,10 @@ ath10k_default_bitrate_mask(struct ath10k *ar,
u32 legacy = 0x00ff;
u8 ht = 0xff, i;
u16 vht = 0x3ff;
+ u16 nrf = ar->num_rf_chains;
+
+ if (ar->cfg_tx_chainmask)
+ nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
switch (band) {
case IEEE80211_BAND_2GHZ:
@@ -4037,11 +4244,11 @@ ath10k_default_bitrate_mask(struct ath10k *ar,
if (mask->control[band].legacy != legacy)
return false;
- for (i = 0; i < ar->num_rf_chains; i++)
+ for (i = 0; i < nrf; i++)
if (mask->control[band].ht_mcs[i] != ht)
return false;
- for (i = 0; i < ar->num_rf_chains; i++)
+ for (i = 0; i < nrf; i++)
if (mask->control[band].vht_mcs[i] != vht)
return false;
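The loops now stop at the NSS implied by the configured TX chainmask. One plausible reading of get_nss_from_chainmask() is a population count of the enabled chains; the sketch below assumes that and may not match the real helper exactly:

#include <stdio.h>

static int nss_from_chainmask(unsigned int mask)
{
	int nss = 0;

	while (mask) {		/* count set bits: one chain per bit */
		nss += mask & 1;
		mask >>= 1;
	}
	return nss;
}

int main(void)
{
	printf("chainmask 0x3 -> %d spatial streams\n",
	       nss_from_chainmask(0x3));
	return 0;
}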
@@ -4292,6 +4499,9 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
u8 fixed_nss = ar->num_rf_chains;
u8 force_sgi;
+ if (ar->cfg_tx_chainmask)
+ fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
force_sgi = mask->control[band].gi;
if (force_sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
@@ -4450,12 +4660,15 @@ static const struct ieee80211_ops ath10k_ops = {
.tx_last_beacon = ath10k_tx_last_beacon,
.set_antenna = ath10k_set_antenna,
.get_antenna = ath10k_get_antenna,
- .restart_complete = ath10k_restart_complete,
+ .reconfig_complete = ath10k_reconfig_complete,
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
.ampdu_action = ath10k_ampdu_action,
+ .get_et_sset_count = ath10k_debug_get_et_sset_count,
+ .get_et_stats = ath10k_debug_get_et_stats,
+ .get_et_strings = ath10k_debug_get_et_strings,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
@@ -4800,15 +5013,6 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP);
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- /* TODO: Have to deal with 2x2 chips if/when they come out. */
- ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK;
- ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK;
- } else {
- ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK;
- ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK;
- }
-
ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
@@ -4827,10 +5031,6 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_SPECTRUM_MGMT;
- /* MSDU can have HTT TX fragment pushed in front. The additional 4
- * bytes are used for padding/alignment if necessary. */
- ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
-
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
@@ -4854,6 +5054,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 6c80eeada3e2..68296117d203 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -21,6 +21,8 @@
#include <net/mac80211.h>
#include "core.h"
+#define WEP_KEYID_SHIFT 6
+
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
@@ -39,6 +41,10 @@ void ath10k_offchan_tx_work(struct work_struct *work);
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
+void ath10k_drain_tx(struct ath10k *ar);
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 59e0ea83be50..7abb8367119a 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -485,6 +485,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
void *data_buf = NULL;
int i;
+ spin_lock_bh(&ar_pci->ce_lock);
+
ce_diag = ar_pci->ce_diag;
/*
@@ -511,7 +513,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
if (ret != 0)
goto done;
@@ -527,15 +529,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
address);
- ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
- 0);
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
+ 0);
if (ret)
goto done;
i = 0;
- while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id) != 0) {
+ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
ret = -EBUSY;
@@ -554,9 +556,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
i = 0;
- while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id, &flags) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -591,6 +593,8 @@ done:
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
ce_data_base);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
return ret;
}
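The diag read path now holds ce_lock for the whole transaction and switches the inner steps to _nolock variants. A minimal sketch of that lock-scoping pattern (stand-in functions, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ce_lock = PTHREAD_MUTEX_INITIALIZER;

/* _nolock variants assume the caller already holds ce_lock */
static int ce_send_nolock(int v) { return v; }
static int ce_recv_nolock(void)  { return 0; }

static int diag_read(int v)
{
	int ret;

	pthread_mutex_lock(&ce_lock);	/* one lock spans the transaction */
	ret = ce_send_nolock(v);
	if (!ret)
		ret = ce_recv_nolock();
	pthread_mutex_unlock(&ce_lock);
	return ret;
}

int main(void)
{
	printf("diag_read: %d\n", diag_read(0));
	return 0;
}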
@@ -648,6 +652,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
dma_addr_t ce_data_base = 0;
int i;
+ spin_lock_bh(&ar_pci->ce_lock);
+
ce_diag = ar_pci->ce_diag;
/*
@@ -688,7 +694,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
if (ret != 0)
goto done;
@@ -696,15 +702,15 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
- ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
- nbytes, 0, 0);
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+ nbytes, 0, 0);
if (ret != 0)
goto done;
i = 0;
- while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id) != 0) {
+ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -724,9 +730,9 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
i = 0;
- while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id, &flags) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -760,6 +766,8 @@ done:
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
address, ret);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
return ret;
}
@@ -815,20 +823,24 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
- void *transfer_context;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
- while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
- &ce_data, &nbytes,
- &transfer_id) == 0) {
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
+ &nbytes, &transfer_id) == 0) {
/* no need to call tx completion for NULL pointers */
- if (transfer_context == NULL)
+ if (skb == NULL)
continue;
- cb->tx_completion(ar, transfer_context, transfer_id);
+ __skb_queue_tail(&list, skb);
}
+
+ while ((skb = __skb_dequeue(&list)))
+ cb->tx_completion(ar, skb);
}
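Completions are now detached onto a local list first and only then handed to the callback, so the callback never runs while the copy engine ring is being walked. A rough sketch with a plain linked list in place of sk_buff_head (delivery order is reversed here for brevity):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static struct node *ring_pop(struct node **ring)
{
	struct node *n = *ring;

	if (n)
		*ring = n->next;
	return n;
}

static void completion_cb(struct node *n)
{
	printf("completed %d\n", n->id);
	free(n);
}

static void drain(struct node **ring)
{
	struct node *list = NULL, *n;

	/* phase 1: detach everything from the ring */
	while ((n = ring_pop(ring))) {
		n->next = list;
		list = n;
	}

	/* phase 2: run callbacks with the ring no longer involved */
	while ((n = list)) {
		list = n->next;
		completion_cb(n);
	}
}

int main(void)
{
	struct node *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	a->id = 1; b->id = 2;
	a->next = b; b->next = NULL;
	drain(&a);
	return 0;
}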
/* Called by lower (CE) layer when data is received from the Target. */
@@ -839,12 +851,14 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
struct sk_buff *skb;
+ struct sk_buff_head list;
void *transfer_context;
u32 ce_data;
unsigned int nbytes, max_nbytes;
unsigned int transfer_id;
unsigned int flags;
+ __skb_queue_head_init(&list);
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
@@ -861,7 +875,16 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
}
skb_put(skb, nbytes);
- cb->rx_completion(ar, skb, pipe_info->pipe_num);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ cb->rx_completion(ar, skb);
}
ath10k_pci_rx_post_pipe(pipe_info);
@@ -936,6 +959,12 @@ err:
return err;
}
+static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
+}
+
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -986,6 +1015,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_crash_counter++;
+
crash_data = ath10k_debug_get_new_fw_crash_data(ar);
if (crash_data)
@@ -1121,14 +1152,37 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
-static void ath10k_pci_irq_disable(struct ath10k *ar)
+static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
+ u32 val;
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
+ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+}
+static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
+ val |= CORE_CTRL_PCIE_REG_31_MASK;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+}
+
+static void ath10k_pci_irq_disable(struct ath10k *ar)
+{
ath10k_ce_disable_interrupts(ar);
ath10k_pci_disable_and_clear_legacy_irq(ar);
- /* FIXME: How to mask all MSI interrupts? */
+ ath10k_pci_irq_msi_fw_mask(ar);
+}
+
+static void ath10k_pci_irq_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
synchronize_irq(ar_pci->pdev->irq + i);
@@ -1138,7 +1192,7 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
{
ath10k_ce_enable_interrupts(ar);
ath10k_pci_enable_legacy_irq(ar);
- /* FIXME: How to unmask all MSI interrupts? */
+ ath10k_pci_irq_msi_fw_unmask(ar);
}
static int ath10k_pci_hif_start(struct ath10k *ar)
@@ -1151,64 +1205,74 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
- struct ath10k_pci *ar_pci;
- struct ath10k_ce_pipe *ce_hdl;
- u32 buf_sz;
- struct sk_buff *netbuf;
- u32 ce_data;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ int i;
- buf_sz = pipe_info->buf_sz;
+ ar = pci_pipe->hif_ce_state;
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->dest_ring;
- /* Unused Copy Engine */
- if (buf_sz == 0)
+ if (!ce_ring)
return;
- ar = pipe_info->hif_ce_state;
- ar_pci = ath10k_pci_priv(ar);
- ce_hdl = pipe_info->ce_hdl;
+ if (!pci_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
- while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
- &ce_data) == 0) {
- dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
- netbuf->len + skb_tailroom(netbuf),
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
- dev_kfree_skb_any(netbuf);
+ dev_kfree_skb_any(skb);
}
}
-static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ath10k_ce_pipe *ce_hdl;
- struct sk_buff *netbuf;
- u32 ce_data;
- unsigned int nbytes;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct ce_desc *ce_desc;
+ struct sk_buff *skb;
unsigned int id;
- u32 buf_sz;
+ int i;
- buf_sz = pipe_info->buf_sz;
+ ar = pci_pipe->hif_ce_state;
+ ar_pci = ath10k_pci_priv(ar);
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
- /* Unused Copy Engine */
- if (buf_sz == 0)
+ if (!ce_ring)
return;
- ar = pipe_info->hif_ce_state;
- ar_pci = ath10k_pci_priv(ar);
- ce_hdl = pipe_info->ce_hdl;
+ if (!pci_pipe->buf_sz)
+ return;
- while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
- &ce_data, &nbytes, &id) == 0) {
- /* no need to call tx completion for NULL pointers */
- if (!netbuf)
+ ce_desc = ce_ring->shadow_base;
+ if (WARN_ON(!ce_desc))
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
continue;
- ar_pci->msg_callbacks_current.tx_completion(ar,
- netbuf,
- id);
+ ce_ring->per_transfer_context[i] = NULL;
+ id = MS(__le16_to_cpu(ce_desc[i].flags),
+ CE_DESC_FLAGS_META_DATA);
+
+ ar_pci->msg_callbacks_current.tx_completion(ar, skb);
}
}
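Both cleanup paths now walk per_transfer_context directly instead of draining the copy engine via revoke/cancel calls, clearing each slot as ownership is taken. A simplified sketch with malloc'd buffers standing in for DMA-mapped skbs:

#include <stdio.h>
#include <stdlib.h>

#define RING_ENTRIES 4

struct ring { void *per_transfer_context[RING_ENTRIES]; };

static void ring_cleanup(struct ring *r)
{
	int i;

	for (i = 0; i < RING_ENTRIES; i++) {
		void *buf = r->per_transfer_context[i];

		if (!buf)
			continue;
		r->per_transfer_context[i] = NULL;	/* slot released */
		free(buf);	/* driver: dma_unmap + dev_kfree_skb_any */
	}
}

int main(void)
{
	struct ring r = { { malloc(16), NULL, malloc(16), NULL } };

	ring_cleanup(&r);
	puts("ring drained");
	return 0;
}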
@@ -1266,6 +1330,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_warm_reset(ar);
ath10k_pci_irq_disable(ar);
+ ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
}
@@ -1386,6 +1451,9 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
&nbytes, &transfer_id, &flags))
return;
+ if (WARN_ON_ONCE(!xfer))
+ return;
+
if (!xfer->wait_for_resp) {
ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
return;
@@ -1569,23 +1637,40 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0;
}
-static int ath10k_pci_alloc_ce(struct ath10k *ar)
+static int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe;
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
- ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ pipe = &ar_pci->pipe_info[i];
+ pipe->ce_hdl = &ar_pci->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
+ ath10k_pci_ce_send_done,
+ ath10k_pci_ce_recv_data);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
return ret;
}
+
+ /* Last CE is Diagnostic Window */
+ if (i == CE_COUNT - 1) {
+ ar_pci->ce_diag = pipe->ce_hdl;
+ continue;
+ }
+
+ pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
}
return 0;
}
-static void ath10k_pci_free_ce(struct ath10k *ar)
+static void ath10k_pci_free_pipes(struct ath10k *ar)
{
int i;
@@ -1593,39 +1678,17 @@ static void ath10k_pci_free_ce(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
-static int ath10k_pci_ce_init(struct ath10k *ar)
+static int ath10k_pci_init_pipes(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info;
- const struct ce_attr *attr;
- int pipe_num, ret;
+ int i, ret;
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
- pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
- pipe_info->pipe_num = pipe_num;
- pipe_info->hif_ce_state = ar;
- attr = &host_ce_config_wlan[pipe_num];
-
- ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
- ath10k_pci_ce_send_done,
- ath10k_pci_ce_recv_data);
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
if (ret) {
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
- pipe_num, ret);
+ i, ret);
return ret;
}
-
- if (pipe_num == CE_COUNT - 1) {
- /*
- * Reserve the ultimate CE for
- * diagnostic Window support
- */
- ar_pci->ce_diag = pipe_info->ce_hdl;
- continue;
- }
-
- pipe_info->buf_sz = (size_t)(attr->src_sz_max);
}
return 0;
@@ -1666,93 +1729,167 @@ static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
msleep(10);
}
-static int ath10k_pci_warm_reset(struct ath10k *ar)
+static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
u32 val;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
-
- /* debug */
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_CAUSE_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
- val);
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- CPU_INTR_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
- val);
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+}
- /* disable pending irqs */
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_ENABLE_ADDRESS, 0);
+static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
+{
+ u32 val;
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_CLR_ADDRESS, ~0);
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
- msleep(100);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
+ msleep(10);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+}
- /* clear fw indicator */
- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
+static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
+{
+ u32 val;
- /* clear target LF timer interrupts */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_LF_TIMER_CONTROL0_ADDRESS);
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
SOC_LF_TIMER_CONTROL0_ADDRESS,
val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+}
- /* reset CE */
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CE_RST_MASK);
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- msleep(10);
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+ int ret;
- /* unreset CE */
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val & ~SOC_RESET_CONTROL_CE_RST_MASK);
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- msleep(10);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
+
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_warm_reset_counter++;
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_pci_irq_disable(ar);
+
+ /* Make sure the target CPU is not doing anything dangerous; e.g. if it
+ * were to access the copy engine while the host performs a copy engine
+ * reset, the device could confuse the PCIe controller to the point of
+ * bringing the host system to a complete stop (i.e. a hang).
+ */
ath10k_pci_warm_reset_si0(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
+ ath10k_pci_wait_for_target_init(ar);
- /* debug */
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_CAUSE_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
- val);
+ ath10k_pci_warm_reset_clear_lf(ar);
+ ath10k_pci_warm_reset_ce(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- CPU_INTR_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
- val);
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
+ return ret;
+ }
- /* CPU warm reset */
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
- val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
- SOC_RESET_CONTROL_ADDRESS);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
- val);
+ return 0;
+}
- msleep(100);
+static int ath10k_pci_chip_reset(struct ath10k *ar)
+{
+ int i, ret;
+ u32 val;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n");
+
+ /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
+ * It is thus preferred to use warm reset, which is safer but may not be
+ * able to recover the device from all possible failure scenarios.
+ *
+ * Warm reset doesn't always work on the first try, so attempt it a few
+ * times before giving up.
+ */
+ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
+ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
+ ret);
+ continue;
+ }
+
+ /* FIXME: Sometimes the copy engine doesn't recover after warm
+ * reset. In most cases this needs a cold reset. In some of these
+ * cases the device is in such a state that a cold reset may
+ * lock up the host.
+ *
+ * Reading any host interest register via the copy engine is
+ * sufficient to verify whether the device is capable of booting
+ * the firmware blob.
+ */
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
+ &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
+ return 0;
+ }
+
+ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
+ ath10k_warn(ar, "refusing cold reset as requested\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n");
return 0;
}
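ath10k_pci_chip_reset() encodes a retry-then-fallback policy: a few warm resets with a copy-engine sanity read between attempts, then cold reset as a last resort. A standalone sketch of the control flow (all functions are stand-ins):

#include <errno.h>
#include <stdio.h>

#define NUM_WARM_RESET_ATTEMPTS 3

static int warm_reset(int attempt) { return attempt < 2 ? -EIO : 0; }
static int verify_device(void)     { return 0; }	/* e.g. poke a register */
static int cold_reset(void)        { return 0; }

static int chip_reset(void)
{
	int i, ret;

	for (i = 0; i < NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = warm_reset(i);
		if (ret)
			continue;
		ret = verify_device();	/* confirm CE still responds */
		if (ret)
			continue;
		return 0;		/* warm reset worked */
	}

	return cold_reset();	/* last resort; may be unsafe on some HW */
}

int main(void)
{
	printf("chip_reset: %d\n", chip_reset());
	return 0;
}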
-static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up target: %d\n", ret);
+ return ret;
+ }
+
/*
* Bring the target up cleanly.
*
@@ -1763,26 +1900,16 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
* is in an unexpected state. We try to catch that here in order to
* reset the Target and retry the probe.
*/
- if (cold_reset)
- ret = ath10k_pci_cold_reset(ar);
- else
- ret = ath10k_pci_warm_reset(ar);
-
+ ret = ath10k_pci_chip_reset(ar);
if (ret) {
- ath10k_err(ar, "failed to reset target: %d\n", ret);
- goto err;
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_sleep;
}
- ret = ath10k_pci_ce_init(ar);
+ ret = ath10k_pci_init_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
- goto err;
- }
-
- ret = ath10k_pci_wait_for_target_init(ar);
- if (ret) {
- ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
- goto err_ce;
+ goto err_sleep;
}
ret = ath10k_pci_init_config(ar);
@@ -1801,73 +1928,21 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
err_ce:
ath10k_pci_ce_deinit(ar);
- ath10k_pci_warm_reset(ar);
-err:
- return ret;
-}
-
-static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
-{
- int i, ret;
-
- /*
- * Sometimes warm reset succeeds after retries.
- *
- * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
- * on the first try.
- */
- for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
- ret = __ath10k_pci_hif_power_up(ar, false);
- if (ret == 0)
- break;
-
- ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
- i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
- }
+err_sleep:
+ ath10k_pci_sleep(ar);
return ret;
}
-static int ath10k_pci_hif_power_up(struct ath10k *ar)
-{
- int ret;
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
-
- /*
- * Hardware CUS232 version 2 has some issues with cold reset and the
- * preferred (and safer) way to perform a device reset is through a
- * warm reset.
- *
- * Warm reset doesn't always work though, so falling back to cold reset
- * may be necessary.
- */
- ret = ath10k_pci_hif_power_up_warm(ar);
- if (ret) {
- ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
- ret);
-
- if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
- return ret;
-
- ath10k_warn(ar, "trying cold reset\n");
-
- ret = __ath10k_pci_hif_power_up(ar, true);
- if (ret) {
- ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
- ath10k_pci_warm_reset(ar);
+ /* Currently hif_power_up effectively performs a reset and hif_stop
+ * resets the chip as well, so there's no point in resetting here.
+ */
+
+ ath10k_pci_sleep(ar);
}
#ifdef CONFIG_PM
@@ -1921,6 +1996,8 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
.start = ath10k_pci_hif_start,
.stop = ath10k_pci_hif_stop,
@@ -1931,6 +2008,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
.power_up = ath10k_pci_hif_power_up,
.power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_pci_read32,
+ .write32 = ath10k_pci_write32,
#ifdef CONFIG_PM
.suspend = ath10k_pci_hif_suspend,
.resume = ath10k_pci_hif_resume,
@@ -2250,14 +2329,14 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */
- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
- PCIE_INTR_ENABLE_ADDRESS,
- PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL);
+ ath10k_pci_enable_legacy_irq(ar);
mdelay(10);
} while (time_before(jiffies, timeout));
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+
if (val == 0xffffffff) {
ath10k_err(ar, "failed to read device register, device is gone\n");
return -EIO;
@@ -2287,6 +2366,12 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
+ spin_lock_bh(&ar->data_lock);
+
+ ar->stats.fw_cold_reset_counter++;
+
+ spin_unlock_bh(&ar->data_lock);
+
/* Put Target, including PCIe, into RESET. */
val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
val |= 1;
@@ -2400,6 +2485,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
u32 chip_id;
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
+ ATH10K_BUS_PCI,
&ath10k_pci_hif_ops);
if (!ar) {
dev_err(&pdev->dev, "failed to allocate core\n");
@@ -2435,7 +2521,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_sleep;
}
- ret = ath10k_pci_alloc_ce(ar);
+ ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
ret);
@@ -2443,25 +2529,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
ath10k_pci_ce_deinit(ar);
-
- ret = ath10k_ce_disable_interrupts(ar);
- if (ret) {
- ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
- ret);
- goto err_free_ce;
- }
-
- /* Workaround: There's no known way to mask all possible interrupts via
- * device CSR. The only way to make sure device doesn't assert
- * interrupts is to reset it. Interrupts are then disabled on host
- * after handlers are registered.
- */
- ath10k_pci_warm_reset(ar);
+ ath10k_pci_irq_disable(ar);
ret = ath10k_pci_init_irq(ar);
if (ret) {
ath10k_err(ar, "failed to init irqs: %d\n", ret);
- goto err_free_ce;
+ goto err_free_pipes;
}
ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
@@ -2474,8 +2547,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
- /* This shouldn't race as the device has been reset above. */
- ath10k_pci_irq_disable(ar);
+ ath10k_pci_sleep(ar);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
@@ -2492,8 +2564,8 @@ err_free_irq:
err_deinit_irq:
ath10k_pci_deinit_irq(ar);
-err_free_ce:
- ath10k_pci_free_ce(ar);
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
err_sleep:
ath10k_pci_sleep(ar);
@@ -2527,8 +2599,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
- ath10k_pci_free_ce(ar);
- ath10k_pci_sleep(ar);
+ ath10k_pci_free_pipes(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
}
@@ -2565,5 +2636,7 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 3e1454b74e00..63ce61fcdac8 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -56,14 +56,14 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
}
int ath10k_spectral_process_fft(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_fft_report *fftr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
struct fft_sample_ath10k *fft_sample;
u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
- u32 reg0, reg1, nf_list1, nf_list2;
+ u32 reg0, reg1;
u8 chain_idx, *bins;
int dc_pos;
@@ -82,7 +82,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
/* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
* but the results/plots suggest that it's actually 22/44/88 MHz.
*/
- switch (event->hdr.chan_width_mhz) {
+ switch (phyerr->chan_width_mhz) {
case 20:
fft_sample->chan_width_mhz = 22;
break;
@@ -101,7 +101,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample->chan_width_mhz = 88;
break;
default:
- fft_sample->chan_width_mhz = event->hdr.chan_width_mhz;
+ fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
}
fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
@@ -110,36 +110,22 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
- fft_sample->rssi = event->hdr.rssi_combined;
+ fft_sample->rssi = phyerr->rssi_combined;
total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
- freq1 = __le16_to_cpu(event->hdr.freq1);
- freq2 = __le16_to_cpu(event->hdr.freq2);
+ freq1 = __le16_to_cpu(phyerr->freq1);
+ freq2 = __le16_to_cpu(phyerr->freq2);
fft_sample->freq1 = __cpu_to_be16(freq1);
fft_sample->freq2 = __cpu_to_be16(freq2);
- nf_list1 = __le32_to_cpu(event->hdr.nf_list_1);
- nf_list2 = __le32_to_cpu(event->hdr.nf_list_2);
chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
- switch (chain_idx) {
- case 0:
- fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu);
- break;
- case 1:
- fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu);
- break;
- case 2:
- fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu);
- break;
- case 3:
- fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu);
- break;
- }
+ fft_sample->noise = __cpu_to_be16(
+ __le16_to_cpu(phyerr->nf_chains[chain_idx]));
bins = (u8 *)fftr;
bins += sizeof(*fftr);
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index ddc57c557272..042f5b302c75 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -47,8 +47,8 @@ enum ath10k_spectral_mode {
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_spectral_process_fft(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_fft_report *fftr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf);
int ath10k_spectral_start(struct ath10k *ar);
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
@@ -59,8 +59,8 @@ void ath10k_spectral_destroy(struct ath10k *ar);
static inline int
ath10k_spectral_process_fft(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_fft_report *fftr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 574b75ab2609..b289378b6e3e 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -20,6 +20,15 @@
#include <linux/tracepoint.h>
#include "core.h"
+#if !defined(_TRACE_H_)
+static inline u32 ath10k_frm_hdr_len(const void *buf)
+{
+ const struct ieee80211_hdr *hdr = buf;
+
+ return ieee80211_hdrlen(hdr->frame_control);
+}
+#endif
+
#define _TRACE_H_
/* create empty functions when tracing is disabled */
@@ -138,7 +147,8 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len, int ret),
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
+ int ret),
TP_ARGS(ar, id, buf, buf_len, ret),
@@ -171,7 +181,7 @@ TRACE_EVENT(ath10k_wmi_cmd,
);
TRACE_EVENT(ath10k_wmi_event,
- TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len),
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
TP_ARGS(ar, id, buf, buf_len),
@@ -201,7 +211,7 @@ TRACE_EVENT(ath10k_wmi_event,
);
TRACE_EVENT(ath10k_htt_stats,
- TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len),
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
TP_ARGS(ar, buf, buf_len),
@@ -228,7 +238,7 @@ TRACE_EVENT(ath10k_htt_stats,
);
TRACE_EVENT(ath10k_wmi_dbglog,
- TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len),
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
TP_ARGS(ar, buf, buf_len),
@@ -254,6 +264,195 @@ TRACE_EVENT(ath10k_wmi_dbglog,
)
);
+TRACE_EVENT(ath10k_htt_pktlog,
+ TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, buf_len)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %hu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_tx,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
+ u8 vdev_id, u8 tid),
+
+ TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ __field(u16, msdu_len)
+ __field(u8, vdev_id)
+ __field(u8, tid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->msdu_id = msdu_id;
+ __entry->msdu_len = msdu_len;
+ __entry->vdev_id = vdev_id;
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id,
+ __entry->msdu_len,
+ __entry->vdev_id,
+ __entry->tid
+ )
+);
+
+TRACE_EVENT(ath10k_txrx_tx_unref,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id),
+
+ TP_ARGS(ar, msdu_id),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->msdu_id = msdu_id;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_hdr_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, data, ath10k_frm_hdr_len(data))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = ath10k_frm_hdr_len(data);
+ memcpy(__get_dynamic_array(data), data, __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_payload_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len - ath10k_frm_hdr_len(data);
+ memcpy(__get_dynamic_array(payload),
+ data + ath10k_frm_hdr_len(data), __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
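The hdr/payload event classes split each frame at the 802.11 header boundary so only the needed part is copied into the trace buffer. A hedged sketch of the split, with a fixed header length standing in for the driver's ieee80211_hdrlen()-based ath10k_frm_hdr_len():

#include <stdint.h>
#include <stdio.h>

static size_t frm_hdr_len(const uint8_t *frame)
{
	(void)frame;
	return 24;	/* ieee80211_hdrlen() derives this from frame_control */
}

int main(void)
{
	uint8_t frame[64] = { 0 };
	size_t len = sizeof(frame);
	size_t hdrlen = frm_hdr_len(frame);

	/* hdr event records frame[0..hdrlen), payload event the rest */
	printf("hdr event captures %zu bytes, payload event %zu bytes\n",
	       hdrlen, len - hdrlen);
	return 0;
}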
+TRACE_EVENT(ath10k_htt_rx_desc,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, len)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index a0cbc21d0d4b..7579de8e7a8c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -78,6 +78,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
+ trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
@@ -145,7 +146,8 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
mapped = !!ath10k_peer_find(ar, vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
- mapped == expect_mapped;
+ (mapped == expect_mapped ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
}), 3*HZ);
if (ret <= 0)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2c42bd504b79..c0f3e4d09263 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -609,6 +609,40 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
};
+static void
+ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg)
+{
+ u32 flags = 0;
+
+ memset(ch, 0, sizeof(*ch));
+
+ if (arg->passive)
+ flags |= WMI_CHAN_FLAG_PASSIVE;
+ if (arg->allow_ibss)
+ flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+ if (arg->allow_ht)
+ flags |= WMI_CHAN_FLAG_ALLOW_HT;
+ if (arg->allow_vht)
+ flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+ if (arg->ht40plus)
+ flags |= WMI_CHAN_FLAG_HT40_PLUS;
+ if (arg->chan_radar)
+ flags |= WMI_CHAN_FLAG_DFS;
+
+ ch->mhz = __cpu_to_le32(arg->freq);
+ ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
+ ch->band_center_freq2 = 0;
+ ch->min_power = arg->min_power;
+ ch->max_power = arg->max_power;
+ ch->reg_power = arg->max_reg_power;
+ ch->antenna_max = arg->max_antenna_gain;
+
+ /* mode & flags share storage */
+ ch->mode = arg->mode;
+ ch->flags |= __cpu_to_le32(flags);
+}
+
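ath10k_wmi_put_wmi_channel() collects boolean channel attributes into a single flags word before byte-swapping it into the firmware's little-endian wire format. A small sketch of that packing step (flag values here are invented):

#include <stdint.h>
#include <stdio.h>

#define CHAN_FLAG_PASSIVE  (1u << 0)
#define CHAN_FLAG_ALLOW_HT (1u << 1)
#define CHAN_FLAG_DFS      (1u << 2)

struct chan_arg { int passive, allow_ht, radar; };

static uint32_t pack_chan_flags(const struct chan_arg *arg)
{
	uint32_t flags = 0;

	if (arg->passive)
		flags |= CHAN_FLAG_PASSIVE;
	if (arg->allow_ht)
		flags |= CHAN_FLAG_ALLOW_HT;
	if (arg->radar)
		flags |= CHAN_FLAG_DFS;

	return flags;	/* the driver would __cpu_to_le32() this */
}

int main(void)
{
	struct chan_arg arg = { .passive = 1, .allow_ht = 1, .radar = 0 };

	printf("flags = 0x%x\n", (unsigned)pack_chan_flags(&arg));
	return 0;
}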
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
int ret;
@@ -745,6 +779,10 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
ath10k_wmi_tx_beacons_nowait(ar);
ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+
+ if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ ret = -ESHUTDOWN;
+
(ret != -EAGAIN);
}), 3*HZ);
@@ -800,6 +838,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
+ trace_ath10k_tx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_tx_payload(ar, skb->data, skb->len);
/* Send the management frame buffer to the target */
ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
@@ -1073,13 +1113,46 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
return rate_idx;
}
+/* If keys are configured, the HW decrypts all frames
+ * with the protected bit set. Mark such frames as decrypted.
+ */
+static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ unsigned int hdrlen;
+ bool peer_key;
+ u8 *addr, keyidx;
+
+ if (!ieee80211_is_auth(hdr->frame_control) ||
+ !ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
+ return;
+
+ keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
+ addr = ieee80211_get_SA(hdr);
+
+ spin_lock_bh(&ar->data_lock);
+ peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer_key) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac wep key present for peer %pM\n", addr);
+ status->flag |= RX_FLAG_DECRYPTED;
+ }
+}
+
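The handler pulls the WEP key index out of the last byte of the 4-byte WEP IV, where the key ID occupies the top two bits; that is what WEP_KEYID_SHIFT (6) encodes. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define WEP_IV_LEN      4
#define WEP_KEYID_SHIFT 6

static uint8_t wep_keyidx(const uint8_t *frame, unsigned int hdrlen)
{
	/* 4th IV byte: bits 6-7 carry the key ID, giving an index 0..3 */
	return frame[hdrlen + WEP_IV_LEN - 1] >> WEP_KEYID_SHIFT;
}

int main(void)
{
	uint8_t frame[32] = { 0 };
	unsigned int hdrlen = 24;	/* a typical 802.11 header length */

	frame[hdrlen + 3] = 2 << WEP_KEYID_SHIFT;	/* key ID 2 in the IV */
	printf("keyidx = %u\n", (unsigned)wep_keyidx(frame, hdrlen));
	return 0;
}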
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_event_v1 *ev_v1;
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_channel *ch;
struct ieee80211_hdr *hdr;
u32 rx_status;
u32 channel;
@@ -1127,30 +1200,34 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
- if (rx_status & WMI_RX_STATUS_ERR_CRC)
- status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_status & WMI_RX_STATUS_ERR_CRC) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- /* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
+ /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
* MODE_11B. This means phy_mode is not a reliable source for the band
- * of mgmt rx. */
-
- ch = ar->scan_channel;
- if (!ch)
- ch = ar->rx_channel;
-
- if (ch) {
- status->band = ch->band;
-
- if (phy_mode == MODE_11B &&
- status->band == IEEE80211_BAND_5GHZ)
- ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+ * of mgmt rx.
+ */
+ if (channel >= 1 && channel <= 14) {
+ status->band = IEEE80211_BAND_2GHZ;
+ } else if (channel >= 36 && channel <= 165) {
+ status->band = IEEE80211_BAND_5GHZ;
} else {
- ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n");
- status->band = phy_mode_to_band(phy_mode);
+ /* Shouldn't happen unless the list of channels advertised to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ return 0;
}
+ if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
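With the phy_mode heuristic gone, the band is derived purely from the channel number (1-14 is 2.4 GHz, 36-165 is 5 GHz) and the frequency from the standard channel arithmetic that ieee80211_channel_to_frequency() implements. A sketch of that mapping:

#include <stdio.h>

static int channel_to_freq_mhz(int chan)
{
	if (chan >= 1 && chan <= 13)
		return 2407 + chan * 5;		/* 2.4 GHz band */
	if (chan == 14)
		return 2484;			/* Japan-only special case */
	if (chan >= 36 && chan <= 165)
		return 5000 + chan * 5;		/* 5 GHz band */
	return -1;				/* not an advertised channel */
}

int main(void)
{
	printf("ch 6  -> %d MHz\n", channel_to_freq_mhz(6));
	printf("ch 36 -> %d MHz\n", channel_to_freq_mhz(36));
	printf("ch 15 -> %d MHz\n", channel_to_freq_mhz(15));
	return 0;
}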
@@ -1160,6 +1237,8 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
+ ath10k_wmi_handle_wep_reauth(ar, skb, status);
+
/* FW delivers WEP Shared Auth frame with Protected Bit set and
* encrypted payload. However, in case of PMF it delivers decrypted
* frames with Protected Bit set. */
@@ -1295,14 +1374,196 @@ static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ const struct wal_dbg_tx_stats *tx = &src->wal.tx;
+ const struct wal_dbg_rx_stats *rx = &src->wal.rx;
+
+ dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
+ dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+ dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+ dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+ dst->cycle_count = __le32_to_cpu(src->cycle_count);
+ dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+ dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+
+ dst->comp_queued = __le32_to_cpu(tx->comp_queued);
+ dst->comp_delivered = __le32_to_cpu(tx->comp_delivered);
+ dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued);
+ dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued);
+ dst->wmm_drop = __le32_to_cpu(tx->wmm_drop);
+ dst->local_enqued = __le32_to_cpu(tx->local_enqued);
+ dst->local_freed = __le32_to_cpu(tx->local_freed);
+ dst->hw_queued = __le32_to_cpu(tx->hw_queued);
+ dst->hw_reaped = __le32_to_cpu(tx->hw_reaped);
+ dst->underrun = __le32_to_cpu(tx->underrun);
+ dst->tx_abort = __le32_to_cpu(tx->tx_abort);
+ dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed);
+ dst->tx_ko = __le32_to_cpu(tx->tx_ko);
+ dst->data_rc = __le32_to_cpu(tx->data_rc);
+ dst->self_triggers = __le32_to_cpu(tx->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(tx->pdev_resets);
+ dst->phy_underrun = __le32_to_cpu(tx->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(tx->txop_ovf);
+
+ dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change);
+ dst->status_rcvd = __le32_to_cpu(rx->status_rcvd);
+ dst->r0_frags = __le32_to_cpu(rx->r0_frags);
+ dst->r1_frags = __le32_to_cpu(rx->r1_frags);
+ dst->r2_frags = __le32_to_cpu(rx->r2_frags);
+ dst->r3_frags = __le32_to_cpu(rx->r3_frags);
+ dst->htt_msdus = __le32_to_cpu(rx->htt_msdus);
+ dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus);
+ dst->loc_msdus = __le32_to_cpu(rx->loc_msdus);
+ dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus);
+ dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu);
+ dst->phy_errs = __le32_to_cpu(rx->phy_errs);
+ dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop);
+ dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs);
+}
+
+static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+}
+
+static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(src, dst);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10x_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats(&src->old, dst);
+
+ dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
+ dst->rts_bad = __le32_to_cpu(src->rts_bad);
+ dst->rts_good = __le32_to_cpu(src->rts_good);
+ dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
+ dst->no_beacons = __le32_to_cpu(src->no_beacons);
+ dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats);
+ else
+ return ath10k_wmi_main_pull_fw_stats(ar, skb, stats);
+}
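
/*
 * Standalone sketch of the skb_pull()-style parsing both pull
 * functions above rely on: consume a fixed-size header, then a run of
 * fixed-size records, rejecting as -EPROTO (as the driver does) any
 * buffer too short to hold what its own header claims. The cursor
 * stands in for the sk_buff.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct cursor {
	const uint8_t *p;
	size_t left;
};

static int pull(struct cursor *c, void *out, size_t n)
{
	if (c->left < n)
		return -EPROTO;		/* truncated event */
	memcpy(out, c->p, n);
	c->p += n;
	c->left -= n;
	return 0;
}

/* usage: while (count--) { if (pull(&c, &rec, sizeof(rec))) bail; } */
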
+
static void ath10k_wmi_event_update_stats(struct ath10k *ar,
struct sk_buff *skb)
{
- struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
-
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
-
- ath10k_debug_read_target_stats(ar, ev);
+ ath10k_debug_fw_stats_process(ar, skb);
}
static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
@@ -1579,6 +1840,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
+ dma_addr_t paddr;
int ret, vdev_id = 0;
ev = (struct wmi_host_swba_event *)skb->data;
@@ -1647,27 +1909,37 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "SWBA overrun on vdev %d\n",
arvif->vdev_id);
- dma_unmap_single(arvif->ar->dev,
- ATH10K_SKB_CB(arvif->beacon)->paddr,
- arvif->beacon->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(arvif->beacon);
- arvif->beacon = NULL;
+ ath10k_mac_vif_beacon_free(arvif);
}
- ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
- bcn->data, bcn->len,
- DMA_TO_DEVICE);
- ret = dma_mapping_error(arvif->ar->dev,
- ATH10K_SKB_CB(bcn)->paddr);
- if (ret) {
- ath10k_warn(ar, "failed to map beacon: %d\n", ret);
- dev_kfree_skb_any(bcn);
- goto skip;
+ if (!arvif->beacon_buf) {
+ paddr = dma_map_single(arvif->ar->dev, bcn->data,
+ bcn->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(arvif->ar->dev, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to map beacon: %d\n",
+ ret);
+ dev_kfree_skb_any(bcn);
+ goto skip;
+ }
+
+ ATH10K_SKB_CB(bcn)->paddr = paddr;
+ } else {
+ if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
+ ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
+ bcn->len, IEEE80211_MAX_FRAME_LEN);
+ skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
+ }
+ memcpy(arvif->beacon_buf, bcn->data, bcn->len);
+ ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
}
arvif->beacon = bcn;
arvif->beacon_sent = false;
+ trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
+ trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
+
ath10k_wmi_tx_beacon_nowait(arvif);
skip:
spin_unlock_bh(&ar->data_lock);
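
/*
 * Sketch of the preallocated-buffer branch above: when a DMA buffer
 * already exists for the vif, the beacon is clamped to the buffer
 * capacity and copied, instead of being dma_map_single()ed per frame.
 * BEACON_BUF_CAP is an illustrative capacity; the driver bounds the
 * copy by IEEE80211_MAX_FRAME_LEN.
 */
#include <stddef.h>
#include <string.h>

#define BEACON_BUF_CAP 2048	/* illustrative, not the driver's limit */

static size_t copy_beacon_sketch(void *dst, const void *src, size_t len)
{
	if (len > BEACON_BUF_CAP)
		len = BEACON_BUF_CAP;	/* trim, as skb_trim() does above */
	memcpy(dst, src, len);
	return len;			/* bytes actually placed in dst */
}
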
@@ -1681,8 +1953,8 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
}
static void ath10k_dfs_radar_report(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_radar_report *rr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_radar_report *rr,
u64 tsf)
{
u32 reg0, reg1, tsf32l;
@@ -1715,12 +1987,12 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
return;
/* report event to DFS pattern detector */
- tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
+ tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
tsf64 = tsf & (~0xFFFFFFFFULL);
tsf64 |= tsf32l;
width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
- rssi = event->hdr.rssi_combined;
+ rssi = phyerr->rssi_combined;
/* hardware store this as 8 bit signed value,
* set to zero if negative number
@@ -1759,8 +2031,8 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
}
static int ath10k_dfs_fft_report(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
- struct phyerr_fft_report *fftr,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
u64 tsf)
{
u32 reg0, reg1;
@@ -1768,7 +2040,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
- rssi = event->hdr.rssi_combined;
+ rssi = phyerr->rssi_combined;
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
@@ -1797,20 +2069,20 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
}
static void ath10k_wmi_event_dfs(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
+ const struct wmi_phyerr *phyerr,
u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
- struct phyerr_tlv *tlv;
- struct phyerr_radar_report *rr;
- struct phyerr_fft_report *fftr;
- u8 *tlv_buf;
+ const struct phyerr_tlv *tlv;
+ const struct phyerr_radar_report *rr;
+ const struct phyerr_fft_report *fftr;
+ const u8 *tlv_buf;
- buf_len = __le32_to_cpu(event->hdr.buf_len);
+ buf_len = __le32_to_cpu(phyerr->buf_len);
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
- event->hdr.phy_err_code, event->hdr.rssi_combined,
- __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
+ phyerr->phy_err_code, phyerr->rssi_combined,
+ __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
/* Skip event if DFS disabled */
if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
@@ -1825,9 +2097,9 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
return;
}
- tlv = (struct phyerr_tlv *)&event->bufp[i];
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
tlv_len = __le16_to_cpu(tlv->len);
- tlv_buf = &event->bufp[i + sizeof(*tlv)];
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
tlv_len, tlv->tag, tlv->sig);
@@ -1841,7 +2113,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
}
rr = (struct phyerr_radar_report *)tlv_buf;
- ath10k_dfs_radar_report(ar, event, rr, tsf);
+ ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
break;
case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
@@ -1851,7 +2123,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
}
fftr = (struct phyerr_fft_report *)tlv_buf;
- res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
+ res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
if (res)
return;
break;
@@ -1863,16 +2135,16 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
static void
ath10k_wmi_event_spectral_scan(struct ath10k *ar,
- struct wmi_single_phyerr_rx_event *event,
+ const struct wmi_phyerr *phyerr,
u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
struct phyerr_tlv *tlv;
- u8 *tlv_buf;
- struct phyerr_fft_report *fftr;
+ const void *tlv_buf;
+ const struct phyerr_fft_report *fftr;
size_t fftr_len;
- buf_len = __le32_to_cpu(event->hdr.buf_len);
+ buf_len = __le32_to_cpu(phyerr->buf_len);
while (i < buf_len) {
if (i + sizeof(*tlv) > buf_len) {
@@ -1881,9 +2153,9 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
return;
}
- tlv = (struct phyerr_tlv *)&event->bufp[i];
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
tlv_len = __le16_to_cpu(tlv->len);
- tlv_buf = &event->bufp[i + sizeof(*tlv)];
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
if (i + sizeof(*tlv) + tlv_len > buf_len) {
ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
@@ -1900,8 +2172,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
}
fftr_len = tlv_len - sizeof(*fftr);
- fftr = (struct phyerr_fft_report *)tlv_buf;
- res = ath10k_spectral_process_fft(ar, event,
+ fftr = tlv_buf;
+ res = ath10k_spectral_process_fft(ar, phyerr,
fftr, fftr_len,
tsf);
if (res < 0) {
@@ -1918,8 +2190,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
- struct wmi_comb_phyerr_rx_event *comb_event;
- struct wmi_single_phyerr_rx_event *event;
+ const struct wmi_phyerr_event *ev;
+ const struct wmi_phyerr *phyerr;
u32 count, i, buf_len, phy_err_code;
u64 tsf;
int left_len = skb->len;
@@ -1927,38 +2199,38 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
ATH10K_DFS_STAT_INC(ar, phy_errors);
/* Check if combined event available */
- if (left_len < sizeof(*comb_event)) {
+ if (left_len < sizeof(*ev)) {
ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
return;
}
- left_len -= sizeof(*comb_event);
+ left_len -= sizeof(*ev);
/* Check number of included events */
- comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
- count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
+ ev = (const struct wmi_phyerr_event *)skb->data;
+ count = __le32_to_cpu(ev->num_phyerrs);
- tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
+ tsf = __le32_to_cpu(ev->tsf_u32);
tsf <<= 32;
- tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
+ tsf |= __le32_to_cpu(ev->tsf_l32);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event phyerr count %d tsf64 0x%llX\n",
count, tsf);
- event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
+ phyerr = ev->phyerrs;
for (i = 0; i < count; i++) {
/* Check if we can read event header */
- if (left_len < sizeof(*event)) {
+ if (left_len < sizeof(*phyerr)) {
ath10k_warn(ar, "single event (%d) wrong head len\n",
i);
return;
}
- left_len -= sizeof(*event);
+ left_len -= sizeof(*phyerr);
- buf_len = __le32_to_cpu(event->hdr.buf_len);
- phy_err_code = event->hdr.phy_err_code;
+ buf_len = __le32_to_cpu(phyerr->buf_len);
+ phy_err_code = phyerr->phy_err_code;
if (left_len < buf_len) {
ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
@@ -1969,20 +2241,20 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
switch (phy_err_code) {
case PHY_ERROR_RADAR:
- ath10k_wmi_event_dfs(ar, event, tsf);
+ ath10k_wmi_event_dfs(ar, phyerr, tsf);
break;
case PHY_ERROR_SPECTRAL_SCAN:
- ath10k_wmi_event_spectral_scan(ar, event, tsf);
+ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
break;
case PHY_ERROR_FALSE_RADAR_EXT:
- ath10k_wmi_event_dfs(ar, event, tsf);
- ath10k_wmi_event_spectral_scan(ar, event, tsf);
+ ath10k_wmi_event_dfs(ar, phyerr, tsf);
+ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
break;
default:
break;
}
- event += sizeof(*event) + buf_len;
+ phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
}
}
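
/*
 * Standalone sketch of the bounds-checked TLV walk the DFS and
 * spectral handlers above perform on each phyerr payload: every step
 * verifies that the header fits, then that the declared payload fits,
 * before advancing. The tlv_hdr layout here is illustrative; the
 * driver's struct phyerr_tlv differs in field order.
 */
#include <stddef.h>
#include <stdint.h>

struct tlv_hdr {
	uint8_t  tag;
	uint8_t  sig;
	uint16_t len;		/* payload bytes following the header */
};

static int walk_tlvs(const uint8_t *buf, size_t buf_len,
		     void (*handle)(uint8_t tag, const uint8_t *p, size_t n))
{
	size_t i = 0;

	while (i + sizeof(struct tlv_hdr) <= buf_len) {
		const struct tlv_hdr *tlv = (const void *)&buf[i];
		size_t payload = tlv->len;

		if (i + sizeof(*tlv) + payload > buf_len)
			return -1;	/* declared length overruns buffer */
		handle(tlv->tag, &buf[i + sizeof(*tlv)], payload);
		i += sizeof(*tlv) + payload;
	}
	return 0;
}
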
@@ -2028,7 +2300,7 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar,
/* the last byte is always reserved for the null character */
buf[i] = '\0';
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
+ ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
}
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
@@ -2163,30 +2435,117 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
return 0;
}
-static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
- struct sk_buff *skb)
+static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_service_ready_event *ev;
+ size_t i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->sw_version;
+ arg->sw_ver1 = ev->sw_version_1;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_10x_service_ready_event *ev;
+ int i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->sw_version;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
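
/*
 * Sketch of the count clamping both pull helpers above apply: the
 * firmware-supplied num_mem_reqs is limited to the host array capacity
 * before any element pointer is recorded, so a hostile count can never
 * index past mem_reqs[]. The 16-byte record stride is illustrative.
 */
#include <stddef.h>

static size_t collect_reqs(const unsigned char *reqs, size_t fw_count,
			   const void **out, size_t capacity)
{
	size_t n = fw_count < capacity ? fw_count : capacity; /* min_t() */
	size_t i;

	for (i = 0; i < n; i++)
		out[i] = reqs + i * 16;	/* 16: illustrative record size */
	return n;
}
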
+
+static void ath10k_wmi_event_service_ready(struct ath10k *ar,
+ struct sk_buff *skb)
{
- struct wmi_service_ready_event *ev = (void *)skb->data;
- DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
+ struct wmi_svc_rdy_ev_arg arg = {};
+ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+ int ret;
- if (skb->len < sizeof(*ev)) {
- ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
- skb->len, sizeof(*ev));
+ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg);
+ wmi_10x_svc_map(arg.service_map, ar->wmi.svc_map,
+ arg.service_map_len);
+ } else {
+ ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg);
+ wmi_main_svc_map(arg.service_map, ar->wmi.svc_map,
+ arg.service_map_len);
+ }
+
+ if (ret) {
+ ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
return;
}
- ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
- ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
- ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
- ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+ ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
+ ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
ar->fw_version_major =
- (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
- ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+ (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
ar->fw_version_release =
- (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
- ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
- ar->phy_capability = __le32_to_cpu(ev->phy_capability);
- ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+ (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
+ ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
+ ar->phy_capability = __le32_to_cpu(arg.phy_capab);
+ ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
+ ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
+ arg.service_map, arg.service_map_len);
/* only manually set fw features when not using FW IE format */
if (ar->fw_api == 1 && ar->fw_version_build > 636)
@@ -2198,13 +2557,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
}
- ar->ath_common.regulatory.current_rd =
- __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
-
- wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap);
- ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
- ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
- ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
+ ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
+ ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
if (strlen(ar->hw->wiphy->fw_version) == 0) {
snprintf(ar->hw->wiphy->fw_version,
@@ -2216,93 +2570,18 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->fw_version_build);
}
- /* FIXME: it probably should be better to support this */
- if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
- ath10k_warn(ar, "target requested %d memory chunks; ignoring\n",
- __le32_to_cpu(ev->num_mem_reqs));
- }
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
- __le32_to_cpu(ev->sw_version),
- __le32_to_cpu(ev->sw_version_1),
- __le32_to_cpu(ev->abi_version),
- __le32_to_cpu(ev->phy_capability),
- __le32_to_cpu(ev->ht_cap_info),
- __le32_to_cpu(ev->vht_cap_info),
- __le32_to_cpu(ev->vht_supp_mcs),
- __le32_to_cpu(ev->sys_cap_info),
- __le32_to_cpu(ev->num_mem_reqs),
- __le32_to_cpu(ev->num_rf_chains));
-
- complete(&ar->wmi.service_ready);
-}
-
-static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
- struct sk_buff *skb)
-{
- u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
- int ret;
- struct wmi_service_ready_event_10x *ev = (void *)skb->data;
- DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
-
- if (skb->len < sizeof(*ev)) {
- ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
- skb->len, sizeof(*ev));
- return;
- }
-
- ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
- ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
- ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
- ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
- ar->fw_version_major =
- (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
- ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
- ar->phy_capability = __le32_to_cpu(ev->phy_capability);
- ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
-
- if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
- ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
- ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
- ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
- }
-
- ar->ath_common.regulatory.current_rd =
- __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
-
- wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap);
- ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
- ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
- ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
-
- if (strlen(ar->hw->wiphy->fw_version) == 0) {
- snprintf(ar->hw->wiphy->fw_version,
- sizeof(ar->hw->wiphy->fw_version),
- "%u.%u",
- ar->fw_version_major,
- ar->fw_version_minor);
- }
-
- num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
-
- if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
+ num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
+ if (num_mem_reqs > WMI_MAX_MEM_REQS) {
ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
num_mem_reqs);
return;
}
- if (!num_mem_reqs)
- goto exit;
-
- ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
- num_mem_reqs);
-
for (i = 0; i < num_mem_reqs; ++i) {
- req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
- num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
- unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
- num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
+ req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
+ num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
+ unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
+ num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
/* number of units to allocate is number of
@@ -2316,7 +2595,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
req_id,
- __le32_to_cpu(ev->mem_reqs[i].num_units),
+ __le32_to_cpu(arg.mem_reqs[i]->num_units),
num_unit_info,
unit_size,
num_units);
@@ -2327,23 +2606,23 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
return;
}
-exit:
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
- __le32_to_cpu(ev->sw_version),
- __le32_to_cpu(ev->abi_version),
- __le32_to_cpu(ev->phy_capability),
- __le32_to_cpu(ev->ht_cap_info),
- __le32_to_cpu(ev->vht_cap_info),
- __le32_to_cpu(ev->vht_supp_mcs),
- __le32_to_cpu(ev->sys_cap_info),
- __le32_to_cpu(ev->num_mem_reqs),
- __le32_to_cpu(ev->num_rf_chains));
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+ __le32_to_cpu(arg.min_tx_power),
+ __le32_to_cpu(arg.max_tx_power),
+ __le32_to_cpu(arg.ht_cap),
+ __le32_to_cpu(arg.vht_cap),
+ __le32_to_cpu(arg.sw_ver0),
+ __le32_to_cpu(arg.sw_ver1),
+ __le32_to_cpu(arg.phy_capab),
+ __le32_to_cpu(arg.num_rf_chains),
+ __le32_to_cpu(arg.eeprom_rd),
+ __le32_to_cpu(arg.num_mem_reqs));
complete(&ar->wmi.service_ready);
}
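
/*
 * Standalone sketch of the sw_ver0/sw_ver1 unpacking above: two 32-bit
 * words carry major[31:24]/minor[23:0] and release[31:16]/build[15:0],
 * exactly as the mask-and-shift sequence in the handler extracts them.
 */
#include <stdint.h>
#include <stdio.h>

static void unpack_fw_version(uint32_t ver0, uint32_t ver1)
{
	unsigned int major   = (ver0 & 0xff000000u) >> 24;
	unsigned int minor   =  ver0 & 0x00ffffffu;
	unsigned int release = (ver1 & 0xffff0000u) >> 16;
	unsigned int build   =  ver1 & 0x0000ffffu;

	printf("%u.%u.%u.%u\n", major, minor, release, build);
}
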
-static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
@@ -2466,10 +2745,10 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_install_key_complete(ar, skb);
break;
case WMI_SERVICE_READY_EVENTID:
- ath10k_wmi_service_ready_event_rx(ar, skb);
+ ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_READY_EVENTID:
- ath10k_wmi_ready_event_rx(ar, skb);
+ ath10k_wmi_event_ready(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
@@ -2586,10 +2865,10 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_resume_req(ar, skb);
break;
case WMI_10X_SERVICE_READY_EVENTID:
- ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+ ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_10X_READY_EVENTID:
- ath10k_wmi_ready_event_rx(ar, skb);
+ ath10k_wmi_event_ready(ar, skb);
break;
case WMI_10X_PDEV_UTF_EVENTID:
/* ignore utf events */
@@ -2697,10 +2976,10 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_resume_req(ar, skb);
break;
case WMI_10_2_SERVICE_READY_EVENTID:
- ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+ ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_10_2_READY_EVENTID:
- ath10k_wmi_ready_event_rx(ar, skb);
+ ath10k_wmi_event_ready(ar, skb);
break;
case WMI_10_2_RTT_KEEPALIVE_EVENTID:
case WMI_10_2_GPIO_INPUT_EVENTID:
@@ -2732,45 +3011,6 @@ static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
}
}
-/* WMI Initialization functions */
-int ath10k_wmi_attach(struct ath10k *ar)
-{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- ar->wmi.cmd = &wmi_10_2_cmd_map;
- else
- ar->wmi.cmd = &wmi_10x_cmd_map;
-
- ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
- ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
- } else {
- ar->wmi.cmd = &wmi_cmd_map;
- ar->wmi.vdev_param = &wmi_vdev_param_map;
- ar->wmi.pdev_param = &wmi_pdev_param_map;
- }
-
- init_completion(&ar->wmi.service_ready);
- init_completion(&ar->wmi.unified_ready);
- init_waitqueue_head(&ar->wmi.tx_credits_wq);
-
- return 0;
-}
-
-void ath10k_wmi_detach(struct ath10k *ar)
-{
- int i;
-
- /* free the host memory chunks requested by firmware */
- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- dma_free_coherent(ar->dev,
- ar->wmi.mem_chunks[i].len,
- ar->wmi.mem_chunks[i].vaddr,
- ar->wmi.mem_chunks[i].paddr);
- }
-
- ar->wmi.num_mem_chunks = 0;
-}
-
int ath10k_wmi_connect(struct ath10k *ar)
{
int status;
@@ -2865,42 +3105,6 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
ctl2g, ctl5g);
}
-int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
- const struct wmi_channel_arg *arg)
-{
- struct wmi_set_channel_cmd *cmd;
- struct sk_buff *skb;
- u32 ch_flags = 0;
-
- if (arg->passive)
- return -EINVAL;
-
- skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
- if (!skb)
- return -ENOMEM;
-
- if (arg->chan_radar)
- ch_flags |= WMI_CHAN_FLAG_DFS;
-
- cmd = (struct wmi_set_channel_cmd *)skb->data;
- cmd->chan.mhz = __cpu_to_le32(arg->freq);
- cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
- cmd->chan.mode = arg->mode;
- cmd->chan.flags |= __cpu_to_le32(ch_flags);
- cmd->chan.min_power = arg->min_power;
- cmd->chan.max_power = arg->max_power;
- cmd->chan.reg_power = arg->max_reg_power;
- cmd->chan.reg_classid = arg->reg_class_id;
- cmd->chan.antenna_max = arg->max_antenna_gain;
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi set channel mode %d freq %d\n",
- arg->mode, arg->freq);
-
- return ath10k_wmi_cmd_send(ar, skb,
- ar->wmi.cmd->pdev_set_channel_cmdid);
-}
-
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
struct wmi_pdev_suspend_cmd *cmd;
@@ -2951,16 +3155,37 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
+static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks)
+{
+ struct host_memory_chunk *chunk;
+ int i;
+
+ chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ chunk = &chunks->items[i];
+ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
+ i,
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+ }
+}
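
/*
 * Standalone sketch of the serialization factored out above: the same
 * {addr, size, req_id} triple is emitted little-endian for every INIT
 * command variant, which is what lets the three open-coded copies
 * below collapse into one helper. htole32() stands in for
 * __cpu_to_le32(); the input struct is illustrative.
 */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>

struct mem_chunk  { uint32_t paddr, len, req_id; };	/* host order */
struct wire_chunk { uint32_t ptr, size, req_id; };	/* little endian */

static void put_chunks(struct wire_chunk *out,
		       const struct mem_chunk *in, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		out[i].ptr = htole32(in[i].paddr);
		out[i].size = htole32(in[i].len);
		out[i].req_id = htole32(in[i].req_id);
	}
}
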
+
static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
u32 len, val;
- int i;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
- config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
config.num_offload_reorder_bufs =
@@ -3019,32 +3244,8 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd *)buf->data;
- if (ar->wmi.num_mem_chunks == 0) {
- cmd->num_host_mem_chunks = 0;
- goto out;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- ar->wmi.num_mem_chunks);
-
- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
-
- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- cmd->host_mem_chunks[i].ptr =
- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
- cmd->host_mem_chunks[i].size =
- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
- cmd->host_mem_chunks[i].req_id =
- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%llx\n",
- i,
- ar->wmi.mem_chunks[i].len,
- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
- }
-out:
memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
@@ -3056,7 +3257,6 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
u32 len, val;
- int i;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -3110,32 +3310,8 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10x *)buf->data;
- if (ar->wmi.num_mem_chunks == 0) {
- cmd->num_host_mem_chunks = 0;
- goto out;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- ar->wmi.num_mem_chunks);
-
- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
-
- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- cmd->host_mem_chunks[i].ptr =
- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
- cmd->host_mem_chunks[i].size =
- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
- cmd->host_mem_chunks[i].req_id =
- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%llx\n",
- i,
- ar->wmi.mem_chunks[i].len,
- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
- }
-out:
memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
@@ -3147,7 +3323,6 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
u32 len, val;
- int i;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -3201,32 +3376,8 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
- if (ar->wmi.num_mem_chunks == 0) {
- cmd->num_host_mem_chunks = 0;
- goto out;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
- ar->wmi.num_mem_chunks);
-
- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
-
- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- cmd->host_mem_chunks[i].ptr =
- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
- cmd->host_mem_chunks[i].size =
- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
- cmd->host_mem_chunks[i].req_id =
- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
-
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi chunk %d len %d requested, addr 0x%llx\n",
- i,
- ar->wmi.mem_chunks[i].len,
- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
- }
-out:
memcpy(&cmd->resource_config.common, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
@@ -3248,52 +3399,50 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
return ret;
}
-static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
- const struct wmi_start_scan_arg *arg)
+static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
- int len;
+ if (arg->ie_len && !arg->ie)
+ return -EINVAL;
+ if (arg->n_channels && !arg->channels)
+ return -EINVAL;
+ if (arg->n_ssids && !arg->ssids)
+ return -EINVAL;
+ if (arg->n_bssids && !arg->bssids)
+ return -EINVAL;
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- len = sizeof(struct wmi_start_scan_cmd_10x);
- else
- len = sizeof(struct wmi_start_scan_cmd);
+ if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+ return -EINVAL;
+ if (arg->n_channels > ARRAY_SIZE(arg->channels))
+ return -EINVAL;
+ if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+ return -EINVAL;
+ if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+ return -EINVAL;
- if (arg->ie_len) {
- if (!arg->ie)
- return -EINVAL;
- if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
- return -EINVAL;
+ return 0;
+}
+static size_t
+ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
+{
+ int len = 0;
+
+ if (arg->ie_len) {
len += sizeof(struct wmi_ie_data);
len += roundup(arg->ie_len, 4);
}
if (arg->n_channels) {
- if (!arg->channels)
- return -EINVAL;
- if (arg->n_channels > ARRAY_SIZE(arg->channels))
- return -EINVAL;
-
len += sizeof(struct wmi_chan_list);
len += sizeof(__le32) * arg->n_channels;
}
if (arg->n_ssids) {
- if (!arg->ssids)
- return -EINVAL;
- if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
- return -EINVAL;
-
len += sizeof(struct wmi_ssid_list);
len += sizeof(struct wmi_ssid) * arg->n_ssids;
}
if (arg->n_bssids) {
- if (!arg->bssids)
- return -EINVAL;
- if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
- return -EINVAL;
-
len += sizeof(struct wmi_bssid_list);
len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
}
@@ -3301,28 +3450,12 @@ static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
return len;
}
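
/*
 * Sketch of the additive sizing above: each optional section
 * contributes its header plus payload, with the IE blob padded to a
 * 4-byte boundary exactly as roundup(arg->ie_len, 4) does. Header and
 * entry sizes here are illustrative, not the wire structs'.
 */
#include <stddef.h>

static size_t roundup4(size_t x)
{
	return (x + 3) & ~(size_t)3;
}

static size_t scan_tlvs_len(size_t n_chan, size_t n_ssid, size_t n_bssid,
			    size_t ie_len)
{
	size_t len = 0;

	if (n_chan)
		len += 8 + 4 * n_chan;		/* chan list hdr + one word each */
	if (n_ssid)
		len += 8 + 36 * n_ssid;		/* ssid list hdr + entries */
	if (n_bssid)
		len += 8 + 6 * n_bssid;		/* bssid list hdr + MAC addrs */
	if (ie_len)
		len += 8 + roundup4(ie_len);	/* ie hdr + padded blob */
	return len;
}
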
-int ath10k_wmi_start_scan(struct ath10k *ar,
- const struct wmi_start_scan_arg *arg)
+static void
+ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg)
{
- struct wmi_start_scan_cmd *cmd;
- struct sk_buff *skb;
- struct wmi_ie_data *ie;
- struct wmi_chan_list *channels;
- struct wmi_ssid_list *ssids;
- struct wmi_bssid_list *bssids;
u32 scan_id;
u32 scan_req_id;
- int off;
- int len = 0;
- int i;
-
- len = ath10k_wmi_start_scan_calc_len(ar, arg);
- if (len < 0)
- return len; /* len contains error code here */
-
- skb = ath10k_wmi_alloc_skb(ar, len);
- if (!skb)
- return -ENOMEM;
scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
scan_id |= arg->scan_id;
@@ -3330,35 +3463,36 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
scan_req_id |= arg->scan_req_id;
- cmd = (struct wmi_start_scan_cmd *)skb->data;
- cmd->scan_id = __cpu_to_le32(scan_id);
- cmd->scan_req_id = __cpu_to_le32(scan_req_id);
- cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
- cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
- cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
- cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
- cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
- cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
- cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
- cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
- cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
- cmd->idle_time = __cpu_to_le32(arg->idle_time);
- cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
- cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
- cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
-
- /* TLV list starts after fields included in the struct */
- /* There's just one filed that differes the two start_scan
- * structures - burst_duration, which we are not using btw,
- no point to make the split here, just shift the buffer to fit with
- given FW */
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
- off = sizeof(struct wmi_start_scan_cmd_10x);
- else
- off = sizeof(struct wmi_start_scan_cmd);
+ cmn->scan_id = __cpu_to_le32(scan_id);
+ cmn->scan_req_id = __cpu_to_le32(scan_req_id);
+ cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+ cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
+ cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+ cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
+ cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
+ cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
+ cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+ cmn->idle_time = __cpu_to_le32(arg->idle_time);
+ cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
+ cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
+ cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
+}
+
+static void
+ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_ie_data *ie;
+ struct wmi_chan_list *channels;
+ struct wmi_ssid_list *ssids;
+ struct wmi_bssid_list *bssids;
+ void *ptr = tlvs->tlvs;
+ int i;
if (arg->n_channels) {
- channels = (void *)skb->data + off;
+ channels = ptr;
channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
channels->num_chan = __cpu_to_le32(arg->n_channels);
@@ -3366,12 +3500,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
channels->channel_list[i].freq =
__cpu_to_le16(arg->channels[i]);
- off += sizeof(*channels);
- off += sizeof(__le32) * arg->n_channels;
+ ptr += sizeof(*channels);
+ ptr += sizeof(__le32) * arg->n_channels;
}
if (arg->n_ssids) {
- ssids = (void *)skb->data + off;
+ ssids = ptr;
ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
@@ -3383,12 +3517,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
arg->ssids[i].len);
}
- off += sizeof(*ssids);
- off += sizeof(struct wmi_ssid) * arg->n_ssids;
+ ptr += sizeof(*ssids);
+ ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
}
if (arg->n_bssids) {
- bssids = (void *)skb->data + off;
+ bssids = ptr;
bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
@@ -3397,23 +3531,57 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
arg->bssids[i].bssid,
ETH_ALEN);
- off += sizeof(*bssids);
- off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ ptr += sizeof(*bssids);
+ ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
}
if (arg->ie_len) {
- ie = (void *)skb->data + off;
+ ie = ptr;
ie->tag = __cpu_to_le32(WMI_IE_TAG);
ie->ie_len = __cpu_to_le32(arg->ie_len);
memcpy(ie->ie_data, arg->ie, arg->ie_len);
- off += sizeof(*ie);
- off += roundup(arg->ie_len, 4);
+ ptr += sizeof(*ie);
+ ptr += roundup(arg->ie_len, 4);
}
+}
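
/*
 * Sketch of the moving-pointer emission above: ptr starts at the TLV
 * area and advances by header-plus-payload after each section, which
 * keeps the writer in lockstep with ath10k_wmi_start_scan_tlvs_len().
 * The bare 4-byte tag header is illustrative.
 */
#include <stdint.h>
#include <string.h>

static uint8_t *put_section(uint8_t *ptr, uint32_t tag,
			    const void *payload, size_t n)
{
	memcpy(ptr, &tag, sizeof(tag));	/* illustrative section header */
	ptr += sizeof(tag);
	memcpy(ptr, payload, n);
	return ptr + n;			/* next section starts here */
}
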
- if (off != skb->len) {
- dev_kfree_skb(skb);
- return -EINVAL;
+int ath10k_wmi_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ret;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ len = sizeof(struct wmi_10x_start_scan_cmd) +
+ ath10k_wmi_start_scan_tlvs_len(arg);
+ else
+ len = sizeof(struct wmi_start_scan_cmd) +
+ ath10k_wmi_start_scan_tlvs_len(arg);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ struct wmi_10x_start_scan_cmd *cmd;
+
+ cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ } else {
+ struct wmi_start_scan_cmd *cmd;
+
+ cmd = (struct wmi_start_scan_cmd *)skb->data;
+ cmd->burst_duration_ms = __cpu_to_le32(0);
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
@@ -3532,7 +3700,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
- u32 ch_flags = 0;
if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
@@ -3559,8 +3726,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
flags |= WMI_VDEV_START_HIDDEN_SSID;
if (arg->pmf_enabled)
flags |= WMI_VDEV_START_PMF_ENABLED;
- if (arg->channel.chan_radar)
- ch_flags |= WMI_CHAN_FLAG_DFS;
cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
@@ -3576,18 +3741,7 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
}
- cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
-
- cmd->chan.band_center_freq1 =
- __cpu_to_le32(arg->channel.band_center_freq1);
-
- cmd->chan.mode = arg->channel.mode;
- cmd->chan.flags |= __cpu_to_le32(ch_flags);
- cmd->chan.min_power = arg->channel.min_power;
- cmd->chan.max_power = arg->channel.max_power;
- cmd->chan.reg_power = arg->channel.max_reg_power;
- cmd->chan.reg_classid = arg->channel.reg_class_id;
- cmd->chan.antenna_max = arg->channel.max_antenna_gain;
+ ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
@@ -3968,35 +4122,10 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
for (i = 0; i < arg->n_channels; i++) {
- u32 flags = 0;
-
ch = &arg->channels[i];
ci = &cmd->chan_info[i];
- if (ch->passive)
- flags |= WMI_CHAN_FLAG_PASSIVE;
- if (ch->allow_ibss)
- flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
- if (ch->allow_ht)
- flags |= WMI_CHAN_FLAG_ALLOW_HT;
- if (ch->allow_vht)
- flags |= WMI_CHAN_FLAG_ALLOW_VHT;
- if (ch->ht40plus)
- flags |= WMI_CHAN_FLAG_HT40_PLUS;
- if (ch->chan_radar)
- flags |= WMI_CHAN_FLAG_DFS;
-
- ci->mhz = __cpu_to_le32(ch->freq);
- ci->band_center_freq1 = __cpu_to_le32(ch->freq);
- ci->band_center_freq2 = 0;
- ci->min_power = ch->min_power;
- ci->max_power = ch->max_power;
- ci->reg_power = ch->max_reg_power;
- ci->antenna_max = ch->max_antenna_gain;
-
- /* mode & flags share storage */
- ci->mode = ch->mode;
- ci->flags |= __cpu_to_le32(flags);
+ ath10k_wmi_put_wmi_channel(ci, ch);
}
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
@@ -4108,9 +4237,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
- ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
- else
ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
+ else
+ ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
} else {
ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
}
@@ -4267,3 +4396,73 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}
+
+int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
+{
+ struct wmi_pdev_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ ev_bitmap &= ATH10K_PKTLOG_ANY;
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi enable pktlog filter:%x\n", ev_bitmap);
+
+ cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
+ cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+}
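
/*
 * Sketch of the filter masking in the enable path above: only event
 * bits the host knows about survive into the command. The mask value
 * is an illustrative stand-in for ATH10K_PKTLOG_ANY.
 */
#include <stdint.h>

#define PKTLOG_VALID_BITS 0x1fu		/* illustrative stand-in */

static uint32_t sanitize_pktlog_filter(uint32_t requested)
{
	return requested & PKTLOG_VALID_BITS;	/* drop unknown bits */
}
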
+
+int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+}
+
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
+ ar->wmi.cmd = &wmi_10_2_cmd_map;
+ else
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ } else {
+ ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.vdev_param = &wmi_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_pdev_param_map;
+ }
+
+ init_completion(&ar->wmi.service_ready);
+ init_completion(&ar->wmi.unified_ready);
+
+ return 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+ int i;
+
+ /* free the host memory chunks requested by firmware */
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
+ }
+
+ ar->wmi.num_mem_chunks = 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 86f5ebccfe79..21391929d318 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -222,128 +222,131 @@ static inline char *wmi_service_name(int service_id)
#undef SVCSTR
}
-#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
- (__le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
BIT((svc_id)%(sizeof(u32))))
-#define SVCMAP(x, y) \
+#define SVCMAP(x, y, len) \
do { \
- if (WMI_SERVICE_IS_ENABLED((in), (x))) \
+ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
__set_bit(y, out); \
} while (0)
-static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out)
+static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
{
SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
- WMI_SERVICE_BEACON_OFFLOAD);
+ WMI_SERVICE_BEACON_OFFLOAD, len);
SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
- WMI_SERVICE_SCAN_OFFLOAD);
+ WMI_SERVICE_SCAN_OFFLOAD, len);
SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
- WMI_SERVICE_ROAM_OFFLOAD);
+ WMI_SERVICE_ROAM_OFFLOAD, len);
SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
- WMI_SERVICE_BCN_MISS_OFFLOAD);
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
- WMI_SERVICE_STA_PWRSAVE);
+ WMI_SERVICE_STA_PWRSAVE, len);
SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
- WMI_SERVICE_STA_ADVANCED_PWRSAVE);
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
- WMI_SERVICE_AP_UAPSD);
+ WMI_SERVICE_AP_UAPSD, len);
SVCMAP(WMI_10X_SERVICE_AP_DFS,
- WMI_SERVICE_AP_DFS);
+ WMI_SERVICE_AP_DFS, len);
SVCMAP(WMI_10X_SERVICE_11AC,
- WMI_SERVICE_11AC);
+ WMI_SERVICE_11AC, len);
SVCMAP(WMI_10X_SERVICE_BLOCKACK,
- WMI_SERVICE_BLOCKACK);
+ WMI_SERVICE_BLOCKACK, len);
SVCMAP(WMI_10X_SERVICE_PHYERR,
- WMI_SERVICE_PHYERR);
+ WMI_SERVICE_PHYERR, len);
SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
- WMI_SERVICE_BCN_FILTER);
+ WMI_SERVICE_BCN_FILTER, len);
SVCMAP(WMI_10X_SERVICE_RTT,
- WMI_SERVICE_RTT);
+ WMI_SERVICE_RTT, len);
SVCMAP(WMI_10X_SERVICE_RATECTRL,
- WMI_SERVICE_RATECTRL);
+ WMI_SERVICE_RATECTRL, len);
SVCMAP(WMI_10X_SERVICE_WOW,
- WMI_SERVICE_WOW);
+ WMI_SERVICE_WOW, len);
SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
- WMI_SERVICE_RATECTRL_CACHE);
+ WMI_SERVICE_RATECTRL_CACHE, len);
SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
- WMI_SERVICE_IRAM_TIDS);
+ WMI_SERVICE_IRAM_TIDS, len);
SVCMAP(WMI_10X_SERVICE_BURST,
- WMI_SERVICE_BURST);
+ WMI_SERVICE_BURST, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
- WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
- WMI_SERVICE_FORCE_FW_HANG);
+ WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
- WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
}
-static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out)
+static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
{
SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
- WMI_SERVICE_BEACON_OFFLOAD);
+ WMI_SERVICE_BEACON_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
- WMI_SERVICE_SCAN_OFFLOAD);
+ WMI_SERVICE_SCAN_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
- WMI_SERVICE_ROAM_OFFLOAD);
+ WMI_SERVICE_ROAM_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
- WMI_SERVICE_BCN_MISS_OFFLOAD);
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
- WMI_SERVICE_STA_PWRSAVE);
+ WMI_SERVICE_STA_PWRSAVE, len);
SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
- WMI_SERVICE_STA_ADVANCED_PWRSAVE);
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
- WMI_SERVICE_AP_UAPSD);
+ WMI_SERVICE_AP_UAPSD, len);
SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
- WMI_SERVICE_AP_DFS);
+ WMI_SERVICE_AP_DFS, len);
SVCMAP(WMI_MAIN_SERVICE_11AC,
- WMI_SERVICE_11AC);
+ WMI_SERVICE_11AC, len);
SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
- WMI_SERVICE_BLOCKACK);
+ WMI_SERVICE_BLOCKACK, len);
SVCMAP(WMI_MAIN_SERVICE_PHYERR,
- WMI_SERVICE_PHYERR);
+ WMI_SERVICE_PHYERR, len);
SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
- WMI_SERVICE_BCN_FILTER);
+ WMI_SERVICE_BCN_FILTER, len);
SVCMAP(WMI_MAIN_SERVICE_RTT,
- WMI_SERVICE_RTT);
+ WMI_SERVICE_RTT, len);
SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
- WMI_SERVICE_RATECTRL);
+ WMI_SERVICE_RATECTRL, len);
SVCMAP(WMI_MAIN_SERVICE_WOW,
- WMI_SERVICE_WOW);
+ WMI_SERVICE_WOW, len);
SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
- WMI_SERVICE_RATECTRL_CACHE);
+ WMI_SERVICE_RATECTRL_CACHE, len);
SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
- WMI_SERVICE_IRAM_TIDS);
+ WMI_SERVICE_IRAM_TIDS, len);
SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
- WMI_SERVICE_ARPNS_OFFLOAD);
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_NLO,
- WMI_SERVICE_NLO);
+ WMI_SERVICE_NLO, len);
SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
- WMI_SERVICE_GTK_OFFLOAD);
+ WMI_SERVICE_GTK_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
- WMI_SERVICE_SCAN_SCH);
+ WMI_SERVICE_SCAN_SCH, len);
SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
- WMI_SERVICE_CSA_OFFLOAD);
+ WMI_SERVICE_CSA_OFFLOAD, len);
SVCMAP(WMI_MAIN_SERVICE_CHATTER,
- WMI_SERVICE_CHATTER);
+ WMI_SERVICE_CHATTER, len);
SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
- WMI_SERVICE_COEX_FREQAVOID);
+ WMI_SERVICE_COEX_FREQAVOID, len);
SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
- WMI_SERVICE_PACKET_POWER_SAVE);
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
- WMI_SERVICE_FORCE_FW_HANG);
+ WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_MAIN_SERVICE_GPIO,
- WMI_SERVICE_GPIO);
+ WMI_SERVICE_GPIO, len);
SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
- WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
- WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
- WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
- WMI_SERVICE_STA_KEEP_ALIVE);
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
- WMI_SERVICE_TX_ENCAP);
+ WMI_SERVICE_TX_ENCAP, len);
}
#undef SVCMAP
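
/*
 * Standalone sketch of the bounds check this hunk threads through
 * SVCMAP: the service id is compared against the reported bitmap
 * length before any bitmap word is read. This sketch uses conventional
 * 32-bits-per-word indexing; the driver's macro keeps its historical
 * sizeof(u32)-based indexing unchanged.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool svc_is_enabled(const uint32_t *bmap, size_t len_bits,
			   unsigned int svc_id)
{
	if (svc_id >= len_bits)
		return false;	/* id beyond what the event carried */
	return bmap[svc_id / 32] & (1u << (svc_id % 32));
}
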
@@ -1428,11 +1431,11 @@ struct wmi_service_ready_event {
* where FW can access this memory directly (or) by DMA.
*/
__le32 num_mem_reqs;
- struct wlan_host_mem_req mem_reqs[1];
+ struct wlan_host_mem_req mem_reqs[0];
} __packed;
/* This is the definition from 10.X firmware branch */
-struct wmi_service_ready_event_10x {
+struct wmi_10x_service_ready_event {
__le32 sw_version;
__le32 abi_version;
@@ -1467,7 +1470,7 @@ struct wmi_service_ready_event_10x {
*/
__le32 num_mem_reqs;
- struct wlan_host_mem_req mem_reqs[1];
+ struct wlan_host_mem_req mem_reqs[0];
} __packed;
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
@@ -1883,38 +1886,26 @@ struct host_memory_chunk {
__le32 size;
} __packed;
+struct wmi_host_mem_chunks {
+ __le32 count;
+ /* some fw revisions require at least 1 chunk regardless of count */
+ struct host_memory_chunk items[1];
+} __packed;
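
/*
 * Sketch of the sizing consequence of the comment above: because some
 * firmware revisions expect at least one chunk slot, items[1] keeps a
 * zeroed slot in the command even when count is 0.
 */
#include <stddef.h>
#include <stdint.h>

struct chunk_sketch { uint32_t ptr, size, req_id; };

static size_t chunks_blob_len(unsigned int count)
{
	unsigned int slots = count ? count : 1;	/* never fewer than one */
	return sizeof(uint32_t) + slots * sizeof(struct chunk_sketch);
}
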
+
struct wmi_init_cmd {
struct wmi_resource_config resource_config;
- __le32 num_host_mem_chunks;
-
- /*
- * variable number of host memory chunks.
- * This should be the last element in the structure
- */
- struct host_memory_chunk host_mem_chunks[1];
+ struct wmi_host_mem_chunks mem_chunks;
} __packed;
/* _10x stucture is from 10.X FW API */
struct wmi_init_cmd_10x {
struct wmi_resource_config_10x resource_config;
- __le32 num_host_mem_chunks;
-
- /*
- * variable number of host memory chunks.
- * This should be the last element in the structure
- */
- struct host_memory_chunk host_mem_chunks[1];
+ struct wmi_host_mem_chunks mem_chunks;
} __packed;
struct wmi_init_cmd_10_2 {
struct wmi_resource_config_10_2 resource_config;
- __le32 num_host_mem_chunks;
-
- /*
- * variable number of host memory chunks.
- * This should be the last element in the structure
- */
- struct host_memory_chunk host_mem_chunks[1];
+ struct wmi_host_mem_chunks mem_chunks;
} __packed;
struct wmi_chan_list_entry {
@@ -1964,6 +1955,11 @@ struct wmi_ssid_list {
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+/* Values lower than this may be refused by some firmware revisions, which
+ * then report scan completion with a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
/* Scan priority numbers must be sequential, starting with 0 */
enum wmi_scan_priority {
WMI_SCAN_PRIORITY_VERY_LOW = 0,
@@ -1974,7 +1970,7 @@ enum wmi_scan_priority {
WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
};
-struct wmi_start_scan_cmd {
+struct wmi_start_scan_common {
/* Scan ID */
__le32 scan_id;
/* Scan requestor ID */
@@ -2032,95 +2028,25 @@ struct wmi_start_scan_cmd {
__le32 probe_delay;
/* Scan control flags */
__le32 scan_ctrl_flags;
-
- /* Burst duration time in msecs */
- __le32 burst_duration;
- /*
- * TLV (tag length value ) paramerters follow the scan_cmd structure.
- * TLV can contain channel list, bssid list, ssid list and
- * ie. the TLV tags are defined above;
- */
} __packed;
-/* This is the definition from 10.X firmware branch */
-struct wmi_start_scan_cmd_10x {
- /* Scan ID */
- __le32 scan_id;
-
- /* Scan requestor ID */
- __le32 scan_req_id;
-
- /* VDEV id(interface) that is requesting scan */
- __le32 vdev_id;
-
- /* Scan Priority, input to scan scheduler */
- __le32 scan_priority;
-
- /* Scan events subscription */
- __le32 notify_scan_events;
-
- /* dwell time in msec on active channels */
- __le32 dwell_time_active;
-
- /* dwell time in msec on passive channels */
- __le32 dwell_time_passive;
-
- /*
- * min time in msec on the BSS channel,only valid if atleast one
- * VDEV is active
- */
- __le32 min_rest_time;
-
- /*
- * max rest time in msec on the BSS channel,only valid if at least
- * one VDEV is active
- */
- /*
- * the scanner will rest on the bss channel at least min_rest_time
- * after min_rest_time the scanner will start checking for tx/rx
- * activity on all VDEVs. if there is no activity the scanner will
- * switch to off channel. if there is activity the scanner will let
- * the radio on the bss channel until max_rest_time expires.at
- * max_rest_time scanner will switch to off channel irrespective of
- * activity. activity is determined by the idle_time parameter.
- */
- __le32 max_rest_time;
-
- /*
- * time before sending next set of probe requests.
- * The scanner keeps repeating probe requests transmission with
- * period specified by repeat_probe_time.
- * The number of probe requests specified depends on the ssid_list
- * and bssid_list
- */
- __le32 repeat_probe_time;
-
- /* time in msec between 2 consequetive probe requests with in a set. */
- __le32 probe_spacing_time;
-
- /*
- * data inactivity time in msec on bss channel that will be used by
- * scanner for measuring the inactivity.
- */
- __le32 idle_time;
-
- /* maximum time in msec allowed for scan */
- __le32 max_scan_time;
-
- /*
- * delay in msec before sending first probe request after switching
- * to a channel
+struct wmi_start_scan_tlvs {
+ /* TLV parameters. These include channel list, ssid list, bssid list,
+ * extra IEs.
*/
- __le32 probe_delay;
+ u8 tlvs[0];
+} __packed;
- /* Scan control flags */
- __le32 scan_ctrl_flags;
+struct wmi_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ struct wmi_start_scan_tlvs tlvs;
+} __packed;
- /*
- * TLV (tag length value ) paramerters follow the scan_cmd structure.
- * TLV can contain channel list, bssid list, ssid list and
- * ie. the TLV tags are defined above;
- */
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ struct wmi_start_scan_tlvs tlvs;
} __packed;
struct wmi_ssid_arg {
@@ -2306,94 +2232,25 @@ struct wmi_mgmt_rx_event_v2 {
#define PHY_ERROR_FALSE_RADAR_EXT 0x24
#define PHY_ERROR_RADAR 0x05
-struct wmi_single_phyerr_rx_hdr {
- /* TSF timestamp */
+struct wmi_phyerr {
__le32 tsf_timestamp;
-
- /*
- * Current freq1, freq2
- *
- * [7:0]: freq1[lo]
- * [15:8] : freq1[hi]
- * [23:16]: freq2[lo]
- * [31:24]: freq2[hi]
- */
__le16 freq1;
__le16 freq2;
-
- /*
- * Combined RSSI over all chains and channel width for this PHY error
- *
- * [7:0]: RSSI combined
- * [15:8]: Channel width (MHz)
- * [23:16]: PHY error code
- * [24:16]: reserved (future use)
- */
u8 rssi_combined;
u8 chan_width_mhz;
u8 phy_err_code;
u8 rsvd0;
-
- /*
- * RSSI on chain 0 through 3
- *
- * This is formatted the same as the PPDU_START RX descriptor
- * field:
- *
- * [7:0]: pri20
- * [15:8]: sec20
- * [23:16]: sec40
- * [31:24]: sec80
- */
-
- __le32 rssi_chain0;
- __le32 rssi_chain1;
- __le32 rssi_chain2;
- __le32 rssi_chain3;
-
- /*
- * Last calibrated NF value for chain 0 through 3
- *
- * nf_list_1:
- *
- * + [15:0] - chain 0
- * + [31:16] - chain 1
- *
- * nf_list_2:
- *
- * + [15:0] - chain 2
- * + [31:16] - chain 3
- */
- __le32 nf_list_1;
- __le32 nf_list_2;
-
- /* Length of the frame */
+ __le32 rssi_chains[4];
+ __le16 nf_chains[4];
__le32 buf_len;
+ u8 buf[0];
} __packed;
-struct wmi_single_phyerr_rx_event {
- /* Phy error event header */
- struct wmi_single_phyerr_rx_hdr hdr;
- /* frame buffer */
- u8 bufp[0];
-} __packed;
-
-struct wmi_comb_phyerr_rx_hdr {
- /* Phy error phy error count */
- __le32 num_phyerr_events;
+struct wmi_phyerr_event {
+ __le32 num_phyerrs;
__le32 tsf_l32;
__le32 tsf_u32;
-} __packed;
-
-struct wmi_comb_phyerr_rx_event {
- /* Phy error phy error count */
- struct wmi_comb_phyerr_rx_hdr hdr;
- /*
- * frame buffer - contains multiple payloads in the order:
- * header - payload, header - payload...
- * (The header is of type: wmi_single_phyerr_rx_hdr)
- */
- u8 bufp[0];
+ struct wmi_phyerr phyerrs[0];
} __packed;
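The removed comment's "header - payload, header - payload..." layout survives in the new types: each wmi_phyerr record is followed by buf_len bytes of payload, so the records cannot be indexed as a plain array and must be walked with explicit bounds checks. A sketch of that iteration (helper name assumed, not part of this patch):

static void ath10k_walk_phyerrs(const struct wmi_phyerr_event *ev,
				size_t event_len)
{
	const u8 *pos = (const u8 *)ev->phyerrs;
	const u8 *end = (const u8 *)ev + event_len;
	u32 i, count = __le32_to_cpu(ev->num_phyerrs);

	for (i = 0; i < count; i++) {
		const struct wmi_phyerr *phyerr;
		u32 buf_len;

		if (pos + sizeof(*phyerr) > end)
			break;			/* truncated event */

		phyerr = (const struct wmi_phyerr *)pos;
		buf_len = __le32_to_cpu(phyerr->buf_len);

		if (pos + sizeof(*phyerr) + buf_len > end)
			break;			/* bogus buf_len */

		/* phyerr->buf[0..buf_len) holds this record's payload */

		pos += sizeof(*phyerr) + buf_len;
	}
}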
#define PHYERR_TLV_SIG 0xBB
@@ -2908,11 +2765,6 @@ enum wmi_tp_scale {
WMI_TP_SCALE_SIZE = 5, /* max num of enum */
};
-struct wmi_set_channel_cmd {
- /* channel (only frequency and mode info are used) */
- struct wmi_channel chan;
-} __packed;
-
struct wmi_pdev_chanlist_update_event {
/* number of channels */
__le32 num_chan;
@@ -2943,6 +2795,10 @@ struct wmi_pdev_set_channel_cmd {
struct wmi_channel chan;
} __packed;
+struct wmi_pdev_pktlog_enable_cmd {
+ __le32 ev_bitmap;
+} __packed;
+
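A plausible shape for the ath10k_wmi_pdev_pktlog_enable() helper declared at the end of this header, built only on ath10k_wmi_alloc_skb()/ath10k_wmi_cmd_send(); the per-firmware command-id lookup shown here is an assumption of this sketch:

int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
	struct wmi_pdev_pktlog_enable_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
	cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);

	/* command id comes from the per-firmware map (illustrative) */
	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}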
/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
#define WMI_DSCP_MAP_MAX (64)
struct wmi_pdev_set_dscp_tid_map_cmd {
@@ -3177,7 +3033,7 @@ struct wmi_stats_event {
* PDEV statistics
* TODO: add all PDEV stats here
*/
-struct wmi_pdev_stats_old {
+struct wmi_pdev_stats {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
@@ -3188,15 +3044,8 @@ struct wmi_pdev_stats_old {
struct wal_dbg_stats wal; /* WAL dbg stats */
} __packed;
-struct wmi_pdev_stats_10x {
- __le32 chan_nf; /* Channel noise floor */
- __le32 tx_frame_count; /* TX frame count */
- __le32 rx_frame_count; /* RX frame count */
- __le32 rx_clear_count; /* rx clear count */
- __le32 cycle_count; /* cycle count */
- __le32 phy_err_count; /* Phy error count */
- __le32 chan_tx_pwr; /* channel tx power */
- struct wal_dbg_stats wal; /* WAL dbg stats */
+struct wmi_10x_pdev_stats {
+ struct wmi_pdev_stats old;
__le32 ack_rx_bad;
__le32 rts_bad;
__le32 rts_good;
@@ -3217,16 +3066,14 @@ struct wmi_vdev_stats {
* peer statistics.
* TODO: add more stats
*/
-struct wmi_peer_stats_old {
+struct wmi_peer_stats {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
} __packed;
-struct wmi_peer_stats_10x {
- struct wmi_mac_addr peer_macaddr;
- __le32 peer_rssi;
- __le32 peer_tx_rate;
+struct wmi_10x_peer_stats {
+ struct wmi_peer_stats old;
__le32 peer_rx_rate;
} __packed;
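Embedding the base struct as the leading "old" member keeps the 10.X layout binary-compatible with the plain one, so the shared prefix can be parsed by a single helper. Sketch (the host-side struct and helper are illustrative only):

struct host_peer_stat {		/* illustrative host-side struct */
	u32 peer_rssi;
	u32 peer_tx_rate;
};

static void fill_peer_common(struct host_peer_stat *dst,
			     const struct wmi_peer_stats *src)
{
	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}

/* The 10.X path reuses it via the embedded base struct:
 *	fill_peer_common(dst, &src_10x->old);
 * and then reads src_10x->peer_rx_rate separately.
 */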
@@ -4708,7 +4555,6 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
-#define ATH10K_RTS_MAX 2347
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -4719,8 +4565,27 @@ struct wmi_dbglog_cfg_cmd {
/* By default disable power save for IBSS */
#define ATH10K_DEFAULT_ATIM 0
+#define WMI_MAX_MEM_REQS 16
+
+struct wmi_svc_rdy_ev_arg {
+ __le32 min_tx_power;
+ __le32 max_tx_power;
+ __le32 ht_cap;
+ __le32 vht_cap;
+ __le32 sw_ver0;
+ __le32 sw_ver1;
+ __le32 phy_capab;
+ __le32 num_rf_chains;
+ __le32 eeprom_rd;
+ __le32 num_mem_reqs;
+ const __le32 *service_map;
+ size_t service_map_len;
+ const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
+};
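num_mem_reqs has to be validated against WMI_MAX_MEM_REQS before the request pointers are stashed in the fixed-size array; a sketch of that bound check while parsing the service-ready event (helper name assumed):

static int ath10k_collect_mem_reqs(struct wmi_svc_rdy_ev_arg *arg,
				   const struct wlan_host_mem_req *reqs)
{
	u32 i, n = __le32_to_cpu(arg->num_mem_reqs);

	if (n > WMI_MAX_MEM_REQS)
		return -EINVAL;		/* firmware asked for too many */

	for (i = 0; i < n; i++)
		arg->mem_reqs[i] = &reqs[i];

	return 0;
}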
+
struct ath10k;
struct ath10k_vif;
+struct ath10k_fw_stats;
int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
@@ -4732,8 +4597,6 @@ int ath10k_wmi_connect(struct ath10k *ar);
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
-int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
- const struct wmi_channel_arg *);
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
@@ -4794,5 +4657,9 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
+int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats);
+int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_list);
+int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index ab2709a43768..19eab2a69ad5 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -547,7 +547,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static void
-ath5k_sw_scan_start(struct ieee80211_hw *hw)
+ath5k_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct ath5k_hw *ah = hw->priv;
if (!ah->assoc)
@@ -556,7 +558,7 @@ ath5k_sw_scan_start(struct ieee80211_hw *hw)
static void
-ath5k_sw_scan_complete(struct ieee80211_hw *hw)
+ath5k_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
ath5k_hw_set_ledstate(ah, ah->assoc ?
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 0583c69d26db..ddaad712c59a 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
} else {
switch (queue_type) {
case AR5K_TX_QUEUE_DATA:
- for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
- ah->ah_txq[queue].tqi_type !=
- AR5K_TX_QUEUE_INACTIVE; queue++) {
-
- if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
- return -EINVAL;
- }
+ queue = queue_info->tqi_subtype;
break;
case AR5K_TX_QUEUE_UAPSD:
queue = AR5K_TX_QUEUE_ID_UAPSD;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index ba60e37213eb..7a5337877a0c 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2976,11 +2976,11 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac)
+ struct station_del_parameters *params)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
- const u8 *addr = mac ? mac : bcast_addr;
+ const u8 *addr = params->mac ? params->mac : bcast_addr;
return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH,
addr, WLAN_REASON_PREV_AUTH_NOT_VALID);
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index 05debf700a84..4f82e8632d37 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,7 +22,7 @@
#define ATH6KL_MAX_IE 256
-__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...);
/*
* Reflects the version of binary interface exposed by ATH6KL target
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 55c4064dd506..81ba48d2938b 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -37,76 +37,64 @@ struct ath6kl_fwlog_slot {
#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
-int ath6kl_printk(const char *level, const char *fmt, ...)
+void ath6kl_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int rtn;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- rtn = printk("%sath6kl: %pV", level, &vaf);
+ printk("%sath6kl: %pV", level, &vaf);
va_end(args);
-
- return rtn;
}
EXPORT_SYMBOL(ath6kl_printk);
-int ath6kl_info(const char *fmt, ...)
+void ath6kl_info(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = ath6kl_printk(KERN_INFO, "%pV", &vaf);
+ ath6kl_printk(KERN_INFO, "%pV", &vaf);
trace_ath6kl_log_info(&vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath6kl_info);
-int ath6kl_err(const char *fmt, ...)
+void ath6kl_err(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = ath6kl_printk(KERN_ERR, "%pV", &vaf);
+ ath6kl_printk(KERN_ERR, "%pV", &vaf);
trace_ath6kl_log_err(&vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath6kl_err);
-int ath6kl_warn(const char *fmt, ...)
+void ath6kl_warn(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret;
va_start(args, fmt);
vaf.va = &args;
- ret = ath6kl_printk(KERN_WARNING, "%pV", &vaf);
+ ath6kl_printk(KERN_WARNING, "%pV", &vaf);
trace_ath6kl_log_warn(&vaf);
va_end(args);
-
- return ret;
}
EXPORT_SYMBOL(ath6kl_warn);
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index e194c10d9f00..19106ed28961 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -50,10 +50,10 @@ enum ATH6K_DEBUG_MASK {
};
extern unsigned int debug_mask;
-__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
-__printf(1, 2) int ath6kl_info(const char *fmt, ...);
-__printf(1, 2) int ath6kl_err(const char *fmt, ...);
-__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) void ath6kl_info(const char *fmt, ...);
+__printf(1, 2) void ath6kl_err(const char *fmt, ...);
+__printf(1, 2) void ath6kl_warn(const char *fmt, ...);
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
@@ -81,10 +81,9 @@ int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
-static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
- const char *fmt, ...)
+static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
+ const char *fmt, ...)
{
- return 0;
}
static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index a6a5e40b3e98..9da3594fd010 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -1193,18 +1193,10 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface)
return 0;
}
-static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf)
-{
- if (usb_get_intfdata(intf))
- ath6kl_usb_remove(intf);
- return 0;
-}
-
#else
#define ath6kl_usb_pm_suspend NULL
#define ath6kl_usb_pm_resume NULL
-#define ath6kl_usb_pm_reset_resume NULL
#endif
@@ -1222,7 +1214,6 @@ static struct usb_driver ath6kl_usb_driver = {
.probe = ath6kl_usb_probe,
.suspend = ath6kl_usb_pm_suspend,
.resume = ath6kl_usb_pm_resume,
- .reset_resume = ath6kl_usb_pm_reset_resume,
.disconnect = ath6kl_usb_remove,
.id_table = ath6kl_usb_ids,
.supports_autosuspend = true,
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 896e63281b3b..fee0cadb0f5e 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -3,6 +3,8 @@ config ATH9K_HW
config ATH9K_COMMON
tristate
select ATH_COMMON
+ select DEBUG_FS
+ select RELAY
config ATH9K_DFS_DEBUGFS
def_bool y
depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
@@ -148,6 +150,11 @@ config ATH9K_CHANNEL_CONTEXT
for multi-channel concurrency. Enable this if P2P PowerSave support
is required.
+config ATH9K_PCOEM
+ bool "Atheros ath9k support for PC OEM cards" if EXPERT
+ depends on ATH9K
+ default y
+
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 73704c1be736..473972288a84 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -16,8 +16,7 @@ ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
ath9k-$(CONFIG_ATH9K_TX99) += tx99.o
ath9k-$(CONFIG_ATH9K_WOW) += wow.o
-ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o \
- spectral.o
+ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_STATION_STATISTICS) += debug_sta.o
@@ -32,7 +31,6 @@ ath9k_hw-y:= \
ar5008_phy.o \
ar9002_calib.o \
ar9003_calib.o \
- ar9003_rtt.o \
calib.o \
eeprom.o \
eeprom_def.o \
@@ -50,6 +48,8 @@ ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
ar9003_mci.o
+ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o
+
ath9k_hw-$(CONFIG_ATH9K_DYNACK) += dynack.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
@@ -58,7 +58,8 @@ obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o \
common-init.o \
common-beacon.o \
- common-debug.o
+ common-debug.o \
+ common-spectral.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index b72d0be716db..5829074208fa 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1190,7 +1190,7 @@ static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
static void ar5008_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
- u32 radar_0 = 0, radar_1 = 0;
+ u32 radar_0 = 0, radar_1;
if (!conf) {
REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
@@ -1204,6 +1204,9 @@ static void ar5008_hw_set_radar_params(struct ath_hw *ah,
radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
+ radar_1 = REG_READ(ah, AR_PHY_RADAR_1);
+ radar_1 &= ~(AR_PHY_RADAR_1_MAXLEN | AR_PHY_RADAR_1_RELSTEP_THRESH |
+ AR_PHY_RADAR_1_RELPWR_THRESH);
radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
@@ -1225,7 +1228,7 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
conf->fir_power = -33;
conf->radar_rssi = 20;
conf->pulse_height = 10;
- conf->pulse_rssi = 24;
+ conf->pulse_rssi = 15;
conf->pulse_inband = 15;
conf->pulse_maxlen = 255;
conf->pulse_inband_step = 12;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index cdc74005650c..42190b67c671 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -657,31 +657,29 @@ static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
ar9280_hw_olc_temp_compensation(ah);
}
-static bool ar9002_hw_calibrate(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u8 rxchainmask,
- bool longcal)
+static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
{
- bool iscaldone = true;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
- bool nfcal, nfcal_pending = false;
+ bool nfcal, nfcal_pending = false, percal_pending;
+ int ret;
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
- if (currCal && !nfcal &&
- (currCal->calState == CAL_RUNNING ||
- currCal->calState == CAL_WAITING)) {
- iscaldone = ar9002_hw_per_calibration(ah, chan,
- rxchainmask, currCal);
- if (iscaldone) {
- ah->cal_list_curr = currCal = currCal->calNext;
-
- if (currCal->calState == CAL_WAITING) {
- iscaldone = false;
- ath9k_hw_reset_calibration(ah, currCal);
- }
+ percal_pending = (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING));
+
+ if (percal_pending && !nfcal) {
+ if (!ar9002_hw_per_calibration(ah, chan, rxchainmask, currCal))
+ return 0;
+
+ ah->cal_list_curr = currCal = currCal->calNext;
+ if (currCal->calState == CAL_WAITING) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ return 0;
}
}
@@ -698,7 +696,9 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
* NF is slow time-variant, so it is OK to use a
* historical value.
*/
- ath9k_hw_loadnf(ah, ah->curchan);
+ ret = ath9k_hw_loadnf(ah, ah->curchan);
+ if (ret < 0)
+ return ret;
}
if (longcal) {
@@ -709,7 +709,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
}
}
- return iscaldone;
+ return !percal_pending;
}
/* Carrier leakage Calibration fix */
@@ -856,6 +856,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
/* Do PA Calibration */
ar9002_hw_pa_cal(ah, true);
+ ath9k_hw_loadnf(ah, chan);
+ ath9k_hw_start_nfcal(ah, true);
if (ah->caldata)
set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 2a93519f4bdf..f816909d9474 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -281,7 +281,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(i->txpower, AR_XmitPower0)
+ | SM(i->txpower[0], AR_XmitPower0)
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
| (i->flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
@@ -307,9 +307,9 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
| set11nRateFlags(i->rates, 3)
| SM(i->rtscts_rate, AR_RTSCTSRate);
- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
+ ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
+ ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
+ ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
}
static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 9a2afa2c690b..fc08162b5820 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -643,9 +643,12 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
* and fix otherwise.
*/
count = param->count;
- if (param->endless)
- count = 0x80;
- else if (count & 0x80)
+ if (param->endless) {
+ if (AR_SREV_9271(ah))
+ count = 0;
+ else
+ count = 0x80;
+ } else if (count & 0x80)
count = 0x7f;
REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index ac8301ef5242..06ab71db6e80 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -121,13 +121,12 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
return iscaldone;
}
-static bool ar9003_hw_calibrate(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u8 rxchainmask,
- bool longcal)
+static int ar9003_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
{
bool iscaldone = true;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
+ int ret;
/*
* For given calibration:
@@ -163,7 +162,9 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah,
* NF is slow time-variant, so it is OK to use a historical
* value.
*/
- ath9k_hw_loadnf(ah, ah->curchan);
+ ret = ath9k_hw_loadnf(ah, ah->curchan);
+ if (ret < 0)
+ return ret;
/* start NF calibration, without updating BB NF register */
ath9k_hw_start_nfcal(ah, false);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 80c6eacbda53..08225a0067c2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4079,27 +4079,28 @@ static int ar9003_hw_get_thermometer(struct ath_hw *ah)
static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
int thermometer = ar9003_hw_get_thermometer(ah);
u8 therm_on = (thermometer < 0) ? 0 : 1;
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
- if (ah->caps.tx_chainmask & BIT(1))
+ if (pCap->chip_chainmask & BIT(1))
REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
- if (ah->caps.tx_chainmask & BIT(2))
+ if (pCap->chip_chainmask & BIT(2))
REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
- if (ah->caps.tx_chainmask & BIT(1)) {
+ if (pCap->chip_chainmask & BIT(1)) {
therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
}
- if (ah->caps.tx_chainmask & BIT(2)) {
+ if (pCap->chip_chainmask & BIT(2)) {
therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
@@ -4376,6 +4377,25 @@ static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
targetPowerArray, numPiers);
}
+static void ar9003_hw_selfgen_tpc_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *pwr_array)
+{
+ u32 val;
+
+ /* target power values for self-generated frames (ACK, RTS/CTS) */
+ if (IS_CHAN_2GHZ(chan)) {
+ val = SM(pwr_array[ALL_TARGET_LEGACY_1L_5L], AR_TPC_ACK) |
+ SM(pwr_array[ALL_TARGET_LEGACY_1L_5L], AR_TPC_CTS) |
+ SM(0x3f, AR_TPC_CHIRP) | SM(0x3f, AR_TPC_RPT);
+ } else {
+ val = SM(pwr_array[ALL_TARGET_LEGACY_6_24], AR_TPC_ACK) |
+ SM(pwr_array[ALL_TARGET_LEGACY_6_24], AR_TPC_CTS) |
+ SM(0x3f, AR_TPC_CHIRP) | SM(0x3f, AR_TPC_RPT);
+ }
+ REG_WRITE(ah, AR_TPC, val);
+}
+
/* Set tx power registers to array of values passed in */
static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
@@ -5311,6 +5331,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
struct ar9300_modal_eep_header *modal_hdr;
u8 targetPowerValT2[ar9300RateSize];
u8 target_power_val_t2_eep[ar9300RateSize];
+ u8 targetPowerValT2_tpc[ar9300RateSize];
unsigned int i = 0, paprd_scale_factor = 0;
u8 pwr_idx, min_pwridx = 0;
@@ -5362,6 +5383,9 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
twiceAntennaReduction,
powerLimit);
+ memcpy(targetPowerValT2_tpc, targetPowerValT2,
+ sizeof(targetPowerValT2));
+
if (ar9003_is_paprd_enabled(ah)) {
for (i = 0; i < ar9300RateSize; i++) {
if ((ah->paprd_ratemask & (1 << i)) &&
@@ -5395,6 +5419,30 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
ar9003_hw_calibration_apply(ah, chan->channel);
ar9003_paprd_set_txpower(ah, chan, targetPowerValT2);
+
+ ar9003_hw_selfgen_tpc_txpower(ah, chan, targetPowerValT2);
+
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ u32 val;
+
+ ar9003_hw_init_rate_txpower(ah, targetPowerValT2_tpc, chan);
+
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_PWRTX_MAX,
+ AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ /* Disable per chain power reduction */
+ val = REG_READ(ah, AR_PHY_POWER_TX_SUB);
+ if (AR_SREV_9340(ah))
+ REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
+ val & 0xFFFFFFC0);
+ else
+ REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
+ val & 0xFFFFF000);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_PWRTX_MAX, 0);
+ }
}
static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index ddef9eedbac6..06ad2172030e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -333,12 +333,29 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
qca953x_1p0_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
qca953x_1p0_soc_postamble);
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- qca953x_1p0_common_wo_xlna_rx_gain_table);
- INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
- qca953x_1p0_common_wo_xlna_rx_gain_bounds);
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- qca953x_1p0_modes_no_xpa_tx_gain_table);
+
+ if (AR_SREV_9531_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_2p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_2p0_common_wo_xlna_rx_gain_bounds);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+ }
+
+ if (AR_SREV_9531_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_2p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_no_xpa_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_no_xpa_tx_gain_table);
+
INIT_INI_ARRAY(&ah->iniModesFastClock,
qca953x_1p0_modes_fast_clock);
} else if (AR_SREV_9580(ah)) {
@@ -518,9 +535,15 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_xpa_tx_gain_table);
- else if (AR_SREV_9531(ah))
+ else if (AR_SREV_9531_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- qca953x_1p0_modes_xpa_tx_gain_table);
+ qca953x_2p0_modes_xpa_tx_gain_table);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_lowest_ob_db_tx_gain_table);
@@ -562,7 +585,10 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_no_xpa_tx_gain_table);
else if (AR_SREV_9531(ah)) {
- if (AR_SREV_9531_11(ah))
+ if (AR_SREV_9531_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_2p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
qca953x_1p1_modes_no_xpa_tx_gain_table);
else
@@ -670,9 +696,6 @@ static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_green_ob_db_tx_gain_1_1);
- else if (AR_SREV_9340(ah))
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_ub124_tx_gain_table_1p0);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_type5_tx_gain_table);
@@ -792,11 +815,16 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
ar955x_1p0_common_wo_xlna_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_wo_xlna_rx_gain_bounds);
- } else if (AR_SREV_9531(ah)) {
+ } else if (AR_SREV_9531_10(ah) || AR_SREV_9531_11(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
qca953x_1p0_common_wo_xlna_rx_gain_table);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9531_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_2p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_2p0_common_wo_xlna_rx_gain_bounds);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_wo_xlna_rx_gain_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 057b1657c428..da84b705cbcd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -101,7 +101,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(i->txpower, AR_XmitPower0)
+ | SM(i->txpower[0], AR_XmitPower0)
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
| (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0)
@@ -152,9 +152,9 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
- ACCESS_ONCE(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
- ACCESS_ONCE(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
- ACCESS_ONCE(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
+ ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
+ ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
+ ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
}
static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1e8ea5e4d4ca..ae6cde273414 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -18,6 +18,21 @@
#include "hw.h"
#include "ar9003_phy.h"
+#define AR9300_OFDM_RATES 8
+#define AR9300_HT_SS_RATES 8
+#define AR9300_HT_DS_RATES 8
+#define AR9300_HT_TS_RATES 8
+
+#define AR9300_11NA_OFDM_SHIFT 0
+#define AR9300_11NA_HT_SS_SHIFT 8
+#define AR9300_11NA_HT_DS_SHIFT 16
+#define AR9300_11NA_HT_TS_SHIFT 24
+
+#define AR9300_11NG_OFDM_SHIFT 4
+#define AR9300_11NG_HT_SS_SHIFT 12
+#define AR9300_11NG_HT_DS_SHIFT 20
+#define AR9300_11NG_HT_TS_SHIFT 28
+
static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
@@ -40,6 +55,71 @@ static const int m2ThreshLowExt_off = 127;
static const int m1ThreshExt_off = 127;
static const int m2ThreshExt_off = 127;
+static const u8 ofdm2pwr[] = {
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_36,
+ ALL_TARGET_LEGACY_48,
+ ALL_TARGET_LEGACY_54
+};
+
+static const u8 mcs2pwr_ht20[] = {
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_4,
+ ALL_TARGET_HT20_5,
+ ALL_TARGET_HT20_6,
+ ALL_TARGET_HT20_7,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_12,
+ ALL_TARGET_HT20_13,
+ ALL_TARGET_HT20_14,
+ ALL_TARGET_HT20_15,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_20,
+ ALL_TARGET_HT20_21,
+ ALL_TARGET_HT20_22,
+ ALL_TARGET_HT20_23
+};
+
+static const u8 mcs2pwr_ht40[] = {
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_4,
+ ALL_TARGET_HT40_5,
+ ALL_TARGET_HT40_6,
+ ALL_TARGET_HT40_7,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_12,
+ ALL_TARGET_HT40_13,
+ ALL_TARGET_HT40_14,
+ ALL_TARGET_HT40_15,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_20,
+ ALL_TARGET_HT40_21,
+ ALL_TARGET_HT40_22,
+ ALL_TARGET_HT40_23,
+};
+
/**
* ar9003_hw_set_channel - set channel on single-chip device
* @ah: atheros hardware structure
@@ -1361,7 +1441,7 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
unsigned int regWrites = 0;
- u32 radar_0 = 0, radar_1 = 0;
+ u32 radar_0 = 0, radar_1;
if (!conf) {
REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
@@ -1375,6 +1455,9 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah,
radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
+ radar_1 = REG_READ(ah, AR_PHY_RADAR_1);
+ radar_1 &= ~(AR_PHY_RADAR_1_MAXLEN | AR_PHY_RADAR_1_RELSTEP_THRESH |
+ AR_PHY_RADAR_1_RELPWR_THRESH);
radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
@@ -1401,7 +1484,7 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
conf->fir_power = -28;
conf->radar_rssi = 0;
conf->pulse_height = 10;
- conf->pulse_rssi = 24;
+ conf->pulse_rssi = 15;
conf->pulse_inband = 8;
conf->pulse_maxlen = 255;
conf->pulse_inband_step = 12;
@@ -1796,6 +1879,100 @@ static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
}
+static void ar9003_hw_init_txpower_cck(struct ath_hw *ah, u8 *rate_array)
+{
+ ah->tx_power[0] = rate_array[ALL_TARGET_LEGACY_1L_5L];
+ ah->tx_power[1] = rate_array[ALL_TARGET_LEGACY_1L_5L];
+ ah->tx_power[2] = min(rate_array[ALL_TARGET_LEGACY_1L_5L],
+ rate_array[ALL_TARGET_LEGACY_5S]);
+ ah->tx_power[3] = min(rate_array[ALL_TARGET_LEGACY_11L],
+ rate_array[ALL_TARGET_LEGACY_11S]);
+}
+
+static void ar9003_hw_init_txpower_ofdm(struct ath_hw *ah, u8 *rate_array,
+ int offset)
+{
+ int i, j;
+
+ for (i = offset; i < offset + AR9300_OFDM_RATES; i++) {
+ /* OFDM rate to power table idx */
+ j = ofdm2pwr[i - offset];
+ ah->tx_power[i] = rate_array[j];
+ }
+}
+
+static void ar9003_hw_init_txpower_ht(struct ath_hw *ah, u8 *rate_array,
+ int ss_offset, int ds_offset,
+ int ts_offset, bool is_40)
+{
+ int i, j, mcs_idx = 0;
+ const u8 *mcs2pwr = (is_40) ? mcs2pwr_ht40 : mcs2pwr_ht20;
+
+ for (i = ss_offset; i < ss_offset + AR9300_HT_SS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+
+ for (i = ds_offset; i < ds_offset + AR9300_HT_DS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+
+ for (i = ts_offset; i < ts_offset + AR9300_HT_TS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+}
+
+static void ar9003_hw_init_txpower_stbc(struct ath_hw *ah, int ss_offset,
+ int ds_offset, int ts_offset)
+{
+ memcpy(&ah->tx_power_stbc[ss_offset], &ah->tx_power[ss_offset],
+ AR9300_HT_SS_RATES);
+ memcpy(&ah->tx_power_stbc[ds_offset], &ah->tx_power[ds_offset],
+ AR9300_HT_DS_RATES);
+ memcpy(&ah->tx_power_stbc[ts_offset], &ah->tx_power[ts_offset],
+ AR9300_HT_TS_RATES);
+}
+
+void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
+ struct ath9k_channel *chan)
+{
+ if (IS_CHAN_5GHZ(chan)) {
+ ar9003_hw_init_txpower_ofdm(ah, rate_array,
+ AR9300_11NA_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar9003_hw_init_txpower_ht(ah, rate_array,
+ AR9300_11NA_HT_SS_SHIFT,
+ AR9300_11NA_HT_DS_SHIFT,
+ AR9300_11NA_HT_TS_SHIFT,
+ IS_CHAN_HT40(chan));
+ ar9003_hw_init_txpower_stbc(ah,
+ AR9300_11NA_HT_SS_SHIFT,
+ AR9300_11NA_HT_DS_SHIFT,
+ AR9300_11NA_HT_TS_SHIFT);
+ }
+ } else {
+ ar9003_hw_init_txpower_cck(ah, rate_array);
+ ar9003_hw_init_txpower_ofdm(ah, rate_array,
+ AR9300_11NG_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar9003_hw_init_txpower_ht(ah, rate_array,
+ AR9300_11NG_HT_SS_SHIFT,
+ AR9300_11NG_HT_DS_SHIFT,
+ AR9300_11NG_HT_TS_SHIFT,
+ IS_CHAN_HT40(chan));
+ ar9003_hw_init_txpower_stbc(ah,
+ AR9300_11NG_HT_SS_SHIFT,
+ AR9300_11NG_HT_DS_SHIFT,
+ AR9300_11NG_HT_TS_SHIFT);
+ }
+ }
+}
+
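The per-rate tables filled in above feed the four per-series AR_XmitPower0..3 fields that ar9002/ar9003_set_txdesc() now program from i->txpower[0..3]. A tiny lookup sketch, assuming ah->tx_power/ah->tx_power_stbc are the u8 arrays written by the initializers above:

/* Illustrative per-series lookup; rate_idx indexes the tables
 * initialized by ar9003_hw_init_rate_txpower().
 */
static u8 ath9k_lookup_rate_txpower(struct ath_hw *ah, int rate_idx,
				    bool is_stbc)
{
	return is_stbc ? ah->tx_power_stbc[rate_idx] : ah->tx_power[rate_idx];
}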
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
index a43b30d723a4..6290467a75a0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
@@ -17,6 +17,7 @@
#ifndef AR9003_RTT_H
#define AR9003_RTT_H
+#ifdef CONFIG_ATH9K_PCOEM
void ar9003_hw_rtt_enable(struct ath_hw *ah);
void ar9003_hw_rtt_disable(struct ath_hw *ah);
void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask);
@@ -25,5 +26,40 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah);
void ar9003_hw_rtt_fill_hist(struct ath_hw *ah);
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah);
bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan);
+#else
+static inline void ar9003_hw_rtt_enable(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_disable(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask)
+{
+}
+
+static inline bool ar9003_hw_rtt_force_restore(struct ath_hw *ah)
+{
+ return false;
+}
+
+static inline void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
+{
+}
+
+static inline bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ return false;
+}
+#endif
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 812a9d787bf3..159cc6fd2362 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -20,6 +20,8 @@
#define qca953x_1p0_mac_postamble ar9300_2p2_mac_postamble
+#define qca953x_1p0_soc_preamble ar955x_1p0_soc_preamble
+
#define qca953x_1p0_soc_postamble ar9300_2p2_soc_postamble
#define qca953x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
@@ -28,6 +30,10 @@
#define qca953x_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+#define qca953x_1p0_common_wo_xlna_rx_gain_bounds ar955x_1p0_common_wo_xlna_rx_gain_bounds
+
+#define qca953x_1p0_common_rx_gain_bounds ar955x_1p0_common_rx_gain_bounds
+
static const u32 qca953x_1p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
@@ -490,35 +496,6 @@ static const u32 qca953x_1p0_radio_postamble[][5] = {
{0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
};
-static const u32 qca953x_1p0_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x00007000, 0x00000000},
- {0x00007004, 0x00000000},
- {0x00007008, 0x00000000},
- {0x0000700c, 0x00000000},
- {0x0000701c, 0x00000000},
- {0x00007020, 0x00000000},
- {0x00007024, 0x00000000},
- {0x00007028, 0x00000000},
- {0x0000702c, 0x00000000},
- {0x00007030, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
- {0x00007048, 0x00000000},
-};
-
-static const u32 qca953x_1p0_common_rx_gain_bounds[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
-};
-
-static const u32 qca953x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
-};
-
static const u32 qca953x_1p0_modes_xpa_tx_gain_table[][2] = {
/* Addr allmodes */
{0x0000a2dc, 0xfffd5aaa},
@@ -715,8 +692,73 @@ static const u32 qca953x_1p1_modes_no_xpa_tx_gain_table[][2] = {
{0x00016448, 0x6c927a70},
};
+static const u32 qca953x_1p1_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffb52aa},
+ {0x0000a2e0, 0xfffd64cc},
+ {0x0000a2e4, 0xfffe80f0},
+ {0x0000a2e8, 0xffffff00},
+ {0x0000a410, 0x000050d5},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x1000000a},
+ {0x0000a514, 0x1400000c},
+ {0x0000a518, 0x1800000e},
+ {0x0000a51c, 0x1c000048},
+ {0x0000a520, 0x2000004a},
+ {0x0000a524, 0x2400004c},
+ {0x0000a528, 0x2800004e},
+ {0x0000a52c, 0x2b00024a},
+ {0x0000a530, 0x2f00024c},
+ {0x0000a534, 0x3300024e},
+ {0x0000a538, 0x36000668},
+ {0x0000a53c, 0x38000669},
+ {0x0000a540, 0x3a000868},
+ {0x0000a544, 0x3d00086a},
+ {0x0000a548, 0x4000086c},
+ {0x0000a54c, 0x4200086e},
+ {0x0000a550, 0x43000a6e},
+ {0x0000a554, 0x43000a6e},
+ {0x0000a558, 0x43000a6e},
+ {0x0000a55c, 0x43000a6e},
+ {0x0000a560, 0x43000a6e},
+ {0x0000a564, 0x43000a6e},
+ {0x0000a568, 0x43000a6e},
+ {0x0000a56c, 0x43000a6e},
+ {0x0000a570, 0x43000a6e},
+ {0x0000a574, 0x43000a6e},
+ {0x0000a578, 0x43000a6e},
+ {0x0000a57c, 0x43000a6e},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x03804e01},
+ {0x0000a614, 0x03804e01},
+ {0x0000a618, 0x03804e01},
+ {0x0000a61c, 0x04009002},
+ {0x0000a620, 0x04009002},
+ {0x0000a624, 0x04009002},
+ {0x0000a628, 0x04009002},
+ {0x0000a62c, 0x04009002},
+ {0x0000a630, 0x04009002},
+ {0x0000a634, 0x04009002},
+ {0x0000a638, 0x04009002},
+ {0x0000a63c, 0x04009002},
+ {0x0000b2dc, 0xfffb52aa},
+ {0x0000b2e0, 0xfffd64cc},
+ {0x0000b2e4, 0xfffe80f0},
+ {0x0000b2e8, 0xffffff00},
+ {0x00016044, 0x024922db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x024922db},
+ {0x00016448, 0x6c927a70},
+};
+
static const u32 qca953x_2p0_baseband_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00009800, 0xafe68e30},
{0x00009804, 0xfd14e000},
{0x00009808, 0x9c0a9f6b},
@@ -914,4 +956,400 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
{0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
};
+static const u32 qca953x_2p0_common_wo_xlna_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 qca953x_2p0_common_wo_xlna_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 qca953x_2p0_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffb52aa},
+ {0x0000a2e0, 0xfffd64cc},
+ {0x0000a2e4, 0xfffe80f0},
+ {0x0000a2e8, 0xffffff00},
+ {0x0000a410, 0x000050d5},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x1000000a},
+ {0x0000a514, 0x1400000c},
+ {0x0000a518, 0x1800000e},
+ {0x0000a51c, 0x1c000048},
+ {0x0000a520, 0x2000004a},
+ {0x0000a524, 0x2400004c},
+ {0x0000a528, 0x2800004e},
+ {0x0000a52c, 0x2b00024a},
+ {0x0000a530, 0x2f00024c},
+ {0x0000a534, 0x3300024e},
+ {0x0000a538, 0x36000668},
+ {0x0000a53c, 0x38000669},
+ {0x0000a540, 0x3a000868},
+ {0x0000a544, 0x3d00086a},
+ {0x0000a548, 0x4000086c},
+ {0x0000a54c, 0x4200086e},
+ {0x0000a550, 0x43000a6e},
+ {0x0000a554, 0x43000a6e},
+ {0x0000a558, 0x43000a6e},
+ {0x0000a55c, 0x43000a6e},
+ {0x0000a560, 0x43000a6e},
+ {0x0000a564, 0x43000a6e},
+ {0x0000a568, 0x43000a6e},
+ {0x0000a56c, 0x43000a6e},
+ {0x0000a570, 0x43000a6e},
+ {0x0000a574, 0x43000a6e},
+ {0x0000a578, 0x43000a6e},
+ {0x0000a57c, 0x43000a6e},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x03804e01},
+ {0x0000a614, 0x03804e01},
+ {0x0000a618, 0x03804e01},
+ {0x0000a61c, 0x04009002},
+ {0x0000a620, 0x04009002},
+ {0x0000a624, 0x04009002},
+ {0x0000a628, 0x04009002},
+ {0x0000a62c, 0x04009002},
+ {0x0000a630, 0x04009002},
+ {0x0000a634, 0x04009002},
+ {0x0000a638, 0x04009002},
+ {0x0000a63c, 0x04009002},
+ {0x0000b2dc, 0xfffb52aa},
+ {0x0000b2e0, 0xfffd64cc},
+ {0x0000b2e4, 0xfffe80f0},
+ {0x0000b2e8, 0xffffff00},
+ {0x00016044, 0x024922db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x024922db},
+ {0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_2p0_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050de},
+ {0x0000a500, 0x00000061},
+ {0x0000a504, 0x04000063},
+ {0x0000a508, 0x08000065},
+ {0x0000a50c, 0x0c000261},
+ {0x0000a510, 0x10000263},
+ {0x0000a514, 0x14000265},
+ {0x0000a518, 0x18000482},
+ {0x0000a51c, 0x1b000484},
+ {0x0000a520, 0x1f000486},
+ {0x0000a524, 0x240008c2},
+ {0x0000a528, 0x28000cc1},
+ {0x0000a52c, 0x2d000ce3},
+ {0x0000a530, 0x31000ce5},
+ {0x0000a534, 0x350010e5},
+ {0x0000a538, 0x360012e5},
+ {0x0000a53c, 0x380014e5},
+ {0x0000a540, 0x3b0018e5},
+ {0x0000a544, 0x3d001d04},
+ {0x0000a548, 0x3e001d05},
+ {0x0000a54c, 0x40001d07},
+ {0x0000a550, 0x42001f27},
+ {0x0000a554, 0x43001f67},
+ {0x0000a558, 0x46001fe7},
+ {0x0000a55c, 0x47001f2b},
+ {0x0000a560, 0x49001f0d},
+ {0x0000a564, 0x4b001ed2},
+ {0x0000a568, 0x4c001ed4},
+ {0x0000a56c, 0x4e001f15},
+ {0x0000a570, 0x4f001ff6},
+ {0x0000a574, 0x4f001ff6},
+ {0x0000a578, 0x4f001ff6},
+ {0x0000a57c, 0x4f001ff6},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x02c14c07},
+ {0x0000a630, 0x01008704},
+ {0x0000a634, 0x01c10402},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
#endif /* INITVALS_953X_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index 74d8bc05b317..fd6a84ccd49e 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -507,7 +507,7 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
{0x00009d04, 0x40206c10},
{0x00009d08, 0x009c4060},
{0x00009d0c, 0x9883800a},
- {0x00009d10, 0x01834061},
+ {0x00009d10, 0x01884061},
{0x00009d14, 0x00c0040b},
{0x00009d18, 0x00000000},
{0x00009e08, 0x0038230c},
@@ -545,9 +545,9 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x1f020503},
- {0x0000a39c, 0x29180c03},
- {0x0000a3a0, 0x9a8b6844},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index a5ca65240af3..5d4629f96c15 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -24,7 +24,149 @@
#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
-#define ar9580_1p0_radio_core ar9300_2p2_radio_core
+static const u32 ar9580_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db2db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x00000004},
+ {0x00016294, 0x458a214f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db2db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db2db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
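Editor's note on how tables like ar9580_1p0_radio_core are consumed: each row is an {address, value} pair that the reset path writes into the radio registers through the INI machinery. A minimal userspace sketch of that consumption follows; reg_write() is a stand-in for the driver's REG_WRITE(), and the two rows are copied from the table above.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: consume an initvals table of {address, value} rows.
 * reg_write() stands in for the driver's REG_WRITE(); the two rows
 * below are copied from ar9580_1p0_radio_core above. */
static void reg_write(uint32_t addr, uint32_t val)
{
	printf("REG_WRITE(0x%08x, 0x%08x)\n", addr, val);
}

static void write_initvals(const uint32_t table[][2], size_t rows)
{
	size_t i;

	for (i = 0; i < rows; i++)
		reg_write(table[i][0], table[i][1]);
}

int main(void)
{
	static const uint32_t radio_core[][2] = {
		{0x00016000, 0x36db2db6},
		{0x00016004, 0x6db6db40},
	};

	write_initvals(radio_core, 2);
	return 0;
}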
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 01a7db061c6a..1a9fe0983a6b 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -28,7 +28,6 @@
#include "debug.h"
#include "mci.h"
#include "dfs.h"
-#include "spectral.h"
struct ath_node;
struct ath_vif;
@@ -190,6 +189,7 @@ struct ath_frame_info {
u8 rtscts_rate;
u8 retries : 7;
u8 baw_tracked : 1;
+ u8 tx_power;
};
struct ath_rxbuf {
@@ -345,7 +345,9 @@ struct ath_chanctx {
u64 tsf_val;
u32 last_beacon;
+ int flush_timeout;
u16 txpower;
+ u16 cur_txpower;
bool offchannel;
bool stopped;
bool active;
@@ -362,7 +364,7 @@ enum ath_chanctx_event {
ATH_CHANCTX_EVENT_BEACON_SENT,
ATH_CHANCTX_EVENT_TSF_TIMER,
ATH_CHANCTX_EVENT_BEACON_RECEIVED,
- ATH_CHANCTX_EVENT_ASSOC,
+ ATH_CHANCTX_EVENT_AUTHORIZED,
ATH_CHANCTX_EVENT_SWITCH,
ATH_CHANCTX_EVENT_ASSIGN,
ATH_CHANCTX_EVENT_UNASSIGN,
@@ -380,10 +382,12 @@ enum ath_chanctx_state {
struct ath_chanctx_sched {
bool beacon_pending;
+ bool beacon_adjust;
bool offchannel_pending;
bool wait_switch;
bool force_noa_update;
bool extend_absence;
+ bool mgd_prepare_tx;
enum ath_chanctx_state state;
u8 beacon_miss;
@@ -468,6 +472,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force);
void ath_offchannel_next(struct ath_softc *sc);
void ath_scan_complete(struct ath_softc *sc, bool abort);
void ath_roc_complete(struct ath_softc *sc, bool abort);
+struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
#else
@@ -540,7 +545,6 @@ static inline void ath_chanctx_check_active(struct ath_softc *sc,
#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
-int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan);
void ath_startrecv(struct ath_softc *sc);
bool ath_stoprecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
@@ -595,7 +599,7 @@ struct ath_vif {
u16 seq_no;
/* BSS info */
- u8 bssid[ETH_ALEN];
+ u8 bssid[ETH_ALEN] __aligned(2);
u16 aid;
bool assoc;
@@ -618,6 +622,7 @@ struct ath_vif {
u32 noa_start;
u32 noa_duration;
bool periodic_noa;
+ bool oneshot_noa;
};
struct ath9k_vif_iter_data {
@@ -715,7 +720,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
void ath_ps_full_sleep(unsigned long data);
-void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
+ bool sw_pending, bool timeout_override);
/**********/
/* BTCOEX */
@@ -927,6 +933,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH9K_PCI_AR9565_2ANT 0x0100
#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200
#define ATH9K_PCI_KILLER 0x0400
+#define ATH9K_PCI_LED_ACT_HI 0x0800
/*
* Default cache line size, in bytes.
@@ -975,6 +982,7 @@ struct ath_softc {
struct ath_chanctx_sched sched;
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
+ struct completion go_beacon;
#endif
unsigned long driver_data;
@@ -982,7 +990,6 @@ struct ath_softc {
u8 gtt_cnt;
u32 intrstatus;
u16 ps_flags; /* PS_* */
- u16 curtxpow;
bool ps_enabled;
bool ps_idle;
short nbcnvifs;
@@ -1023,10 +1030,8 @@ struct ath_softc {
struct dfs_pattern_detector *dfs_detector;
u64 dfs_prev_pulse_ts;
u32 wow_enabled;
- /* relay(fs) channel for spectral scan */
- struct rchan *rfs_chan_spec_scan;
- enum spectral_mode spectral_mode;
- struct ath_spec_scan spec_config;
+
+ struct ath_spec_scan_priv spec_priv;
struct ieee80211_vif *tx99_vif;
struct sk_buff *tx99_skb;
@@ -1069,7 +1074,7 @@ void ath9k_tasklet(unsigned long data);
int ath_cabq_update(struct ath_softc *);
u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
-int ath_reset(struct ath_softc *sc);
+int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan);
void ath_cancel_work(struct ath_softc *sc);
void ath_restart_work(struct ath_softc *sc);
int ath9k_init_device(u16 devid, struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index ecb783beeec2..cb366adc820b 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -78,7 +78,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
struct ath_tx_info info;
struct ieee80211_supported_band *sband;
u8 chainmask = ah->txchainmask;
- u8 rate = 0;
+ u8 i, rate = 0;
sband = &common->sbands[sc->cur_chandef.chan->band];
rate = sband->bitrates[rateidx].hw_value;
@@ -88,7 +88,8 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
memset(&info, 0, sizeof(info));
info.pkt_len = skb->len + FCS_LEN;
info.type = ATH9K_PKT_TYPE_BEACON;
- info.txpower = MAX_RATE_POWER;
+ for (i = 0; i < 4; i++)
+ info.txpower[i] = MAX_RATE_POWER;
info.keyix = ATH9K_TXKEYIX_INVALID;
info.keytype = ATH9K_KEY_TYPE_CLEAR;
info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_CLRDMASK;
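The beacon change above follows info.txpower becoming a small array rather than a scalar; filling every entry with MAX_RATE_POWER keeps beacons at the maximum allowed power for each rate series. A hedged restatement of the loop (the four-entry size matches the loop bound above; set_uniform_txpower() is illustrative, not a driver helper):

#include <stdint.h>

/* Hedged restatement of the loop above: one power entry per rate
 * series, all pinned to the same maximum for beacon frames. */
static void set_uniform_txpower(uint8_t txpower[4], uint8_t power)
{
	int i;

	for (i = 0; i < 4; i++)
		txpower[i] = power;
}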
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 278365b8a895..e200a6e3aca5 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -234,7 +234,7 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath9k_nfcal_hist *h = NULL;
unsigned i, j;
@@ -301,7 +301,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
ath_dbg(common, ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
REG_READ(ah, AR_PHY_AGC_CONTROL));
- return;
+ return -ETIMEDOUT;
}
/*
@@ -322,6 +322,8 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
}
REGWRITE_BUFFER_FLUSH(ah);
+
+ return 0;
}
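With ath9k_hw_loadnf() now returning an int, a noise-floor load timeout can surface as -ETIMEDOUT instead of being logged and swallowed. A hedged caller sketch using symbols that appear elsewhere in this series (ath9k_queue_reset() and the new RESET_TYPE_CALIBRATION); this is illustrative kernel-context code, not a verbatim excerpt:

/* Hedged caller sketch (kernel context, not a verbatim excerpt):
 * escalate a noise-floor load timeout into a queued chip reset. */
static void example_handle_nf(struct ath_softc *sc, struct ath9k_channel *chan)
{
	if (ath9k_hw_loadnf(sc->sc_ah, chan) == -ETIMEDOUT)
		ath9k_queue_reset(sc, RESET_TYPE_CALIBRATION);
}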
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b8ed95e9a335..87badf4bb8a4 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -109,7 +109,7 @@ struct ath9k_pacal_info{
bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update);
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
+int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan);
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
struct ath9k_channel *chan);
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 945c89826b14..206665059d66 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -66,7 +66,7 @@ static int ath_set_channel(struct ath_softc *sc)
}
hchan = &sc->sc_ah->channels[pos];
- r = ath_reset_internal(sc, hchan);
+ r = ath_reset(sc, hchan);
if (r)
return r;
@@ -92,8 +92,8 @@ static int ath_set_channel(struct ath_softc *sc)
} else {
/* perform spectral scan if requested. */
if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
- sc->spectral_mode == SPECTRAL_CHANSCAN)
- ath9k_spectral_scan_trigger(hw);
+ sc->spec_priv.spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_cmn_spectral_scan_trigger(common, &sc->spec_priv);
}
return 0;
@@ -117,6 +117,7 @@ void ath_chanctx_init(struct ath_softc *sc)
cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
INIT_LIST_HEAD(&ctx->vifs);
ctx->txpower = ATH_TXPOWER_MAX;
+ ctx->flush_timeout = HZ / 5; /* 200ms */
for (j = 0; j < ARRAY_SIZE(ctx->acq); j++)
INIT_LIST_HEAD(&ctx->acq[j]);
}
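The flush_timeout default is stored in jiffies, so HZ / 5 is 200 ms regardless of the configured tick rate, as the inline comment says. An equivalent spelling with the canonical conversion helper (sketch only):

#include <linux/jiffies.h>

/* Sketch: HZ / 5 and msecs_to_jiffies(200) name the same 200 ms
 * interval, independent of CONFIG_HZ. */
static inline unsigned long default_flush_timeout(void)
{
	return msecs_to_jiffies(200);	/* == HZ / 5 */
}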
@@ -145,6 +146,36 @@ void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx,
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+/*************/
+/* Utilities */
+/*************/
+
+struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc)
+{
+ struct ath_chanctx *ctx;
+ struct ath_vif *avp;
+ struct ieee80211_vif *vif;
+
+ spin_lock_bh(&sc->chan_lock);
+
+ ath_for_each_chanctx(sc, ctx) {
+ if (!ctx->active)
+ continue;
+
+ list_for_each_entry(avp, &ctx->vifs, list) {
+ vif = avp->vif;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) {
+ spin_unlock_bh(&sc->chan_lock);
+ return ctx;
+ }
+ }
+ }
+
+ spin_unlock_bh(&sc->chan_lock);
+ return NULL;
+}
+
/**********************************************************/
/* Functions to handle the channel context state machine. */
/**********************************************************/
@@ -171,7 +202,7 @@ static const char *chanctx_event_string(enum ath_chanctx_event ev)
case_rtn_string(ATH_CHANCTX_EVENT_BEACON_SENT);
case_rtn_string(ATH_CHANCTX_EVENT_TSF_TIMER);
case_rtn_string(ATH_CHANCTX_EVENT_BEACON_RECEIVED);
- case_rtn_string(ATH_CHANCTX_EVENT_ASSOC);
+ case_rtn_string(ATH_CHANCTX_EVENT_AUTHORIZED);
case_rtn_string(ATH_CHANCTX_EVENT_SWITCH);
case_rtn_string(ATH_CHANCTX_EVENT_ASSIGN);
case_rtn_string(ATH_CHANCTX_EVENT_UNASSIGN);
@@ -198,6 +229,7 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_chanctx *ictx;
struct ath_vif *avp;
bool active = false;
u8 n_active = 0;
@@ -205,6 +237,28 @@ void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
if (!ctx)
return;
+ if (ctx == &sc->offchannel.chan) {
+ spin_lock_bh(&sc->chan_lock);
+
+ if (likely(sc->sched.channel_switch_time))
+ ctx->flush_timeout =
+ usecs_to_jiffies(sc->sched.channel_switch_time);
+ else
+ ctx->flush_timeout =
+ msecs_to_jiffies(10);
+
+ spin_unlock_bh(&sc->chan_lock);
+
+ /*
+ * There is no need to iterate over the
+ * active/assigned channel contexts if
+ * the current context is offchannel.
+ */
+ return;
+ }
+
+ ictx = ctx;
+
list_for_each_entry(avp, &ctx->vifs, list) {
struct ieee80211_vif *vif = avp->vif;
@@ -227,12 +281,23 @@ void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
n_active++;
}
+ spin_lock_bh(&sc->chan_lock);
+
if (n_active <= 1) {
+ ictx->flush_timeout = HZ / 5;
clear_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags);
+ spin_unlock_bh(&sc->chan_lock);
return;
}
- if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+
+ ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time);
+
+ if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) {
+ spin_unlock_bh(&sc->chan_lock);
return;
+ }
+
+ spin_unlock_bh(&sc->chan_lock);
if (ath9k_is_chanctx_enabled()) {
ath_chanctx_event(sc, NULL,
@@ -301,6 +366,111 @@ static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time)
"Setup chanctx timer with timeout: %d ms\n", jiffies_to_msecs(tsf_time));
}
+static void ath_chanctx_handle_bmiss(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath_vif *avp)
+{
+ /*
+ * Clear the extend_absence flag if it had been
+ * set during the previous beacon transmission,
+ * since we need to revert to the normal NoA
+ * schedule.
+ */
+ if (ctx->active && sc->sched.extend_absence) {
+ avp->noa_duration = 0;
+ sc->sched.extend_absence = false;
+ }
+
+ /* If at least two consecutive beacons were missed on the STA
+ * chanctx, stay on the STA channel for one extra beacon period,
+ * to resync the timer properly.
+ */
+ if (ctx->active && sc->sched.beacon_miss >= 2) {
+ avp->noa_duration = 0;
+ sc->sched.extend_absence = true;
+ }
+}
+
+static void ath_chanctx_offchannel_noa(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath_vif *avp,
+ u32 tsf_time)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->offchannel_start = tsf_time;
+ avp->offchannel_duration = sc->sched.offchannel_duration;
+
+ ath_dbg(common, CHAN_CTX,
+ "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n",
+ avp->offchannel_duration,
+ avp->offchannel_start,
+ avp->noa_index);
+
+ /*
+ * When multiple contexts are active, the NoA
+ * has to be recalculated and advertised after
+ * an offchannel operation.
+ */
+ if (ctx->active && avp->noa_duration)
+ avp->noa_duration = 0;
+}
+
+static void ath_chanctx_set_periodic_noa(struct ath_softc *sc,
+ struct ath_vif *avp,
+ struct ath_beacon_config *cur_conf,
+ u32 tsf_time,
+ u32 beacon_int)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->noa_start = tsf_time;
+
+ if (sc->sched.extend_absence)
+ avp->noa_duration = (3 * beacon_int / 2) +
+ sc->sched.channel_switch_time;
+ else
+ avp->noa_duration =
+ TU_TO_USEC(cur_conf->beacon_interval) / 2 +
+ sc->sched.channel_switch_time;
+
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) ||
+ sc->sched.extend_absence)
+ avp->periodic_noa = false;
+ else
+ avp->periodic_noa = true;
+
+ ath_dbg(common, CHAN_CTX,
+ "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
+ avp->noa_duration,
+ avp->noa_start,
+ avp->noa_index,
+ avp->periodic_noa);
+}
+
+static void ath_chanctx_set_oneshot_noa(struct ath_softc *sc,
+ struct ath_vif *avp,
+ u32 tsf_time,
+ u32 duration)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->noa_start = tsf_time;
+ avp->periodic_noa = false;
+ avp->oneshot_noa = true;
+ avp->noa_duration = duration + sc->sched.channel_switch_time;
+
+ ath_dbg(common, CHAN_CTX,
+ "oneshot noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
+ avp->noa_duration,
+ avp->noa_start,
+ avp->noa_index,
+ avp->periodic_noa);
+}
+
void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
enum ath_chanctx_event ev)
{
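To make the periodic-NoA arithmetic above concrete: for a hypothetical 100 TU beacon interval (1 TU = 1024 us), the normal absence is half the interval plus the channel switch time, and the extended-absence branch covers one and a half intervals. A self-contained restatement, where the 100 TU figure is an example rather than a driver constant:

#include <stdint.h>

/* Worked example of the duration math, for a hypothetical 100 TU
 * beacon interval; 1 TU = 1024 us, so the interval is 102400 us. */
#define EX_TU_TO_USEC(x)	((uint32_t)(x) << 10)

static uint32_t example_noa_duration(uint32_t switch_time, int extend_absence)
{
	uint32_t beacon_int = EX_TU_TO_USEC(100);	/* 102400 us */

	if (extend_absence)
		return 3 * beacon_int / 2 + switch_time; /* 153600 + switch */

	return beacon_int / 2 + switch_time;		 /* 51200 + switch */
}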
@@ -327,6 +497,14 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
if (avp->offchannel_duration)
avp->offchannel_duration = 0;
+ if (avp->oneshot_noa) {
+ avp->noa_duration = 0;
+ avp->oneshot_noa = false;
+
+ ath_dbg(common, CHAN_CTX,
+ "Clearing oneshot NoA\n");
+ }
+
if (avp->chanctx != sc->cur_chan) {
ath_dbg(common, CHAN_CTX,
"Contexts differ, not preparing beacon\n");
@@ -356,6 +534,24 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
"Move chanctx state from WAIT_FOR_TIMER to WAIT_FOR_BEACON\n");
}
+ if (sc->sched.mgd_prepare_tx)
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+
+ /*
+ * When a context becomes inactive, for example,
+ * disassociation of a station context, the NoA
+ * attribute needs to be removed from subsequent
+ * beacons.
+ */
+ if (!ctx->active && avp->noa_duration &&
+ sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) {
+ avp->noa_duration = 0;
+ avp->periodic_noa = false;
+
+ ath_dbg(common, CHAN_CTX,
+ "Clearing NoA schedule\n");
+ }
+
if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
break;
@@ -378,45 +574,22 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
* values and increment the index.
*/
if (sc->next_chan == &sc->offchannel.chan) {
- avp->noa_index++;
- avp->offchannel_start = tsf_time;
- avp->offchannel_duration = sc->sched.offchannel_duration;
-
- ath_dbg(common, CHAN_CTX,
- "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n",
- avp->offchannel_duration,
- avp->offchannel_start,
- avp->noa_index);
-
- /*
- * When multiple contexts are active, the NoA
- * has to be recalculated and advertised after
- * an offchannel operation.
- */
- if (ctx->active && avp->noa_duration)
- avp->noa_duration = 0;
-
+ ath_chanctx_offchannel_noa(sc, ctx, avp, tsf_time);
break;
}
- /*
- * Clear the extend_absence flag if it had been
- * set during the previous beacon transmission,
- * since we need to revert to the normal NoA
- * schedule.
- */
- if (ctx->active && sc->sched.extend_absence) {
- avp->noa_duration = 0;
- sc->sched.extend_absence = false;
- }
+ ath_chanctx_handle_bmiss(sc, ctx, avp);
- /* If at least two consecutive beacons were missed on the STA
- * chanctx, stay on the STA channel for one extra beacon period,
- * to resync the timer properly.
+ /*
+	 * If mgd_prepare_tx() has been called by mac80211,
+ * a one-shot NoA needs to be sent. This can happen
+ * with one or more active channel contexts - in both
+ * cases, a new NoA schedule has to be advertised.
*/
- if (ctx->active && sc->sched.beacon_miss >= 2) {
- avp->noa_duration = 0;
- sc->sched.extend_absence = true;
+ if (sc->sched.mgd_prepare_tx) {
+ ath_chanctx_set_oneshot_noa(sc, avp, tsf_time,
+ jiffies_to_usecs(HZ / 5));
+ break;
}
/* Prevent wrap-around issues */
@@ -429,31 +602,9 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
* announcement.
*/
if (ctx->active &&
- (!avp->noa_duration || sc->sched.force_noa_update)) {
- avp->noa_index++;
- avp->noa_start = tsf_time;
-
- if (sc->sched.extend_absence)
- avp->noa_duration = (3 * beacon_int / 2) +
- sc->sched.channel_switch_time;
- else
- avp->noa_duration =
- TU_TO_USEC(cur_conf->beacon_interval) / 2 +
- sc->sched.channel_switch_time;
-
- if (test_bit(ATH_OP_SCANNING, &common->op_flags) ||
- sc->sched.extend_absence)
- avp->periodic_noa = false;
- else
- avp->periodic_noa = true;
-
- ath_dbg(common, CHAN_CTX,
- "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
- avp->noa_duration,
- avp->noa_start,
- avp->noa_index,
- avp->periodic_noa);
- }
+ (!avp->noa_duration || sc->sched.force_noa_update))
+ ath_chanctx_set_periodic_noa(sc, avp, cur_conf,
+ tsf_time, beacon_int);
if (ctx->active && sc->sched.force_noa_update)
sc->sched.force_noa_update = false;
@@ -467,6 +618,15 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
}
sc->sched.beacon_pending = false;
+
+ if (sc->sched.mgd_prepare_tx) {
+ sc->sched.mgd_prepare_tx = false;
+ complete(&sc->go_beacon);
+ ath_dbg(common, CHAN_CTX,
+ "Beacon sent, complete go_beacon\n");
+ break;
+ }
+
if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
break;
@@ -495,10 +655,16 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
sc->cur_chan == &sc->offchannel.chan)
break;
- ath_chanctx_adjust_tbtt_delta(sc);
sc->sched.beacon_pending = false;
sc->sched.beacon_miss = 0;
+ if (sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
+ !sc->sched.beacon_adjust ||
+ !sc->cur_chan->tsf_val)
+ break;
+
+ ath_chanctx_adjust_tbtt_delta(sc);
+
/* TSF time might have been updated by the incoming beacon,
* need update the channel switch timer to reflect the change.
*/
@@ -507,10 +673,10 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
tsf_time += ath9k_hw_gettsf32(ah);
-
+ sc->sched.beacon_adjust = false;
ath_chanctx_setup_timer(sc, tsf_time);
break;
- case ATH_CHANCTX_EVENT_ASSOC:
+ case ATH_CHANCTX_EVENT_AUTHORIZED:
if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE ||
avp->chanctx != sc->cur_chan)
break;
@@ -552,6 +718,7 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
ath_chanctx_setup_timer(sc, tsf_time);
sc->sched.beacon_pending = true;
+ sc->sched.beacon_adjust = true;
break;
case ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL:
if (sc->cur_chan == &sc->offchannel.chan ||
@@ -578,22 +745,6 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
ieee80211_queue_work(sc->hw, &sc->chanctx_work);
break;
case ATH_CHANCTX_EVENT_ASSIGN:
- /*
- * When adding a new channel context, check if a scan
- * is in progress and abort it since the addition of
- * a new channel context is usually followed by VIF
- * assignment, in which case we have to start multi-channel
- * operation.
- */
- if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
- ath_dbg(common, CHAN_CTX,
- "Aborting HW scan to add new context\n");
-
- spin_unlock_bh(&sc->chan_lock);
- del_timer_sync(&sc->offchannel.timer);
- ath_scan_complete(sc, true);
- spin_lock_bh(&sc->chan_lock);
- }
break;
case ATH_CHANCTX_EVENT_CHANGE:
break;
@@ -751,6 +902,11 @@ void ath_offchannel_next(struct ath_softc *sc)
sc->offchannel.state = ATH_OFFCHANNEL_ROC_START;
ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan);
} else {
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.offchannel_pending = false;
+ sc->sched.wait_switch = false;
+ spin_unlock_bh(&sc->chan_lock);
+
ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false),
NULL);
sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
@@ -770,8 +926,7 @@ void ath_roc_complete(struct ath_softc *sc, bool abort)
sc->offchannel.roc_vif = NULL;
sc->offchannel.roc_chan = NULL;
- if (!abort)
- ieee80211_remain_on_channel_expired(sc->hw);
+ ieee80211_remain_on_channel_expired(sc->hw);
ath_offchannel_next(sc);
ath9k_ps_restore(sc);
}
@@ -808,7 +963,7 @@ static void ath_scan_send_probe(struct ath_softc *sc,
struct ieee80211_tx_info *info;
int band = sc->offchannel.chan.chandef.chan->band;
- skb = ieee80211_probereq_get(sc->hw, vif,
+ skb = ieee80211_probereq_get(sc->hw, vif->addr,
ssid->ssid, ssid->ssid_len, req->ie_len);
if (!skb)
return;
@@ -902,9 +1057,8 @@ static void ath_offchannel_timer(unsigned long data)
break;
case ATH_OFFCHANNEL_ROC_START:
case ATH_OFFCHANNEL_ROC_WAIT:
- ctx = ath_chanctx_get_oper_chan(sc, false);
sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
- ath_chanctx_switch(sc, ctx, NULL);
+ ath_roc_complete(sc, false);
break;
default:
break;
@@ -1034,7 +1188,6 @@ static void ath_offchannel_channel_change(struct ath_softc *sc)
ieee80211_ready_on_channel(sc->hw);
break;
case ATH_OFFCHANNEL_ROC_DONE:
- ath_roc_complete(sc, false);
break;
default:
break;
@@ -1082,10 +1235,11 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
ath9k_chanctx_stop_queues(sc, sc->cur_chan);
queues_stopped = true;
- __ath9k_flush(sc->hw, ~0, true);
+ __ath9k_flush(sc->hw, ~0, true, false, false);
if (ath_chanctx_send_ps_frame(sc, true))
- __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false);
+ __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO),
+ false, false, false);
send_ps = true;
spin_lock_bh(&sc->chan_lock);
@@ -1177,6 +1331,8 @@ void ath9k_init_channel_context(struct ath_softc *sc)
(unsigned long)sc);
setup_timer(&sc->sched.timer, ath_chanctx_timer,
(unsigned long)sc);
+
+ init_completion(&sc->go_beacon);
}
void ath9k_deinit_channel_context(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 8f68426ca653..ec93ddf0863a 100644
--- a/drivers/net/wireless/ath/ath9k/spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -24,23 +24,24 @@ static s8 fix_rssi_inv_only(u8 rssi_val)
return (s8) rssi_val;
}
-static void ath_debug_send_fft_sample(struct ath_softc *sc,
+static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv,
struct fft_sample_tlv *fft_sample_tlv)
{
int length;
- if (!sc->rfs_chan_spec_scan)
+ if (!spec_priv->rfs_chan_spec_scan)
return;
length = __be16_to_cpu(fft_sample_tlv->length) +
sizeof(*fft_sample_tlv);
- relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
+ relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
}
/* returns 1 if this was a spectral frame, even if not handled. */
-int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
struct ath_rx_status *rs, u64 tsf)
{
- struct ath_hw *ah = sc->sc_ah;
+ struct ath_hw *ah = spec_priv->ah;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
u8 num_bins, *bins, *vdata = (u8 *)hdr;
struct fft_sample_ht20 fft_sample_20;
struct fft_sample_ht20_40 fft_sample_40;
@@ -67,7 +68,7 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
- chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+ chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
if ((chan_type == NL80211_CHAN_HT40MINUS) ||
(chan_type == NL80211_CHAN_HT40PLUS)) {
fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
@@ -199,10 +200,11 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
tlv = (struct fft_sample_tlv *)&fft_sample_20;
}
- ath_debug_send_fft_sample(sc, tlv);
+ ath_debug_send_fft_sample(spec_priv, tlv);
return 1;
}
+EXPORT_SYMBOL(ath_cmn_process_fft);
/*********************/
/* spectral_scan_ctl */
@@ -211,11 +213,11 @@ int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char *mode = "";
unsigned int len;
- switch (sc->spectral_mode) {
+ switch (spec_priv->spectral_mode) {
case SPECTRAL_DISABLED:
mode = "disable";
break;
@@ -233,12 +235,84 @@ static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, mode, len);
}
+void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv)
+{
+ struct ath_hw *ah = spec_priv->ah;
+ u32 rxfilter;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return;
+ }
+
+ ath_ps_ops(common)->wakeup(common);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ ath9k_hw_setrxfilter(ah, rxfilter |
+ ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR);
+
+	/* TODO: usually this should not be necessary, but for some reason
+ * (or in some mode?) the trigger must be called after the
+ * configuration, otherwise the register will have its values reset
+ * (on my ar9220 to value 0x01002310)
+ */
+ ath9k_cmn_spectral_scan_config(common, spec_priv, spec_priv->spectral_mode);
+ ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
+ ath_ps_ops(common)->restore(common);
+}
+EXPORT_SYMBOL(ath9k_cmn_spectral_scan_trigger);
+
+int ath9k_cmn_spectral_scan_config(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv,
+ enum spectral_mode spectral_mode)
+{
+ struct ath_hw *ah = spec_priv->ah;
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return -1;
+ }
+
+ switch (spectral_mode) {
+ case SPECTRAL_DISABLED:
+ spec_priv->spec_config.enabled = 0;
+ break;
+ case SPECTRAL_BACKGROUND:
+ /* send endless samples.
+ * TODO: is this really useful for "background"?
+ */
+ spec_priv->spec_config.endless = 1;
+ spec_priv->spec_config.enabled = 1;
+ break;
+ case SPECTRAL_CHANSCAN:
+ case SPECTRAL_MANUAL:
+ spec_priv->spec_config.endless = 0;
+ spec_priv->spec_config.enabled = 1;
+ break;
+ default:
+ return -1;
+ }
+
+ ath_ps_ops(common)->wakeup(common);
+ ath9k_hw_ops(ah)->spectral_scan_config(ah, &spec_priv->spec_config);
+ ath_ps_ops(common)->restore(common);
+
+ spec_priv->spectral_mode = spectral_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_spectral_scan_config);
+
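Both exported helpers bracket the hardware ops with the new ath_ps_ops wakeup/restore indirection, so ath9k and ath9k_htc can share them despite their different power-save paths. From userspace the modes are driven through the debugfs control file handled just below; a hedged usage example, where the phy0 path is the usual per-phy debugfs location and may differ on a given system:

#include <fcntl.h>
#include <unistd.h>

/* Hedged usage example: select chanscan mode through debugfs.
 * The phy0 path is illustrative and varies per system. */
int main(void)
{
	const char *ctl =
		"/sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan_ctl";
	int fd = open(ctl, O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "chanscan", 8) != 8) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}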
static ssize_t write_file_spec_scan_ctl(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
char buf[32];
ssize_t len;
@@ -252,18 +326,18 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
buf[len] = '\0';
if (strncmp("trigger", buf, 7) == 0) {
- ath9k_spectral_scan_trigger(sc->hw);
+ ath9k_cmn_spectral_scan_trigger(common, spec_priv);
} else if (strncmp("background", buf, 10) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_BACKGROUND);
ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
} else if (strncmp("chanscan", buf, 8) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_CHANSCAN);
ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
} else if (strncmp("manual", buf, 6) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_MANUAL);
ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
} else if (strncmp("disable", buf, 7) == 0) {
- ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_DISABLED);
ath_dbg(common, CONFIG, "spectral scan: disabled\n");
} else {
return -EINVAL;
@@ -288,11 +362,11 @@ static ssize_t read_file_spectral_short_repeat(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.short_repeat);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -300,7 +374,7 @@ static ssize_t write_file_spectral_short_repeat(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
@@ -316,7 +390,7 @@ static ssize_t write_file_spectral_short_repeat(struct file *file,
if (val > 1)
return -EINVAL;
- sc->spec_config.short_repeat = val;
+ spec_priv->spec_config.short_repeat = val;
return count;
}
@@ -336,11 +410,11 @@ static ssize_t read_file_spectral_count(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", sc->spec_config.count);
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.count);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -348,7 +422,7 @@ static ssize_t write_file_spectral_count(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
@@ -364,7 +438,7 @@ static ssize_t write_file_spectral_count(struct file *file,
if (val > 255)
return -EINVAL;
- sc->spec_config.count = val;
+ spec_priv->spec_config.count = val;
return count;
}
@@ -384,11 +458,11 @@ static ssize_t read_file_spectral_period(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", sc->spec_config.period);
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.period);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -396,7 +470,7 @@ static ssize_t write_file_spectral_period(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
@@ -412,7 +486,7 @@ static ssize_t write_file_spectral_period(struct file *file,
if (val > 255)
return -EINVAL;
- sc->spec_config.period = val;
+ spec_priv->spec_config.period = val;
return count;
}
@@ -432,11 +506,11 @@ static ssize_t read_file_spectral_fft_period(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.fft_period);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -444,7 +518,7 @@ static ssize_t write_file_spectral_fft_period(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath_softc *sc = file->private_data;
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
@@ -460,7 +534,7 @@ static ssize_t write_file_spectral_fft_period(struct file *file,
if (val > 15)
return -EINVAL;
- sc->spec_config.fft_period = val;
+ spec_priv->spec_config.fft_period = val;
return count;
}
@@ -506,38 +580,41 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
/* Debug Init/Deinit */
/*********************/
-void ath9k_spectral_deinit_debug(struct ath_softc *sc)
+void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
- relay_close(sc->rfs_chan_spec_scan);
- sc->rfs_chan_spec_scan = NULL;
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
+ relay_close(spec_priv->rfs_chan_spec_scan);
+ spec_priv->rfs_chan_spec_scan = NULL;
}
}
+EXPORT_SYMBOL(ath9k_cmn_spectral_deinit_debug);
-void ath9k_spectral_init_debug(struct ath_softc *sc)
+void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
+ struct dentry *debugfs_phy)
{
- sc->rfs_chan_spec_scan = relay_open("spectral_scan",
- sc->debug.debugfs_phy,
+ spec_priv->rfs_chan_spec_scan = relay_open("spectral_scan",
+ debugfs_phy,
1024, 256, &rfs_spec_scan_cb,
NULL);
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spec_scan_ctl);
debugfs_create_file("spectral_short_repeat",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spectral_short_repeat);
debugfs_create_file("spectral_count",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spectral_count);
debugfs_create_file("spectral_period",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spectral_period);
debugfs_create_file("spectral_fft_period",
S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc,
+ debugfs_phy, spec_priv,
&fops_spectral_fft_period);
}
+EXPORT_SYMBOL(ath9k_cmn_spectral_init_debug);
diff --git a/drivers/net/wireless/ath/ath9k/spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 7b410c6858b0..82d9dd29652c 100644
--- a/drivers/net/wireless/ath/ath9k/spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -92,6 +92,13 @@ struct ath_ht20_40_fft_packet {
struct ath_radar_info radar_info;
} __packed;
+struct ath_spec_scan_priv {
+ struct ath_hw *ah;
+ /* relay(fs) channel for spectral scan */
+ struct rchan *rfs_chan_spec_scan;
+ enum spectral_mode spectral_mode;
+ struct ath_spec_scan spec_config;
+};
#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
@@ -123,23 +130,15 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
return bins[0] & 0x3f;
}
-void ath9k_spectral_init_debug(struct ath_softc *sc);
-void ath9k_spectral_deinit_debug(struct ath_softc *sc);
+void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
+void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
-void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
-int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv);
+int ath9k_cmn_spectral_scan_config(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv,
enum spectral_mode spectral_mode);
-
-#ifdef CONFIG_ATH9K_DEBUGFS
-int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
struct ath_rx_status *rs, u64 tsf);
-#else
-static inline int ath_process_fft(struct ath_softc *sc,
- struct ieee80211_hdr *hdr,
- struct ath_rx_status *rs, u64 tsf)
-{
- return 0;
-}
-#endif /* CONFIG_ATH9K_DEBUGFS */
#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 33b0c7aef2ea..e8c699446470 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -159,7 +159,7 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
if (test_bit(keyix, common->keymap))
rxs->flag |= RX_FLAG_DECRYPTED;
}
- if (ah->sw_mgmt_crypto &&
+ if (ah->sw_mgmt_crypto_rx &&
(rxs->flag & RX_FLAG_DECRYPTED) &&
ieee80211_is_mgmt(fc))
/* Use software decrypt for management frames. */
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ffc454b18637..2b79a568e803 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -24,6 +24,7 @@
#include "common-init.h"
#include "common-beacon.h"
#include "common-debug.h"
+#include "common-spectral.h"
/* Common header for Atheros 802.11n base driver cores */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 5c45e787814e..696e3d5309c6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -828,13 +828,14 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
i = 0;
ath_for_each_chanctx(sc, ctx) {
- if (!ctx->assigned || list_empty(&ctx->vifs))
+ if (list_empty(&ctx->vifs))
continue;
ath9k_calculate_iter_data(sc, ctx, &iter_data);
len += scnprintf(buf + len, sizeof(buf) - len,
- "VIF-COUNTS: CTX %i AP: %i STA: %i MESH: %i WDS: %i",
- i++, iter_data.naps, iter_data.nstations,
+ "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i WDS: %i",
+ i++, (int)(ctx->assigned), iter_data.naps,
+ iter_data.nstations,
iter_data.nmeshes, iter_data.nwds);
len += scnprintf(buf + len, sizeof(buf) - len,
" ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
@@ -852,36 +853,31 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
+ static const char * const reset_cause[__RESET_TYPE_MAX] = {
+ [RESET_TYPE_BB_HANG] = "Baseband Hang",
+ [RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog",
+ [RESET_TYPE_FATAL_INT] = "Fatal HW Error",
+ [RESET_TYPE_TX_ERROR] = "TX HW error",
+ [RESET_TYPE_TX_GTT] = "Transmit timeout",
+ [RESET_TYPE_TX_HANG] = "TX Path Hang",
+ [RESET_TYPE_PLL_HANG] = "PLL RX Hang",
+ [RESET_TYPE_MAC_HANG] = "MAC Hang",
+ [RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
+ [RESET_TYPE_MCI] = "MCI Reset",
+ [RESET_TYPE_CALIBRATION] = "Calibration error",
+ };
char buf[512];
unsigned int len = 0;
+ int i;
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Hang",
- sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Baseband Watchdog",
- sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Fatal HW Error",
- sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX HW error",
- sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "TX Path Hang",
- sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "PLL RX Hang",
- sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "MAC Hang",
- sc->debug.stats.reset[RESET_TYPE_MAC_HANG]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "Stuck Beacon",
- sc->debug.stats.reset[RESET_TYPE_BEACON_STUCK]);
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%17s: %2d\n", "MCI Reset",
- sc->debug.stats.reset[RESET_TYPE_MCI]);
+ for (i = 0; i < ARRAY_SIZE(reset_cause); i++) {
+ if (!reset_cause[i])
+ continue;
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", reset_cause[i],
+ sc->debug.stats.reset[i]);
+ }
if (len > sizeof(buf))
len = sizeof(buf);
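The rewrite replaces nine hand-rolled scnprintf() calls with a designated-initializer string table indexed by the reset-type enum, so newly added causes (such as RESET_TYPE_CALIBRATION below) only need one array entry. The same pattern in miniature, with a hypothetical two-entry enum:

#include <stdio.h>

/* Miniature of the reset_cause pattern: designated initializers keyed
 * by enum values; unset slots stay NULL and are skipped when printing.
 * The enum and strings here are illustrative, not the driver's. */
enum ex_reset { EX_BB_HANG, EX_MCI, __EX_MAX };

static const char * const ex_cause[__EX_MAX] = {
	[EX_BB_HANG]	= "Baseband Hang",
	[EX_MCI]	= "MCI Reset",
};

int main(void)
{
	int counts[__EX_MAX] = { 3, 1 };
	int i;

	for (i = 0; i < __EX_MAX; i++)
		if (ex_cause[i])
			printf("%17s: %2d\n", ex_cause[i], counts[i]);
	return 0;
}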
@@ -1315,7 +1311,7 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
void ath9k_deinit_debug(struct ath_softc *sc)
{
- ath9k_spectral_deinit_debug(sc);
+ ath9k_cmn_spectral_deinit_debug(&sc->spec_priv);
}
int ath9k_init_debug(struct ath_hw *ah)
@@ -1335,7 +1331,7 @@ int ath9k_init_debug(struct ath_hw *ah)
ath9k_dfs_init_debug(sc);
ath9k_tx99_init_debug(sc);
- ath9k_spectral_init_debug(sc);
+ ath9k_cmn_spectral_init_debug(&sc->spec_priv, sc->debug.debugfs_phy);
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 53ae15bd0c9d..bd75b1f716db 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -49,6 +49,7 @@ enum ath_reset_type {
RESET_TYPE_MAC_HANG,
RESET_TYPE_BEACON_STUCK,
RESET_TYPE_MCI,
+ RESET_TYPE_CALIBRATION,
__RESET_TYPE_MAX
};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 3218ca994746..122b846b8ec0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -262,7 +262,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
struct ar5416_eeprom_def *eep = &ah->eeprom.def;
struct ath_common *common = ath9k_hw_common(ah);
- u16 *eepdata, temp, magic, magic2;
+ u16 *eepdata, temp, magic;
u32 sum = 0, el;
bool need_swap = false;
int i, addr, size;
@@ -272,27 +272,16 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
return false;
}
- if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
-
- if (magic != AR5416_EEPROM_MAGIC) {
- magic2 = swab16(magic);
-
- if (magic2 == AR5416_EEPROM_MAGIC) {
- size = sizeof(struct ar5416_eeprom_def);
- need_swap = true;
- eepdata = (u16 *) (&ah->eeprom);
+ if (swab16(magic) == AR5416_EEPROM_MAGIC &&
+ !(ah->ah_flags & AH_NO_EEP_SWAP)) {
+ size = sizeof(struct ar5416_eeprom_def);
+ need_swap = true;
+ eepdata = (u16 *) (&ah->eeprom);
- for (addr = 0; addr < size / sizeof(u16); addr++) {
- temp = swab16(*eepdata);
- *eepdata = temp;
- eepdata++;
- }
- } else {
- ath_err(common,
- "Invalid EEPROM Magic. Endianness mismatch.\n");
- return -EINVAL;
- }
+ for (addr = 0; addr < size / sizeof(u16); addr++) {
+ temp = swab16(*eepdata);
+ *eepdata = temp;
+ eepdata++;
}
}
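The flattened logic swaps the whole EEPROM image in place as 16-bit words whenever the magic value arrives byte-swapped, unless the platform sets the new AH_NO_EEP_SWAP flag. A standalone sketch of that swab pass:

#include <stddef.h>
#include <stdint.h>

/* Standalone sketch of the in-place 16-bit swab pass over the image. */
static void swab16_image(uint16_t *eepdata, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes / sizeof(uint16_t); i++)
		eepdata[i] = (uint16_t)((eepdata[i] >> 8) | (eepdata[i] << 8));
}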
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index b1956bf6e01e..2fef7a480fec 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -25,7 +25,12 @@ static void ath_led_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF));
+ u32 val = (brightness == LED_OFF);
+
+ if (sc->sc_ah->config.led_active_high)
+ val = !val;
+
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, val);
}
void ath_deinit_leds(struct ath_softc *sc)
@@ -82,7 +87,7 @@ void ath_fill_led_pin(struct ath_softc *sc)
ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
/* LED off, active low */
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1);
}
#endif
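Most boards wire the LED active-low (write 1 to turn it off), but the new led_active_high flag inverts the GPIO value for boards wired the other way. Both call sites above reduce to an XOR of the off-state with the polarity, as this hedged restatement shows:

#include <stdbool.h>
#include <stdint.h>

/* Off-state is 1 for active-low LEDs and 0 for active-high ones. */
static uint32_t led_gpio_value(bool led_off, bool active_high)
{
	return (uint32_t)(led_off ^ active_high);
}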
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 09a5d72f3ff5..9dde265d3f84 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -481,6 +481,7 @@ struct ath9k_htc_priv {
unsigned long op_flags;
struct ath9k_hw_cal_data caldata;
+ struct ath_spec_scan_priv spec_priv;
spinlock_t beacon_lock;
struct ath_beacon_config cur_beacon_conf;
@@ -625,8 +626,12 @@ int ath9k_htc_resume(struct htc_target *htc_handle);
#endif
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
int ath9k_htc_init_debug(struct ath_hw *ah);
+void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv);
#else
static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; };
+static inline void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv)
+{
+}
#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 8b529e4b8ac4..8cef1edcc621 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -490,6 +490,10 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
}
+void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv)
+{
+ ath9k_cmn_spectral_deinit_debug(&priv->spec_priv);
+}
int ath9k_htc_init_debug(struct ath_hw *ah)
{
@@ -501,6 +505,8 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
if (!priv->debug.debugfs_phy)
return -ENOMEM;
+ ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
+
debugfs_create_file("tgt_int_stats", S_IRUSR, priv->debug.debugfs_phy,
priv, &fops_tgt_int_stats);
debugfs_create_file("tgt_tx_stats", S_IRUSR, priv->debug.debugfs_phy,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 4014c4be6e79..e8fa9448da24 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -53,6 +53,21 @@ static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
};
#endif
+static void ath9k_htc_op_ps_wakeup(struct ath_common *common)
+{
+ ath9k_htc_ps_wakeup((struct ath9k_htc_priv *) common->priv);
+}
+
+static void ath9k_htc_op_ps_restore(struct ath_common *common)
+{
+ ath9k_htc_ps_restore((struct ath9k_htc_priv *) common->priv);
+}
+
+static struct ath_ps_ops ath9k_htc_ps_ops = {
+ .wakeup = ath9k_htc_op_ps_wakeup,
+ .restore = ath9k_htc_op_ps_restore,
+};
+
static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
{
int time_left;
@@ -87,6 +102,7 @@ static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
wiphy_rfkill_stop_polling(hw->wiphy);
ath9k_deinit_leds(priv);
+ ath9k_htc_deinit_debug(priv);
ieee80211_unregister_hw(hw);
ath9k_rx_cleanup(priv);
ath9k_tx_cleanup(priv);
@@ -449,6 +465,14 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
priv->ah->opmode = NL80211_IFTYPE_STATION;
+
+ priv->spec_priv.ah = priv->ah;
+ priv->spec_priv.spec_config.enabled = 0;
+ priv->spec_priv.spec_config.short_repeat = false;
+ priv->spec_priv.spec_config.count = 8;
+ priv->spec_priv.spec_config.endless = false;
+ priv->spec_priv.spec_config.period = 0x12;
+ priv->spec_priv.spec_config.fft_period = 0x02;
}
static int ath9k_init_priv(struct ath9k_htc_priv *priv,
@@ -478,6 +502,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
common = ath9k_hw_common(ah);
common->ops = &ah->reg_ops;
+ common->ps_ops = &ath9k_htc_ps_ops;
common->bus_ops = &ath9k_usb_bus_ops;
common->ah = ah;
common->hw = priv->hw;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 994fff1ff519..92d5a6c5a225 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -314,6 +314,10 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
mod_timer(&priv->tx.cleanup_timer,
jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
+ /* perform spectral scan if requested. */
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
+ priv->spec_priv.spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_cmn_spectral_scan_trigger(common, &priv->spec_priv);
err:
ath9k_htc_ps_restore(priv);
return ret;
@@ -1443,7 +1447,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- if (priv->ah->sw_mgmt_crypto &&
+ if (priv->ah->sw_mgmt_crypto_tx &&
key->cipher == WLAN_CIPHER_SUITE_CCMP)
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
@@ -1687,7 +1691,9 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
-static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
+static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -1701,7 +1707,8 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
mutex_unlock(&priv->mutex);
}
-static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
+static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index f0484b1b617e..a0f58e2aa553 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -946,7 +946,7 @@ static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
struct ath_htc_rx_status *rxstatus)
{
- rx_stats->rs_datalen = rxstatus->rs_datalen;
+ rx_stats->rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
rx_stats->rs_status = rxstatus->rs_status;
rx_stats->rs_phyerr = rxstatus->rs_phyerr;
rx_stats->rs_rssi = rxstatus->rs_rssi;
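The rs_datalen fix matters because the HTC target reports the RX status in big-endian byte order over USB; reading the raw field on a little-endian host yields a byte-swapped length. A small standalone illustration of the failure mode and the fix:

#include <stdint.h>
#include <stdio.h>

/* A big-endian length of 64 (wire bytes 0x00 0x40) read raw as a
 * native u16 on a little-endian host appears as 0x4000 = 16384. */
static uint16_t be16_to_host(uint16_t raw)
{
	return (uint16_t)((raw >> 8) | (raw << 8));
}

int main(void)
{
	uint16_t raw = 0x4000;	/* little-endian view of wire bytes 00 40 */

	printf("raw: %u, converted: %u\n", raw, be16_to_host(raw));
	return 0;
}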
@@ -1012,6 +1012,20 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* separately to avoid doing two lookups for a rate for each frame.
*/
hdr = (struct ieee80211_hdr *)skb->data;
+
+ /*
+ * Process PHY errors and return so that the packet
+ * can be dropped.
+ */
+ if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
+ /* TODO: Not using DFS processing now. */
+ if (ath_cmn_process_fft(&priv->spec_priv, hdr,
+ &rx_stats, rx_status->mactime)) {
+ /* TODO: Code to collect spectral scan statistics */
+ }
+ goto rx_next;
+ }
+
if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
&decrypt_error, priv->rxfilter))
goto rx_next;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 8e85efeaeffc..88769b64b20b 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -41,10 +41,9 @@ static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
ath9k_hw_ops(ah)->set_desc_link(ds, link);
}
-static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u8 rxchainmask,
- bool longcal)
+static inline int ath9k_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
{
return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2ad605760e21..6d4b273469b1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/time.h>
#include <linux/bitops.h>
+#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "hw.h"
@@ -446,8 +447,16 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
common->macaddr[2 * i] = eeval >> 8;
common->macaddr[2 * i + 1] = eeval & 0xff;
}
- if (sum == 0 || sum == 0xffff * 3)
- return -EADDRNOTAVAIL;
+ if (!is_valid_ether_addr(common->macaddr)) {
+ ath_err(common,
+ "eeprom contains invalid mac address: %pM\n",
+ common->macaddr);
+
+ random_ether_addr(common->macaddr);
+ ath_err(common,
+ "random mac address will be used: %pM\n",
+ common->macaddr);
+ }
return 0;
}
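Rather than failing probe on an all-zero or all-ones EEPROM address, the driver now logs the bad address and falls back to a random one: is_valid_ether_addr() rejects zero, broadcast, and multicast addresses, and random_ether_addr() produces a locally administered unicast address. A userspace approximation of the two helpers, for illustration rather than as the kernel implementations:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Userspace approximations of the kernel helpers, for illustration. */
static bool valid_ether_addr(const uint8_t *a)
{
	static const uint8_t zero[6];
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(a, zero, 6) && memcmp(a, bcast, 6) && !(a[0] & 0x01);
}

static void random_ether(uint8_t *a)
{
	int i;

	for (i = 0; i < 6; i++)
		a[i] = (uint8_t)rand();
	a[0] &= 0xfe;	/* clear the multicast bit */
	a[0] |= 0x02;	/* set the locally administered bit */
}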
@@ -1576,16 +1585,22 @@ static void ath9k_hw_init_mfp(struct ath_hw *ah)
* frames when constructing CCMP AAD. */
REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
0xc7ff);
- ah->sw_mgmt_crypto = false;
+ if (AR_SREV_9271(ah) || AR_DEVID_7010(ah))
+ ah->sw_mgmt_crypto_tx = true;
+ else
+ ah->sw_mgmt_crypto_tx = false;
+ ah->sw_mgmt_crypto_rx = false;
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
/* Disable hardware crypto for management frames */
REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
- ah->sw_mgmt_crypto = true;
+ ah->sw_mgmt_crypto_tx = true;
+ ah->sw_mgmt_crypto_rx = true;
} else {
- ah->sw_mgmt_crypto = true;
+ ah->sw_mgmt_crypto_tx = true;
+ ah->sw_mgmt_crypto_rx = true;
}
}
@@ -1932,6 +1947,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REGWRITE_BUFFER_FLUSH(ah);
+ ath9k_hw_gen_timer_start_tsf2(ah);
+
ath9k_hw_init_desc(ah);
if (ath9k_hw_btcoex_is_enabled(ah))
@@ -1940,8 +1957,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_check_bt(ah);
- ath9k_hw_loadnf(ah, chan);
- ath9k_hw_start_nfcal(ah, true);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ath9k_hw_loadnf(ah, chan);
+ ath9k_hw_start_nfcal(ah, true);
+ }
if (AR_SREV_9300_20_OR_LATER(ah))
ar9003_hw_bb_watchdog_config(ah);
@@ -2309,7 +2328,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
- unsigned int chip_chainmask;
u16 eeval;
u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
@@ -2329,31 +2347,40 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
- if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
- ath_err(common,
- "no band has been marked as supported in EEPROM\n");
- return -EINVAL;
+
+ if (eeval & AR5416_OPFLAGS_11A) {
+ if (ah->disable_5ghz)
+ ath_warn(common, "disabling 5GHz band\n");
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;
}
- if (eeval & AR5416_OPFLAGS_11A)
- pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;
+ if (eeval & AR5416_OPFLAGS_11G) {
+ if (ah->disable_2ghz)
+ ath_warn(common, "disabling 2GHz band\n");
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
+ }
- if (eeval & AR5416_OPFLAGS_11G)
- pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
+ if ((pCap->hw_caps & (ATH9K_HW_CAP_2GHZ | ATH9K_HW_CAP_5GHZ)) == 0) {
+ ath_err(common, "both bands are disabled\n");
+ return -EINVAL;
+ }
if (AR_SREV_9485(ah) ||
AR_SREV_9285(ah) ||
AR_SREV_9330(ah) ||
AR_SREV_9565(ah))
- chip_chainmask = 1;
- else if (AR_SREV_9462(ah))
- chip_chainmask = 3;
+ pCap->chip_chainmask = 1;
else if (!AR_SREV_9280_20_OR_LATER(ah))
- chip_chainmask = 7;
- else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
- chip_chainmask = 3;
+ pCap->chip_chainmask = 7;
+ else if (!AR_SREV_9300_20_OR_LATER(ah) ||
+ AR_SREV_9340(ah) ||
+ AR_SREV_9462(ah) ||
+ AR_SREV_9531(ah))
+ pCap->chip_chainmask = 3;
else
- chip_chainmask = 7;
+ pCap->chip_chainmask = 7;
pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
@@ -2371,8 +2398,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
/* Use rx_chainmask from EEPROM. */
pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
- pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
- pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);
+ pCap->tx_chainmask = fixup_chainmask(pCap->chip_chainmask, pCap->tx_chainmask);
+ pCap->rx_chainmask = fixup_chainmask(pCap->chip_chainmask, pCap->rx_chainmask);
ah->txchainmask = pCap->tx_chainmask;
ah->rxchainmask = pCap->rx_chainmask;
@@ -2886,6 +2913,16 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_gettsf32);
+void ath9k_hw_gen_timer_start_tsf2(struct ath_hw *ah)
+{
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ if (timer_table->tsf2_enabled) {
+ REG_SET_BIT(ah, AR_DIRECT_CONNECT, AR_DC_AP_STA_EN);
+ REG_SET_BIT(ah, AR_RESET_TSF, AR_RESET_TSF2_ONCE);
+ }
+}
+
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
void (*overflow)(void *),
@@ -2896,7 +2933,11 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
struct ath_gen_timer *timer;
if ((timer_index < AR_FIRST_NDP_TIMER) ||
- (timer_index >= ATH_MAX_GEN_TIMER))
+ (timer_index >= ATH_MAX_GEN_TIMER))
+ return NULL;
+
+ if ((timer_index > AR_FIRST_NDP_TIMER) &&
+ !AR_SREV_9300_20_OR_LATER(ah))
return NULL;
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
@@ -2910,6 +2951,11 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
timer->overflow = overflow;
timer->arg = arg;
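+	/* First TSF2-clocked timer: enable TSF2 and sync it once */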
+ if ((timer_index > AR_FIRST_NDP_TIMER) && !timer_table->tsf2_enabled) {
+ timer_table->tsf2_enabled = true;
+ ath9k_hw_gen_timer_start_tsf2(ah);
+ }
+
return timer;
}
EXPORT_SYMBOL(ath_gen_timer_alloc);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 975074fc11bc..1cbd33551513 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -217,8 +217,8 @@
#define AH_WOW_BEACON_MISS BIT(3)
enum ath_hw_txq_subtype {
- ATH_TXQ_AC_BE = 0,
- ATH_TXQ_AC_BK = 1,
+ ATH_TXQ_AC_BK = 0,
+ ATH_TXQ_AC_BE = 1,
ATH_TXQ_AC_VI = 2,
ATH_TXQ_AC_VO = 3,
};
@@ -244,13 +244,20 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_2GHZ = BIT(11),
ATH9K_HW_CAP_5GHZ = BIT(12),
ATH9K_HW_CAP_APM = BIT(13),
+#ifdef CONFIG_ATH9K_PCOEM
ATH9K_HW_CAP_RTT = BIT(14),
ATH9K_HW_CAP_MCI = BIT(15),
- ATH9K_HW_CAP_DFS = BIT(16),
- ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
- ATH9K_HW_CAP_PAPRD = BIT(18),
- ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(19),
- ATH9K_HW_CAP_BT_ANT_DIV = BIT(20),
+ ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(16),
+ ATH9K_HW_CAP_BT_ANT_DIV = BIT(17),
+#else
+ ATH9K_HW_CAP_RTT = 0,
+ ATH9K_HW_CAP_MCI = 0,
+ ATH9K_HW_WOW_DEVICE_CAPABLE = 0,
+ ATH9K_HW_CAP_BT_ANT_DIV = 0,
+#endif
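+	/*
+	 * With CONFIG_ATH9K_PCOEM disabled, the PC-OEM-only capability
+	 * bits above are defined as 0, so "hw_caps & ATH9K_HW_CAP_xxx"
+	 * checks compile to constant false without #ifdefs at the call
+	 * sites.
+	 */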
+ ATH9K_HW_CAP_DFS = BIT(18),
+ ATH9K_HW_CAP_PAPRD = BIT(19),
+ ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(20),
};
/*
@@ -269,6 +276,7 @@ struct ath9k_hw_capabilities {
u16 rts_aggr_limit;
u8 tx_chainmask;
u8 rx_chainmask;
+ u8 chip_chainmask;
u8 max_txchains;
u8 max_rxchains;
u8 num_gpio_pins;
@@ -322,6 +330,7 @@ struct ath9k_ops_config {
bool alt_mingainidx;
bool no_pll_pwrsave;
bool tx_gain_buffalo;
+ bool led_active_high;
};
enum ath9k_int {
@@ -517,6 +526,7 @@ struct ath_gen_timer {
struct ath_gen_timer_table {
struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER];
u16 timer_mask;
+ bool tsf2_enabled;
};
struct ath_hw_antcomb_conf {
@@ -681,10 +691,8 @@ struct ath_hw_ops {
bool power_off);
void (*rx_enable)(struct ath_hw *ah);
void (*set_desc_link)(void *ds, u32 link);
- bool (*calibrate)(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u8 rxchainmask,
- bool longcal);
+ int (*calibrate)(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal);
bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked,
u32 *sync_cause_p);
void (*set_txdesc)(struct ath_hw *ah, void *ds,
@@ -726,6 +734,7 @@ enum ath_cal_list {
#define AH_USE_EEPROM 0x1
#define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
#define AH_FASTCC 0x4
+#define AH_NO_EEP_SWAP 0x8 /* Do not swap EEPROM data */
struct ath_hw {
struct ath_ops reg_ops;
@@ -747,7 +756,8 @@ struct ath_hw {
} eeprom;
const struct eeprom_ops *eep_ops;
- bool sw_mgmt_crypto;
+ bool sw_mgmt_crypto_tx;
+ bool sw_mgmt_crypto_rx;
bool is_pciexpress;
bool aspm_enabled;
bool is_monitoring;
@@ -924,10 +934,16 @@ struct ath_hw {
bool is_clk_25mhz;
int (*get_mac_revision)(void);
int (*external_reset)(void);
+ bool disable_2ghz;
+ bool disable_5ghz;
const struct firmware *eeprom_blob;
struct ath_dynack dynack;
+
+ bool tpc_enabled;
+ u8 tx_power[Ar5416RateSize];
+ u8 tx_power_stbc[Ar5416RateSize];
};
struct ath_bus_ops {
@@ -1027,6 +1043,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
struct ath_gen_timer *timer,
u32 timer_next,
u32 timer_period);
+void ath9k_hw_gen_timer_start_tsf2(struct ath_hw *ah);
void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
@@ -1067,6 +1084,8 @@ int ar9003_paprd_init_table(struct ath_hw *ah);
bool ar9003_paprd_is_done(struct ath_hw *ah);
bool ar9003_is_paprd_enabled(struct ath_hw *ah);
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
+ struct ath9k_channel *chan);
/* Hardware family op attach helpers */
int ar5008_hw_attach_phy_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3bd030494986..d1c39346b264 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -88,6 +88,21 @@ static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
static void ath9k_deinit_softc(struct ath_softc *sc);
+static void ath9k_op_ps_wakeup(struct ath_common *common)
+{
+ ath9k_ps_wakeup((struct ath_softc *) common->priv);
+}
+
+static void ath9k_op_ps_restore(struct ath_common *common)
+{
+ ath9k_ps_restore((struct ath_softc *) common->priv);
+}
+
+static struct ath_ps_ops ath9k_ps_ops = {
+ .wakeup = ath9k_op_ps_wakeup,
+ .restore = ath9k_op_ps_restore,
+};
+
/*
* Read and write, they both share the same lock. We do this to serialize
* reads and writes on Atheros 802.11n PCI devices only. This is required
@@ -172,17 +187,20 @@ static void ath9k_reg_notifier(struct wiphy *wiphy,
ath_reg_notifier_apply(wiphy, request, reg);
/* Set tx power */
- if (ah->curchan) {
- sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power;
- ath9k_ps_wakeup(sc);
- ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
- sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
- /* synchronize DFS detector if regulatory domain changed */
- if (sc->dfs_detector != NULL)
- sc->dfs_detector->set_dfs_domain(sc->dfs_detector,
- request->dfs_region);
- ath9k_ps_restore(sc);
- }
+ if (!ah->curchan)
+ return;
+
+ sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+ ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
+ sc->cur_chan->txpower,
+ &sc->cur_chan->cur_txpower);
+ /* synchronize DFS detector if regulatory domain changed */
+ if (sc->dfs_detector != NULL)
+ sc->dfs_detector->set_dfs_domain(sc->dfs_detector,
+ request->dfs_region);
+ ath9k_ps_restore(sc);
}
/*
@@ -348,12 +366,13 @@ static void ath9k_init_misc(struct ath_softc *sc)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
- sc->spec_config.enabled = 0;
- sc->spec_config.short_repeat = true;
- sc->spec_config.count = 8;
- sc->spec_config.endless = false;
- sc->spec_config.period = 0xFF;
- sc->spec_config.fft_period = 0xF;
+ sc->spec_priv.ah = sc->sc_ah;
+ sc->spec_priv.spec_config.enabled = 0;
+ sc->spec_priv.spec_config.short_repeat = true;
+ sc->spec_priv.spec_config.count = 8;
+ sc->spec_priv.spec_config.endless = false;
+ sc->spec_priv.spec_config.period = 0xFF;
+ sc->spec_priv.spec_config.fft_period = 0xF;
}
static void ath9k_init_pcoem_platform(struct ath_softc *sc)
@@ -362,6 +381,9 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
+ if (!IS_ENABLED(CONFIG_ATH9K_PCOEM))
+ return;
+
if (common->bus_ops->ath_bus_type != ATH_PCI)
return;
@@ -419,6 +441,9 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
ah->config.no_pll_pwrsave = true;
ath_info(common, "Disable PLL PowerSave\n");
}
+
+ if (sc->driver_data & ATH9K_PCI_LED_ACT_HI)
+ ah->config.led_active_high = true;
}
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@@ -507,10 +532,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->reg_ops.read = ath9k_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
ah->reg_ops.rmw = ath9k_reg_rmw;
- sc->sc_ah = ah;
pCap = &ah->caps;
common = ath9k_hw_common(ah);
+
+ /* Will be cleared in ath9k_start() */
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
+ sc->sc_ah = ah;
sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
sc->tx99_power = MAX_RATE_POWER + 1;
init_waitqueue_head(&sc->tx_wait);
@@ -528,10 +557,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->is_clk_25mhz = pdata->is_clk_25mhz;
ah->get_mac_revision = pdata->get_mac_revision;
ah->external_reset = pdata->external_reset;
+ ah->disable_2ghz = pdata->disable_2ghz;
+ ah->disable_5ghz = pdata->disable_5ghz;
+ if (!pdata->endian_check)
+ ah->ah_flags |= AH_NO_EEP_SWAP;
}
common->ops = &ah->reg_ops;
common->bus_ops = bus_ops;
+ common->ps_ops = &ath9k_ps_ops;
common->ah = ah;
common->hw = sc->hw;
common->priv = sc;
@@ -866,9 +900,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
common = ath9k_hw_common(ah);
ath9k_set_hw_capab(sc, hw);
- /* Will be cleared in ath9k_start() */
- set_bit(ATH_OP_INVALID, &common->op_flags);
-
/* Initialize regulatory */
error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
ath9k_reg_notifier);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 2343f56e6498..b829263e3d0a 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -371,9 +371,15 @@ void ath_ani_calibrate(unsigned long data)
/* Perform calibration if necessary */
if (longcal || shortcal) {
- common->ani.caldone =
- ath9k_hw_calibrate(ah, ah->curchan,
- ah->rxchainmask, longcal);
+ int ret = ath9k_hw_calibrate(ah, ah->curchan, ah->rxchainmask,
+ longcal);
+ if (ret < 0) {
+ common->ani.caldone = 0;
+ ath9k_queue_reset(sc, RESET_TYPE_CALIBRATION);
+ return;
+ }
+
+ common->ani.caldone = ret;
}
ath_dbg(common, ANI,
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 275205ab5f15..3e58bfa0c1fd 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
q = ATH9K_NUM_TX_QUEUES - 3;
break;
case ATH9K_TX_QUEUE_DATA:
- for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
- if (ah->txq[q].tqi_type ==
- ATH9K_TX_QUEUE_INACTIVE)
- break;
- if (q == ATH9K_NUM_TX_QUEUES) {
- ath_err(common, "No available TX queue\n");
- return -1;
- }
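+		/* Data queues are fixed-mapped: the queue subtype (enum
+		 * ath_hw_txq_subtype, BK=0 .. VO=3) directly selects the
+		 * HW queue. */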
+ q = qinfo->tqi_subtype;
break;
default:
ath_err(common, "Invalid TX queue type: %u\n", type);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index aa69ceaad0be..e55fa11894b6 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -704,7 +704,7 @@ struct ath_tx_info {
enum ath9k_pkt_type type;
enum ath9k_key_type keytype;
u8 keyix;
- u8 txpower;
+ u8 txpower[4];
};
struct ath_hw;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 4f18a6be0c7d..9a72640237cb 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -54,7 +54,8 @@ u8 ath9k_parse_mpdudensity(u8 mpdudensity)
}
}
-static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
+static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq,
+ bool sw_pending)
{
bool pending = false;
@@ -65,6 +66,9 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
goto out;
}
+ if (!sw_pending)
+ goto out;
+
if (txq->mac80211_qnum >= 0) {
struct list_head *list;
@@ -229,8 +233,9 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath9k_calculate_summary_state(sc, sc->cur_chan);
ath_startrecv(sc);
- ath9k_cmn_update_txpow(ah, sc->curtxpow,
- sc->cur_chan->txpower, &sc->curtxpow);
+ ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
+ sc->cur_chan->txpower,
+ &sc->cur_chan->cur_txpower);
clear_bit(ATH_OP_HW_RESET, &common->op_flags);
if (!sc->cur_chan->offchannel && start) {
@@ -270,7 +275,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
return true;
}
-int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
+static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -281,6 +286,7 @@ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
__ath_cancel_work(sc);
tasklet_disable(&sc->intr_tq);
+ tasklet_disable(&sc->bcon_tasklet);
spin_lock_bh(&sc->sc_pcu_lock);
if (!sc->cur_chan->offchannel) {
@@ -326,6 +332,7 @@ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
out:
spin_unlock_bh(&sc->sc_pcu_lock);
+ tasklet_enable(&sc->bcon_tasklet);
tasklet_enable(&sc->intr_tq);
return r;
@@ -505,16 +512,13 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
- /* shared irq, not for us */
+ if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ return IRQ_NONE;
+ /* shared irq, not for us */
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
- if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
- ath9k_hw_kill_interrupts(ah);
- return IRQ_HANDLED;
- }
-
/*
* Figure out the reason(s) for the interrupt. Note
* that the hal returns a pseudo-ISR that may include
@@ -525,6 +529,9 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
+ if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ return IRQ_HANDLED;
+
/*
* If there are no status bits set, then this interrupt was not
* for me (should have been caught above).
@@ -539,11 +546,10 @@ irqreturn_t ath_isr(int irq, void *dev)
sched = true;
/*
- * If a FATAL or RXORN interrupt is received, we have to reset the
- * chip immediately.
+ * If a FATAL interrupt is received, we have to reset the chip
+ * immediately.
*/
- if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
- !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
+ if (status & ATH9K_INT_FATAL)
goto chip_reset;
if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
@@ -598,23 +604,37 @@ chip_reset:
#undef SCHED_INTR
}
-int ath_reset(struct ath_softc *sc)
+/*
+ * This function is called when a HW reset cannot be deferred
+ * and has to be immediate.
+ */
+int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int r;
+ ath9k_hw_kill_interrupts(sc->sc_ah);
+ set_bit(ATH_OP_HW_RESET, &common->op_flags);
+
ath9k_ps_wakeup(sc);
- r = ath_reset_internal(sc, NULL);
+ r = ath_reset_internal(sc, hchan);
ath9k_ps_restore(sc);
return r;
}
+/*
+ * When a HW reset can be deferred, it is added to the
+ * hw_reset_work workqueue, but we set ATH_OP_HW_RESET before
+ * queueing.
+ */
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
#ifdef CONFIG_ATH9K_DEBUGFS
RESET_STAT_INC(sc, type);
#endif
+ ath9k_hw_kill_interrupts(sc->sc_ah);
set_bit(ATH_OP_HW_RESET, &common->op_flags);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
@@ -623,7 +643,9 @@ void ath_reset_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
- ath_reset(sc);
+ ath9k_ps_wakeup(sc);
+ ath_reset_internal(sc, NULL);
+ ath9k_ps_restore(sc);
}
/**********************/
@@ -707,7 +729,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->led_pin >= 0) {
ath9k_hw_cfg_output(ah, ah->led_pin,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ ath9k_hw_set_gpio(ah, ah->led_pin,
+ (ah->config.led_active_high) ? 1 : 0);
}
/*
@@ -849,7 +872,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
if (ah->led_pin >= 0) {
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_set_gpio(ah, ah->led_pin,
+ (ah->config.led_active_high) ? 0 : 1);
ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
}
@@ -865,6 +889,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
&sc->cur_chan->chandef);
ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
ath9k_hw_phy_disable(ah);
ath9k_hw_configpcipowersave(ah, true);
@@ -873,7 +900,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
- set_bit(ATH_OP_INVALID, &common->op_flags);
sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@@ -1036,7 +1062,7 @@ static void ath9k_set_offchannel_state(struct ath_softc *sc)
eth_zero_addr(common->curbssid);
eth_broadcast_addr(common->bssidmask);
- ether_addr_copy(common->macaddr, vif->addr);
+ memcpy(common->macaddr, vif->addr, ETH_ALEN);
common->curaid = 0;
ah->opmode = vif->type;
ah->imask &= ~ATH9K_INT_SWBA;
@@ -1077,7 +1103,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ath9k_calculate_iter_data(sc, ctx, &iter_data);
if (iter_data.has_hw_macaddr)
- ether_addr_copy(common->macaddr, iter_data.hw_macaddr);
+ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
@@ -1167,7 +1193,8 @@ static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
for (i = 0; i < IEEE80211_NUM_ACS; i++)
vif->hw_queue[i] = i;
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)
vif->cab_queue = hw->queues - 2;
else
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
@@ -1323,78 +1350,6 @@ static void ath9k_disable_ps(struct ath_softc *sc)
ath_dbg(common, PS, "PowerSave disabled\n");
}
-void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 rxfilter;
-
- if (config_enabled(CONFIG_ATH9K_TX99))
- return;
-
- if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
- ath_err(common, "spectrum analyzer not implemented on this hardware\n");
- return;
- }
-
- ath9k_ps_wakeup(sc);
- rxfilter = ath9k_hw_getrxfilter(ah);
- ath9k_hw_setrxfilter(ah, rxfilter |
- ATH9K_RX_FILTER_PHYRADAR |
- ATH9K_RX_FILTER_PHYERR);
-
- /* TODO: usually this should not be neccesary, but for some reason
- * (or in some mode?) the trigger must be called after the
- * configuration, otherwise the register will have its values reset
- * (on my ar9220 to value 0x01002310)
- */
- ath9k_spectral_scan_config(hw, sc->spectral_mode);
- ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
- ath9k_ps_restore(sc);
-}
-
-int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
- enum spectral_mode spectral_mode)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
- ath_err(common, "spectrum analyzer not implemented on this hardware\n");
- return -1;
- }
-
- switch (spectral_mode) {
- case SPECTRAL_DISABLED:
- sc->spec_config.enabled = 0;
- break;
- case SPECTRAL_BACKGROUND:
- /* send endless samples.
- * TODO: is this really useful for "background"?
- */
- sc->spec_config.endless = 1;
- sc->spec_config.enabled = 1;
- break;
- case SPECTRAL_CHANSCAN:
- case SPECTRAL_MANUAL:
- sc->spec_config.endless = 0;
- sc->spec_config.enabled = 1;
- break;
- default:
- return -1;
- }
-
- ath9k_ps_wakeup(sc);
- ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config);
- ath9k_ps_restore(sc);
-
- sc->spectral_mode = spectral_mode;
-
- return 0;
-}
-
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_softc *sc = hw->priv;
@@ -1455,8 +1410,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_POWER) {
ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
sc->cur_chan->txpower = 2 * conf->power_level;
- ath9k_cmn_update_txpow(ah, sc->curtxpow,
- sc->cur_chan->txpower, &sc->curtxpow);
+ ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
+ sc->cur_chan->txpower,
+ &sc->cur_chan->cur_txpower);
}
mutex_unlock(&sc->mutex);
@@ -1553,6 +1509,40 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
return 0;
}
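+/*
+ * mac80211 sta_state hook, replacing the separate sta_add/sta_remove
+ * callbacks: stations are added on the AUTH->ASSOC transition and
+ * removed on ASSOC->AUTH.
+ */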
+static int ath9k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int ret = 0;
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = ath9k_sta_add(hw, vif, sta);
+ ath_dbg(common, CONFIG,
+ "Add station: %pM\n", sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ ret = ath9k_sta_remove(hw, vif, sta);
+ ath_dbg(common, CONFIG,
+ "Remove station: %pM\n", sta->addr);
+ }
+
+ if (ath9k_is_chanctx_enabled()) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ath_chanctx_event(sc, vif,
+ ATH_CHANCTX_EVENT_AUTHORIZED);
+ }
+ }
+
+ return ret;
+}
+
static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
struct ath_node *an,
bool set)
@@ -1677,7 +1667,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- if (sc->sc_ah->sw_mgmt_crypto &&
+ if (sc->sc_ah->sw_mgmt_crypto_tx &&
key->cipher == WLAN_CIPHER_SUITE_CCMP)
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
@@ -1737,17 +1727,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
bss_conf->bssid, bss_conf->assoc);
- ether_addr_copy(avp->bssid, bss_conf->bssid);
+ memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
avp->aid = bss_conf->aid;
avp->assoc = bss_conf->assoc;
ath9k_calculate_summary_state(sc, avp->chanctx);
-
- if (ath9k_is_chanctx_enabled()) {
- if (bss_conf->assoc)
- ath_chanctx_event(sc, vif,
- ATH_CHANCTX_EVENT_ASSOC);
- }
}
if (changed & BSS_CHANGED_IBSS) {
@@ -1843,6 +1827,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
u16 tid, u16 *ssn, u8 buf_size)
{
struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
bool flush = false;
int ret = 0;
@@ -1854,6 +1839,12 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
+ if (ath9k_is_chanctx_enabled()) {
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
+ ret = -EBUSY;
+ break;
+ }
+ }
ath9k_ps_wakeup(sc);
ret = ath_tx_aggr_start(sc, sta, tid, ssn);
if (!ret)
@@ -1967,7 +1958,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
mutex_unlock(&sc->mutex);
}
-static bool ath9k_has_tx_pending(struct ath_softc *sc)
+static bool ath9k_has_tx_pending(struct ath_softc *sc,
+ bool sw_pending)
{
int i, npend = 0;
@@ -1975,7 +1967,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
if (!ATH_TXQ_SETUP(sc, i))
continue;
- npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+ npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i],
+ sw_pending);
if (npend)
break;
}
@@ -1987,18 +1980,38 @@ static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (ath9k_is_chanctx_enabled()) {
+ if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+ goto flush;
+ /*
+ * If MCC is active, extend the flush timeout
+ * and wait for the HW/SW queues to become
+ * empty. This needs to be done outside the
+ * sc->mutex lock to allow the channel scheduler
+ * to switch channel contexts.
+ *
+ * The vif queues have been stopped in mac80211,
+ * so there won't be any incoming frames.
+ */
+ __ath9k_flush(hw, queues, drop, true, true);
+ return;
+ }
+flush:
mutex_lock(&sc->mutex);
- __ath9k_flush(hw, queues, drop);
+ __ath9k_flush(hw, queues, drop, true, false);
mutex_unlock(&sc->mutex);
}
-void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
+ bool sw_pending, bool timeout_override)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- int timeout = HZ / 5; /* 200 ms */
+ int timeout;
bool drain_txq;
cancel_delayed_work_sync(&sc->tx_complete_work);
@@ -2013,7 +2026,17 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
return;
}
- if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc),
+ spin_lock_bh(&sc->chan_lock);
+ if (timeout_override)
+ timeout = HZ / 5;
+ else
+ timeout = sc->cur_chan->flush_timeout;
+ spin_unlock_bh(&sc->chan_lock);
+
+ ath_dbg(common, CHAN_CTX,
+ "Flush timeout: %d\n", jiffies_to_msecs(timeout));
+
+ if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc, sw_pending),
timeout) > 0)
drop = false;
@@ -2024,7 +2047,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
spin_unlock_bh(&sc->sc_pcu_lock);
if (!drain_txq)
- ath_reset(sc);
+ ath_reset(sc, NULL);
ath9k_ps_restore(sc);
}
@@ -2036,7 +2059,7 @@ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
- return ath9k_has_tx_pending(sc);
+ return ath9k_has_tx_pending(sc, true);
}
static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
@@ -2167,14 +2190,17 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
-static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
+static void ath9k_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
set_bit(ATH_OP_SCANNING, &common->op_flags);
}
-static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
+static void ath9k_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2183,6 +2209,28 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (sc->offchannel.roc_vif) {
+ ath_dbg(common, CHAN_CTX,
+ "%s: Aborting RoC\n", __func__);
+
+ del_timer_sync(&sc->offchannel.timer);
+ if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
+ ath_roc_complete(sc, true);
+ }
+
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
+ ath_dbg(common, CHAN_CTX,
+ "%s: Aborting HW scan\n", __func__);
+
+ del_timer_sync(&sc->offchannel.timer);
+ ath_scan_complete(sc, true);
+ }
+}
+
static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
@@ -2313,7 +2361,6 @@ static int ath9k_add_chanctx(struct ieee80211_hw *hw,
conf->def.chan->center_freq);
ath_chanctx_set_channel(sc, ctx, &conf->def);
- ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ASSIGN);
mutex_unlock(&sc->mutex);
return 0;
@@ -2370,6 +2417,8 @@ static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath_chanctx *ctx = ath_chanctx_get(conf);
int i;
+ ath9k_cancel_pending_offchannel(sc);
+
mutex_lock(&sc->mutex);
ath_dbg(common, CHAN_CTX,
@@ -2399,6 +2448,8 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ath_chanctx *ctx = ath_chanctx_get(conf);
int ac;
+ ath9k_cancel_pending_offchannel(sc);
+
mutex_lock(&sc->mutex);
ath_dbg(common, CHAN_CTX,
@@ -2422,7 +2473,11 @@ static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (struct ath_vif *) vif->drv_priv;
+ struct ath_beacon_config *cur_conf;
+ struct ath_chanctx *go_ctx;
+ unsigned long timeout;
bool changed = false;
+ u32 beacon_int;
if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
return;
@@ -2433,19 +2488,57 @@ static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
spin_lock_bh(&sc->chan_lock);
- if (sc->next_chan || (sc->cur_chan != avp->chanctx)) {
- sc->next_chan = avp->chanctx;
+ if (sc->next_chan || (sc->cur_chan != avp->chanctx))
changed = true;
+ spin_unlock_bh(&sc->chan_lock);
+
+ if (!changed)
+ goto out;
+
+ ath9k_cancel_pending_offchannel(sc);
+
+ go_ctx = ath_is_go_chanctx_present(sc);
+
+ if (go_ctx) {
+ /*
+ * Wait till the GO interface gets a chance
+ * to send out an NoA.
+ */
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.mgd_prepare_tx = true;
+ cur_conf = &go_ctx->beacon;
+ beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
+ spin_unlock_bh(&sc->chan_lock);
+
+ timeout = usecs_to_jiffies(beacon_int * 2);
+ init_completion(&sc->go_beacon);
+
+ mutex_unlock(&sc->mutex);
+
+ if (wait_for_completion_timeout(&sc->go_beacon,
+ timeout) == 0) {
+ ath_dbg(common, CHAN_CTX,
+ "Failed to send new NoA\n");
+
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.mgd_prepare_tx = false;
+ spin_unlock_bh(&sc->chan_lock);
+ }
+
+ mutex_lock(&sc->mutex);
}
+
ath_dbg(common, CHAN_CTX,
- "%s: Set chanctx state to FORCE_ACTIVE, changed: %d\n",
- __func__, changed);
+ "%s: Set chanctx state to FORCE_ACTIVE for vif: %pM\n",
+ __func__, vif->addr);
+
+ spin_lock_bh(&sc->chan_lock);
+ sc->next_chan = avp->chanctx;
sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
spin_unlock_bh(&sc->chan_lock);
- if (changed)
- ath_chanctx_set_next(sc, true);
-
+ ath_chanctx_set_next(sc, true);
+out:
mutex_unlock(&sc->mutex);
}
@@ -2468,6 +2561,24 @@ void ath9k_fill_chanctx_ops(void)
#endif
+static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ mutex_lock(&sc->mutex);
+ if (avp->chanctx)
+ *dbm = avp->chanctx->cur_txpower;
+ else
+ *dbm = sc->cur_chan->cur_txpower;
+ mutex_unlock(&sc->mutex);
+
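+	/* txpower is tracked in half-dBm units; convert to dBm */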
+ *dbm /= 2;
+
+ return 0;
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2477,8 +2588,7 @@ struct ieee80211_ops ath9k_ops = {
.remove_interface = ath9k_remove_interface,
.config = ath9k_config,
.configure_filter = ath9k_configure_filter,
- .sta_add = ath9k_sta_add,
- .sta_remove = ath9k_sta_remove,
+ .sta_state = ath9k_sta_state,
.sta_notify = ath9k_sta_notify,
.conf_tx = ath9k_conf_tx,
.bss_info_changed = ath9k_bss_info_changed,
@@ -2515,4 +2625,5 @@ struct ieee80211_ops ath9k_ops = {
#endif
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
+ .get_txpower = ath9k_get_txpower,
};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index c018dea0b2e8..f009b5b57e5e 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -30,6 +30,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+#ifdef CONFIG_ATH9K_PCOEM
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
PCI_VENDOR_ID_AZWAVE,
@@ -82,6 +83,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
PCI_VENDOR_ID_AZWAVE,
0x2C37),
.driver_data = ATH9K_PCI_BT_ANT_DIV },
+#endif
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
@@ -102,6 +104,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
+#ifdef CONFIG_ATH9K_PCOEM
/* PCI-E CUS198 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
@@ -294,10 +297,12 @@ static const struct pci_device_id ath_pci_id_table[] = {
PCI_VENDOR_ID_ASUSTEK,
0x850D),
.driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+#endif
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
+#ifdef CONFIG_ATH9K_PCOEM
/* PCI-E CUS217 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0034,
@@ -652,11 +657,14 @@ static const struct pci_device_id ath_pci_id_table[] = {
0x0036,
PCI_VENDOR_ID_DELL,
0x020E),
- .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ .driver_data = ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV |
+ ATH9K_PCI_LED_ACT_HI},
/* PCI-E AR9565 (WB335) */
{ PCI_VDEVICE(ATHEROS, 0x0036),
.driver_data = ATH9K_PCI_BT_ANT_DIV },
+#endif
{ 0 }
};
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6914e21816e4..7395afbc5124 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -870,7 +870,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
*/
if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
- if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
+ if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime))
RX_STAT_INC(rx_spectral);
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 2a938f4feac5..fb11a9172f38 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -892,10 +892,21 @@
(AR_SREV_9330((_ah)) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_12))
+#ifdef CONFIG_ATH9K_PCOEM
+#define AR_SREV_9462(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
#define AR_SREV_9485(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
+#define AR_SREV_9565(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
+#else
+#define AR_SREV_9462(_ah) 0
+#define AR_SREV_9485(_ah) 0
+#define AR_SREV_9565(_ah) 0
+#endif
+
#define AR_SREV_9485_11_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485) && \
+ (AR_SREV_9485(_ah) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11))
#define AR_SREV_9485_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485))
@@ -915,34 +926,30 @@
(AR_SREV_9285_12_OR_LATER(_ah) && \
((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
-#define AR_SREV_9462(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
#define AR_SREV_9462_20(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ (AR_SREV_9462(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
#define AR_SREV_9462_21(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ (AR_SREV_9462(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_21))
#define AR_SREV_9462_20_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ (AR_SREV_9462(_ah) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
#define AR_SREV_9462_21_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+ (AR_SREV_9462(_ah) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_21))
-#define AR_SREV_9565(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
#define AR_SREV_9565_10(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ (AR_SREV_9565(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
#define AR_SREV_9565_101(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ (AR_SREV_9565(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_101))
#define AR_SREV_9565_11(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ (AR_SREV_9565(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_11))
#define AR_SREV_9565_11_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ (AR_SREV_9565(_ah) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9565_11))
#define AR_SREV_9550(_ah) \
@@ -1598,6 +1605,7 @@ enum {
#define AR_RESET_TSF 0x8020
#define AR_RESET_TSF_ONCE 0x01000000
+#define AR_RESET_TSF2_ONCE 0x02000000
#define AR_MAX_CFP_DUR 0x8038
#define AR_CFP_VAL 0x0000FFFF
@@ -1716,6 +1724,8 @@ enum {
#define AR_TPC_CTS_S 8
#define AR_TPC_CHIRP 0x003f0000
#define AR_TPC_CHIRP_S 16
+#define AR_TPC_RPT 0x3f000000
+#define AR_TPC_RPT_S 24
#define AR_QUIET1 0x80fc
#define AR_QUIET1_NEXT_QUIET_S 0
@@ -1959,6 +1969,8 @@ enum {
#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000
#define AR_MAC_PCU_GEN_TIMER_TSF_SEL 0x83d8
+#define AR_DIRECT_CONNECT 0x83a0
+#define AR_DC_AP_STA_EN 0x00000001
#define AR_AES_MUTE_MASK0 0x805c
#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 40ab65e6882f..ac4781f37e78 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -99,7 +99,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
static void ath9k_tx99_deinit(struct ath_softc *sc)
{
- ath_reset(sc);
+ ath_reset(sc, NULL);
ath9k_ps_wakeup(sc);
ath9k_tx99_stop(sc);
@@ -127,7 +127,7 @@ static int ath9k_tx99_init(struct ath_softc *sc)
memset(&txctl, 0, sizeof(txctl));
txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
- ath_reset(sc);
+ ath_reset(sc, NULL);
ath9k_ps_wakeup(sc);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d6e54a3c88f6..e9bd02c2e844 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1096,6 +1096,37 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
}
}
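+/*
+ * Per-rate TX power: the minimum of the per-rate limit derived from the
+ * EEPROM (ah->tx_power[], or ah->tx_power_stbc[] for STBC rates) and the
+ * per-frame limit (fi->tx_power). PAPRD training frames use the dedicated
+ * training power instead.
+ */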
+static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
+ u8 rateidx)
+{
+ u8 max_power;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (sc->tx99_state)
+ return MAX_RATE_POWER;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ /* ar9002 is not supported for the moment */
+ return MAX_RATE_POWER;
+ }
+
+ if (!bf->bf_state.bfs_paprd) {
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath_frame_info *fi = get_frame_info(skb);
+
+ if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
+ max_power = min(ah->tx_power_stbc[rateidx],
+ fi->tx_power);
+ else
+ max_power = min(ah->tx_power[rateidx], fi->tx_power);
+ } else {
+ max_power = ah->paprd_training_power;
+ }
+
+ return max_power;
+}
+
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_info *info, int len, bool rts)
{
@@ -1166,6 +1197,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
is_40, is_sgi, is_sp);
if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
+
+ info->txpower[i] = ath_get_rate_txpower(sc, bf, rix);
continue;
}
@@ -1193,6 +1226,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
phy, rate->bitrate * 100, len, rix, is_sp);
+
+ info->txpower[i] = ath_get_rate_txpower(sc, bf, rix);
}
/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
@@ -1239,7 +1274,6 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
memset(&info, 0, sizeof(info));
info.is_first = true;
info.is_last = true;
- info.txpower = MAX_RATE_POWER;
info.qcu = txq->axq_qnum;
while (bf) {
@@ -2063,6 +2097,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
+ fi->tx_power = MAX_RATE_POWER;
if (!rate)
return;
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index b80b2138ce3c..dca6df13fd5b 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -994,7 +994,7 @@ static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
refsel0 = 0;
refsel1 = 1;
}
- chansel = byte_rev_table[chansel];
+ chansel = bitrev8(chansel);
} else {
if (freq == 2484) {
chansel = 10 + (freq - 2274) / 5;
@@ -1002,7 +1002,7 @@ static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
} else
chansel = 16 + (freq - 2272) / 5;
chansel *= 4;
- chansel = byte_rev_table[chansel];
+ chansel = bitrev8(chansel);
}
d1 = chansel;
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 650be79c7ac9..cfd0554cf140 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -86,7 +86,7 @@ static const struct radar_detector_specs fcc_radar_ref_types[] = {
FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
- FCC_PATTERN(4, 50, 100, 1000, 2000, 20, 1),
+ FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 20),
FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
};
@@ -105,7 +105,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
- JP_PATTERN(7, 50, 100, 1000, 2000, 20, 1),
+ JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
};
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index b71d2b33532d..267c35d1f699 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -494,7 +494,9 @@ out:
return ret;
}
-static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct wcn36xx *wcn = hw->priv;
@@ -502,7 +504,8 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
wcn36xx_smd_start_scan(wcn);
}
-static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index d9f4b30dd343..38332a6dfb3a 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -792,12 +792,13 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
}
static int wil_cfg80211_del_station(struct wiphy *wiphy,
- struct net_device *dev, const u8 *mac)
+ struct net_device *dev,
+ struct station_del_parameters *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
mutex_lock(&wil->mutex);
- wil6210_disconnect(wil, mac);
+ wil6210_disconnect(wil, params->mac, params->reason_code, false);
mutex_unlock(&wil->mutex);
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index 8d99021d27a8..3249562d93b4 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -32,6 +32,23 @@ void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
va_end(args);
}
+void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ if (net_ratelimit()) {
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ netdev_err(ndev, "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
+ }
+}
+
void wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
struct net_device *ndev = wil_to_ndev(wil);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 54a6ddc6301b..4e6e14501c2f 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -573,8 +573,10 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
if (!frame)
return -ENOMEM;
- if (copy_from_user(frame, buf, len))
+ if (copy_from_user(frame, buf, len)) {
+ kfree(frame);
return -EIO;
+ }
params.buf = frame;
params.len = len;
@@ -614,8 +616,10 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
return -ENOMEM;
rc = simple_write_to_buffer(wmi, len, ppos, buf, len);
- if (rc < 0)
+ if (rc < 0) {
+ kfree(wmi);
return rc;
+ }
cmd = &wmi[1];
cmdid = le16_to_cpu(wmi->id);
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 8c6f3b041f77..93c5cc16c515 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -15,7 +15,6 @@
*/
#include <linux/firmware.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/crc32.h>
#include "wil6210.h"
#include "fw.h"
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 44cb71f5ea5b..d4acf93a9a02 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -446,7 +446,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
if (size >= sizeof(*hdr)) {
wil_err_fw(wil, "Stop at offset %ld"
" record type %d [%zd bytes]\n",
- (const void *)hdr - data,
+ (long)((const void *)hdr - data),
le16_to_cpu(hdr->type), hdr_sz);
}
return -EINVAL;
@@ -471,7 +471,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name)
size_t sz;
const void *d;
- rc = request_firmware(&fw, name, wil_to_pcie_dev(wil));
+ rc = request_firmware(&fw, name, wil_to_dev(wil));
if (rc) {
wil_err_fw(wil, "Failed to load firmware %s\n", name);
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 90f416f239bd..4bcbd6297b3e 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -36,7 +36,8 @@
*/
#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
-#define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE
+#define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \
+ BIT_DMA_EP_RX_ICR_RX_HTRSH)
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
@@ -171,6 +172,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
u32 isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
@@ -182,12 +184,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
wil6210_mask_irq_rx(wil);
- if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
+ /* RX_DONE and RX_HTRSH interrupts are the same if interrupt
+ * moderation is not used. Interrupt moderation may cause an RX
+ * buffer overflow while RX_DONE is delayed. The required
+ * action is always the same: drain the accumulated packets
+ * from the RX ring.
+ */
+ if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) {
wil_dbg_irq(wil, "RX done\n");
- isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
+
+ if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)
+ wil_err_ratelimited(wil, "Received \"Rx buffer is in risk "
+ "of overflow\" interrupt\n");
+
+ isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH);
if (test_bit(wil_status_reset_done, &wil->status)) {
if (test_bit(wil_status_napi_en, &wil->status)) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ need_unmask = false;
napi_schedule(&wil->napi_rx);
} else {
wil_err(wil, "Got Rx interrupt while "
@@ -204,6 +218,10 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
/* Rx IRQ will be enabled when NAPI processing finished */
atomic_inc(&wil->isr_count_rx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_rx(wil);
+
return IRQ_HANDLED;
}
@@ -213,6 +231,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
u32 isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
@@ -231,6 +250,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
isr &= ~(BIT(25) - 1UL);
if (test_bit(wil_status_reset_done, &wil->status)) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ need_unmask = false;
napi_schedule(&wil->napi_tx);
} else {
wil_err(wil, "Got Tx interrupt while in reset\n");
@@ -243,6 +263,10 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
/* Tx IRQ will be enabled when NAPI processing finished */
atomic_inc(&wil->isr_count_tx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_tx(wil);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 6500caf8d609..8ff3fe34fe05 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -38,6 +38,65 @@ static unsigned int itr_trsh = WIL6210_ITR_TRSH_DEFAULT;
module_param(itr_trsh, uint, S_IRUGO);
MODULE_PARM_DESC(itr_trsh, " Interrupt moderation threshold, usecs.");
+/* We allow allocation of buffers larger than one page to support large
+ * packets. This is suboptimal performance-wise when the MTU exceeds the
+ * page size.
+ */
+unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - ETH_HLEN;
+static int mtu_max_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ /* Sets mtu_max directly. There is no need to restore it on an
+ * illegal value, since the error will make insmod fail anyway.
+ */
+ ret = param_set_uint(val, kp);
+ if (ret)
+ return ret;
+
+ if (mtu_max < 68 || mtu_max > IEEE80211_MAX_DATA_LEN_DMG)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static struct kernel_param_ops mtu_max_ops = {
+ .set = mtu_max_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO);
+MODULE_PARM_DESC(mtu_max, " Max MTU value.");
+
+static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
+static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
+
+static int ring_order_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ uint x;
+
+ ret = kstrtouint(val, 0, &x);
+ if (ret)
+ return ret;
+
+ if ((x < WIL_RING_SIZE_ORDER_MIN) || (x > WIL_RING_SIZE_ORDER_MAX))
+ return -EINVAL;
+
+ *((uint *)kp->arg) = x;
+
+ return 0;
+}
+
+static struct kernel_param_ops ring_order_ops = {
+ .set = ring_order_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO);
+MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
+module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO);
+MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
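+
+/* Example (values are illustrative, not recommendations):
+ *   modprobe wil6210 mtu_max=3000 rx_ring_order=10 tx_ring_order=10
+ */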
+
#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
@@ -74,7 +133,8 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
__raw_writel(*s++, d++);
}
-static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
+static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
+ u16 reason_code, bool from_event)
{
uint i;
struct net_device *ndev = wil_to_ndev(wil);
@@ -86,7 +146,9 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
sta->data_port_open = false;
if (sta->status != wil_sta_unused) {
- wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING);
+ if (!from_event)
+ wmi_disconnect_sta(wil, sta->addr, reason_code);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
@@ -118,7 +180,8 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
memset(&sta->stats, 0, sizeof(sta->stats));
}
-static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
+static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event)
{
int cid = -ENOENT;
struct net_device *ndev = wil_to_ndev(wil);
@@ -133,10 +196,10 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
}
if (cid >= 0) /* disconnect 1 peer */
- wil_disconnect_cid(wil, cid);
+ wil_disconnect_cid(wil, cid, reason_code, from_event);
else /* disconnect all */
for (cid = 0; cid < WIL6210_MAX_CID; cid++)
- wil_disconnect_cid(wil, cid);
+ wil_disconnect_cid(wil, cid, reason_code, from_event);
/* link state */
switch (wdev->iftype) {
@@ -145,8 +208,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
wil_link_off(wil);
if (test_bit(wil_status_fwconnected, &wil->status)) {
clear_bit(wil_status_fwconnected, &wil->status);
- cfg80211_disconnected(ndev,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
+ cfg80211_disconnected(ndev, reason_code,
NULL, 0, GFP_KERNEL);
} else if (test_bit(wil_status_fwconnecting, &wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
@@ -166,7 +228,7 @@ static void wil_disconnect_worker(struct work_struct *work)
struct wil6210_priv, disconnect_worker);
mutex_lock(&wil->mutex);
- _wil6210_disconnect(wil, NULL);
+ _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false);
mutex_unlock(&wil->mutex);
}
@@ -188,6 +250,7 @@ static void wil_scan_timer_fn(ulong x)
clear_bit(wil_status_fwready, &wil->status);
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
+ wil->recovery_state = fw_recovery_pending;
schedule_work(&wil->fw_error_worker);
}
@@ -223,6 +286,11 @@ static void wil_fw_error_worker(struct work_struct *work)
wil_dbg_misc(wil, "fw error worker\n");
+ if (!netif_running(wil_to_ndev(wil))) {
+ wil_info(wil, "No recovery - interface is down\n");
+ return;
+ }
+
/* increment @recovery_count if less then WIL6210_FW_RECOVERY_TO
* passed since last recovery attempt
*/
@@ -257,9 +325,12 @@ static void wil_fw_error_worker(struct work_struct *work)
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
+ wil_info(wil, "No recovery for AP-like interface\n");
/* recovery in these modes is done by upper layers */
break;
default:
+ wil_err(wil, "No recovery - unknown interface type %d\n",
+ wdev->iftype);
break;
}
mutex_unlock(&wil->mutex);
@@ -291,7 +362,7 @@ static void wil_connect_worker(struct work_struct *work)
wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
- rc = wil_vring_init_tx(wil, ringid, WIL6210_TX_RING_SIZE, cid, 0);
+ rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0);
wil->pending_connect_cid = -1;
if (rc == 0) {
wil->sta[cid].status = wil_sta_connected;
@@ -346,12 +417,23 @@ int wil_priv_init(struct wil6210_priv *wil)
return 0;
}
-void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
+/**
+ * wil6210_disconnect - disconnect one connection
+ * @wil: driver context
+ * @bssid: peer to disconnect, NULL to disconnect all
+ * @reason_code: Reason code for the Disassociation frame
+ * @from_event: whether invoked from the FW event handler
+ *
+ * Disconnect and release associated resources. If not invoked from the
+ * FW event handler, issue WMI command(s) to trigger MAC disconnect.
+ */
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event)
{
wil_dbg_misc(wil, "%s()\n", __func__);
del_timer_sync(&wil->connect_timer);
- _wil6210_disconnect(wil, bssid);
+ _wil6210_disconnect(wil, bssid, reason_code, from_event);
}
void wil_priv_deinit(struct wil6210_priv *wil)
@@ -363,7 +445,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
mutex_lock(&wil->mutex);
- wil6210_disconnect(wil, NULL);
+ wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
mutex_unlock(&wil->mutex);
wmi_event_flush(wil);
destroy_workqueue(wil->wmi_wq_conn);
@@ -395,7 +477,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
- u32 hw_state;
+ u32 x;
u32 rev_id;
bool is_sparrow = (wil->board->board == WIL_BOARD_SPARROW);
@@ -410,9 +492,25 @@ static int wil_target_reset(struct wil6210_priv *wil)
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
wil_halt_cpu(wil);
- C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); /* 40 MHz */
+
+ /* Clear Fw Download notification */
+ C(RGF_USER_USAGE_6, BIT(0));
if (is_sparrow) {
+ S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+ /* XTAL stabilization should take about 3ms */
+ usleep_range(5000, 7000);
+ x = R(RGF_CAF_PLL_LOCK_STATUS);
+ if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
+ wil_err(wil, "Xtal stabilization timeout\n"
+ "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
+ return -ETIME;
+ }
+ /* switch 10k to XTAL */
+ C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+ /* 40 MHz */
+ C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
}
@@ -453,13 +551,13 @@ static int wil_target_reset(struct wil6210_priv *wil)
/* wait until device ready. typical time is 200..250 msec */
do {
msleep(RST_DELAY);
- hw_state = R(RGF_USER_HW_MACHINE_STATE);
+ x = R(RGF_USER_HW_MACHINE_STATE);
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
- hw_state);
+ x);
return -ETIME;
}
- } while (hw_state != HW_MACHINE_BOOT_DONE);
+ } while (x != HW_MACHINE_BOOT_DONE);
/* TODO: Erez check rev_id != 1 */
if (!is_sparrow && (rev_id != 1))
@@ -535,7 +633,7 @@ int wil_reset(struct wil6210_priv *wil)
WARN_ON(test_bit(wil_status_napi_en, &wil->status));
cancel_work_sync(&wil->disconnect_worker);
- wil6210_disconnect(wil, NULL);
+ wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil->status = 0; /* prevent NAPI from being scheduled */
@@ -640,7 +738,7 @@ int __wil_up(struct wil6210_priv *wil)
return rc;
/* Rx VRING. After MAC and beacon */
- rc = wil_rx_init(wil);
+ rc = wil_rx_init(wil, 1 << rx_ring_order);
if (rc)
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 239965106c05..e81703ca7701 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -41,7 +41,7 @@ static int wil_change_mtu(struct net_device *ndev, int new_mtu)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- if (new_mtu < 68 || new_mtu > (TX_BUF_LEN - ETH_HLEN)) {
+ if (new_mtu < 68 || new_mtu > mtu_max) {
wil_err(wil, "invalid MTU %d\n", new_mtu);
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 2936ef0c18cb..e3f8bdce5abc 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -206,12 +206,10 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
- unsigned int sz = RX_BUF_LEN;
+ unsigned int sz = mtu_max + ETH_HLEN;
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d = &vring->va[i].rx;
dma_addr_t pa;
-
- /* TODO align */
struct sk_buff *skb = dev_alloc_skb(sz + headroom);
if (unlikely(!skb))
@@ -385,7 +383,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
struct vring_rx_desc *d;
struct sk_buff *skb;
dma_addr_t pa;
- unsigned int sz = RX_BUF_LEN;
+ unsigned int sz = mtu_max + ETH_HLEN;
u16 dmalen;
u8 ftype;
u8 ds_bits;
@@ -596,7 +594,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
wil_rx_refill(wil, v->size);
}
-int wil_rx_init(struct wil6210_priv *wil)
+int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
struct vring *vring = &wil->vring_rx;
int rc;
@@ -608,7 +606,7 @@ int wil_rx_init(struct wil6210_priv *wil)
return -EINVAL;
}
- vring->size = WIL6210_RX_RING_SIZE;
+ vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
return rc;
@@ -646,7 +644,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
.action = cpu_to_le32(WMI_VRING_CMD_ADD),
.vring_cfg = {
.tx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
+ .max_mpdu_size =
+ cpu_to_le16(mtu_max + ETH_HLEN),
.ring_size = cpu_to_le16(size),
},
.ringid = id,
@@ -927,8 +926,9 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "%s()\n", __func__);
if (avail < 1 + nr_frags) {
- wil_err(wil, "Tx ring full. No space for %d fragments\n",
- 1 + nr_frags);
+ wil_err_ratelimited(wil,
+ "Tx ring full. No space for %d fragments\n",
+ 1 + nr_frags);
return -ENOMEM;
}
_d = &vring->va[i].tx;
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index de046716d2b7..630aeb5fa7f4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -21,8 +21,8 @@
#define BUF_HW_OWNED (0)
/* size of max. Tx/Rx buffers, as supported by FW */
-#define RX_BUF_LEN (2242)
-#define TX_BUF_LEN (2242)
+#define TXRX_BUF_LEN_DEFAULT (2242)
+
/* how many bytes to reserve for rtap header? */
#define WIL6210_RTAP_SIZE (128)
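With the fixed RX_BUF_LEN/TX_BUF_LEN gone, every Rx/Tx buffer size in this patch is derived from the new mtu_max module parameter as mtu_max + ETH_HLEN. A sketch of the corresponding definition; the default shown is an assumption chosen so that the legacy 2242-byte buffer is preserved when the parameter is left untouched:

	/* assumed definition in main.c: default keeps the legacy buffer size */
	unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - ETH_HLEN;
	module_param(mtu_max, uint, S_IRUGO);
	MODULE_PARM_DESC(mtu_max, "Max MTU; Rx/Tx buffers use mtu_max + ETH_HLEN");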
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index ce6488e42091..c6ec5b99ac7d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -24,6 +24,7 @@
#include "wil_platform.h"
extern bool no_fw_recovery;
+extern unsigned int mtu_max;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw"
@@ -48,8 +49,11 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
-#define WIL6210_RX_RING_SIZE (128)
-#define WIL6210_TX_RING_SIZE (512)
+#define WIL_RX_RING_SIZE_ORDER_DEFAULT (9)
+#define WIL_TX_RING_SIZE_ORDER_DEFAULT (9)
+/* limit ring size in range [32..32k] */
+#define WIL_RING_SIZE_ORDER_MIN (5)
+#define WIL_RING_SIZE_ORDER_MAX (15)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
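The ring sizes are likewise parameterized, expressed as powers of two: the call sites above pass 1 << rx_ring_order and 1 << tx_ring_order. A sketch of how such parameters might be declared and validated against the [32..32k] range, assuming the defaults defined here (the validator is hypothetical):

	static unsigned int rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
	static unsigned int tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
	module_param(rx_ring_order, uint, S_IRUGO);
	module_param(tx_ring_order, uint, S_IRUGO);
	MODULE_PARM_DESC(rx_ring_order, "Rx ring order; size = 1 << order");
	MODULE_PARM_DESC(tx_ring_order, "Tx ring order; size = 1 << order");

	/* hypothetical validator: reject orders outside the supported range */
	static bool wil_ring_order_valid(unsigned int order)
	{
		return order >= WIL_RING_SIZE_ORDER_MIN &&
		       order <= WIL_RING_SIZE_ORDER_MAX;
	}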
@@ -117,12 +121,15 @@ struct RGF_ICR {
#define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0 (0x880c18)
#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c)
+#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
+ #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
#define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
#define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */
#define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */
#define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0)
+ #define BIT_DMA_EP_RX_ICR_RX_HTRSH BIT(1)
#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
#define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
#define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
@@ -152,6 +159,10 @@ struct RGF_ICR {
#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8)
#define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */
+#define RGF_CAF_OSC_CONTROL (0x88afa4)
+ #define BIT_CAF_OSC_XTAL_EN BIT(0)
+#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
+ #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
/* popular locations */
#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
@@ -461,10 +472,14 @@ struct wil6210_priv {
#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
-#define wil_to_pcie_dev(i) (&i->pdev->dev)
+__printf(2, 3)
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
void wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
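The __printf(2, 3) annotations added here map to GCC's format attribute (defined in <linux/compiler.h>): argument 2 is the format string and the variadic list starts at argument 3, so mismatched specifiers become compile-time warnings. Roughly:

	/* #define __printf(a, b) __attribute__((format(printf, a, b)))
	 * With the annotation, a mismatched call such as
	 *	wil_err(wil, "invalid MTU %d\n", "not an int");
	 * now triggers a -Wformat warning at build time.
	 */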
#define wil_dbg(wil, fmt, arg...) do { \
netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
@@ -575,9 +590,10 @@ void wil_wdev_free(struct wil6210_priv *wil);
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
int wmi_pcp_stop(struct wil6210_priv *wil);
-void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid);
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event);
-int wil_rx_init(struct wil6210_priv *wil);
+int wil_rx_init(struct wil6210_priv *wil, u16 size);
void wil_rx_fini(struct wil6210_priv *wil);
/* TX API */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 4311df982c60..63476c86cd0e 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -478,15 +478,15 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
void *d, int len)
{
struct wmi_disconnect_event *evt = d;
+ u16 reason_code = le16_to_cpu(evt->protocol_reason_status);
- wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
- evt->bssid,
- evt->protocol_reason_status, evt->disconnect_reason);
+ wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+ evt->bssid, reason_code, evt->disconnect_reason);
wil->sinfo_gen++;
mutex_lock(&wil->mutex);
- wil6210_disconnect(wil, evt->bssid);
+ wil6210_disconnect(wil, evt->bssid, reason_code, true);
mutex_unlock(&wil->mutex);
}
@@ -1025,7 +1025,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
struct wmi_cfg_rx_chain_cmd cmd = {
.action = WMI_RX_CHAIN_ADD,
.rx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ .max_mpdu_size = cpu_to_le16(mtu_max + ETH_HLEN),
.ring_mem_base = cpu_to_le64(vring->pa),
.ring_size = cpu_to_le16(vring->size),
},
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5d4173ee55bc..47731cb0d815 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5110,7 +5110,9 @@ static void b43_op_sta_notify(struct ieee80211_hw *hw,
B43_WARN_ON(!vif || wl->vif != vif);
}
-static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw)
+static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -5124,7 +5126,8 @@ static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw)
mutex_unlock(&wl->mutex);
}
-static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw)
+static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 90a977fe9a64..dc4c75083085 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -23,15 +23,15 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
- wl_cfg80211.o \
+ cfg80211.o \
chip.o \
fwil.o \
fweh.o \
fwsignal.o \
p2p.o \
proto.o \
- dhd_common.o \
- dhd_linux.o \
+ common.o \
+ core.o \
firmware.o \
feature.o \
btcoex.o \
@@ -43,14 +43,14 @@ brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
flowring.o \
msgbuf.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
- dhd_sdio.o \
+ sdio.o \
bcmsdh.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMFMAC_PCIE) += \
pcie.o
brcmfmac-$(CONFIG_BRCMDBG) += \
- dhd_dbg.o
+ debug.o
brcmfmac-$(CONFIG_BRCM_TRACING) += \
tracepoint.o
brcmfmac-$(CONFIG_OF) += \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
index a159ff3427de..8e0e91c4a0b1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
@@ -25,10 +25,10 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_bus.h"
+#include "core.h"
+#include "bus.h"
#include "fwsignal.h"
-#include "dhd_dbg.h"
+#include "debug.h"
#include "tracepoint.h"
#include "proto.h"
#include "bcdc.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 8dbd5dbb78fd..f754ffcd0308 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -41,9 +41,9 @@
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
-#include "sdio_host.h"
+#include "bus.h"
+#include "debug.h"
+#include "sdio.h"
#include "of.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
@@ -1064,6 +1064,16 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
if (!sdiodev->pdata)
brcmf_of_probe(sdiodev);
+#ifdef CONFIG_PM_SLEEP
+ /* wowl can be supported when KEEP_POWER is true and either
+ * WAKE_SDIO_IRQ is true or the platform data OOB irq is supported.
+ */
+ if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
+ ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
+ (sdiodev->pdata->oob_irq_supported)))
+ bus_if->wowl_supported = true;
+#endif
+
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_word_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
@@ -1116,34 +1126,39 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(SDIO, "Exit\n");
}
+void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+ brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
+ sdiodev->wowl_enabled = enabled;
+}
+
#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
- mmc_pm_flag_t sdio_flags;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- int ret = 0;
+ mmc_pm_flag_t sdio_flags;
brcmf_dbg(SDIO, "Enter\n");
- sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
- if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- brcmf_err("Host can't keep power while suspended\n");
- return -EINVAL;
- }
-
atomic_set(&sdiodev->suspend, true);
- ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
- if (ret) {
- brcmf_err("Failed to set pm_flags\n");
- atomic_set(&sdiodev->suspend, false);
- return ret;
+ if (sdiodev->wowl_enabled) {
+ sdio_flags = MMC_PM_KEEP_POWER;
+ if (sdiodev->pdata->oob_irq_supported)
+ enable_irq_wake(sdiodev->pdata->oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+ brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
}
brcmf_sdio_wd_timer(sdiodev->bus, 0);
- return ret;
+ return 0;
}
static int brcmf_ops_sdio_resume(struct device *dev)
@@ -1152,6 +1167,8 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(SDIO, "Enter\n");
+ if (sdiodev->pdata->oob_irq_supported)
+ disable_irq_wake(sdiodev->pdata->oob_irq_nr);
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false);
return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
index a29ac4977b3a..0445163991b7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
@@ -20,13 +20,13 @@
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include <defs.h>
-#include <dhd.h>
-#include <dhd_dbg.h>
+#include "core.h"
+#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
#include "btcoex.h"
#include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
/* T1 start SCO/eSCO priority suppression */
#define BRCMF_BTCOEX_OPPR_WIN_TIME 2000
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
index 80e73a1262be..ef344e47218a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
@@ -14,10 +14,10 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _BRCMF_BUS_H_
-#define _BRCMF_BUS_H_
+#ifndef BRCMFMAC_BUS_H
+#define BRCMFMAC_BUS_H
-#include "dhd_dbg.h"
+#include "debug.h"
/* IDs of the 6 default common rings of msgbuf protocol */
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT 0
@@ -227,8 +227,7 @@ void brcmf_txflowblock(struct device *dev, bool state);
void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
int brcmf_bus_start(struct device *dev);
-s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data,
- u32 len);
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len);
void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
@@ -241,4 +240,4 @@ void brcmf_usb_exit(void);
void brcmf_usb_register(void);
#endif
-#endif /* _BRCMF_BUS_H_ */
+#endif /* BRCMFMAC_BUS_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 39b45c038a93..3aecc5f48719 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -26,18 +26,18 @@
#include <brcmu_utils.h>
#include <defs.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "debug.h"
#include "tracepoint.h"
#include "fwil_types.h"
#include "p2p.h"
#include "btcoex.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
#include "feature.h"
#include "fwil.h"
#include "proto.h"
#include "vendor.h"
-#include "dhd_bus.h"
+#include "bus.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
#define BRCMF_PNO_VERSION 2
@@ -520,6 +520,95 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
ADDR_INDIRECT);
}
+static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
+{
+ struct brcmf_mbss_ssid_le mbss_ssid_le;
+ int bsscfgidx;
+ int err;
+
+ memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
+ bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr);
+ if (bsscfgidx < 0)
+ return bsscfgidx;
+
+ mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx);
+ mbss_ssid_le.SSID_len = cpu_to_le32(5);
+ sprintf(mbss_ssid_le.SSID, "ssid%d", bsscfgidx);
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le,
+ sizeof(mbss_ssid_le));
+ if (err < 0)
+ brcmf_err("setting ssid failed %d\n", err);
+
+ return err;
+}
+
+/**
+ * brcmf_ap_add_vif() - create a new AP virtual interface for multiple BSS
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ * @flags: not used.
+ * @params: contains mac address for AP device.
+ */
+static
+struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
+ u32 *flags, struct vif_params *params)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_cfg80211_vif *vif;
+ int err;
+
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ return ERR_PTR(-EBUSY);
+
+ brcmf_dbg(INFO, "Adding vif \"%s\"\n", name);
+
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false);
+ if (IS_ERR(vif))
+ return (struct wireless_dev *)vif;
+
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+
+ err = brcmf_cfg80211_request_ap_if(ifp);
+ if (err) {
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ goto fail;
+ }
+
+ /* wait for firmware event */
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
+ msecs_to_jiffies(1500));
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ if (!err) {
+ brcmf_err("timeout occurred\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ /* interface created in firmware */
+ ifp = vif->ifp;
+ if (!ifp) {
+ brcmf_err("no if pointer provided\n");
+ err = -ENOENT;
+ goto fail;
+ }
+
+ strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+ err = brcmf_net_attach(ifp, true);
+ if (err) {
+ brcmf_err("Registering netdevice failed\n");
+ goto fail;
+ }
+
+ return &ifp->vif->wdev;
+
+fail:
+ brcmf_free_vif(vif);
+ return ERR_PTR(err);
+}
+
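brcmf_ap_add_vif() above hinges on an arm/wait/disarm handshake with the firmware event path: arm before issuing the request, then block until BRCMF_E_IF_ADD arrives. Condensed, with names exactly as in this patch (error paths trimmed; the wait returns time remaining, so zero means timeout):

	brcmf_cfg80211_arm_vif_event(cfg, vif);	/* route next IF event to vif */
	err = brcmf_cfg80211_request_ap_if(ifp);	/* ask firmware for the BSS */
	if (!err)
		timeleft = brcmf_cfg80211_wait_vif_event_timeout(cfg,
						BRCMF_E_IF_ADD,
						msecs_to_jiffies(1500));
	brcmf_cfg80211_arm_vif_event(cfg, NULL);	/* always disarm afterwards */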
static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
{
enum nl80211_iftype iftype;
@@ -545,12 +634,16 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
switch (type) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return ERR_PTR(-EOPNOTSUPP);
+ case NL80211_IFTYPE_AP:
+ wdev = brcmf_ap_add_vif(wiphy, name, flags, params);
+ if (!IS_ERR(wdev))
+ brcmf_cfg80211_update_proto_addr_mode(wdev);
+ return wdev;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
@@ -1815,6 +1908,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
return -EIO;
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
@@ -2785,6 +2879,44 @@ static __always_inline void brcmf_delay(u32 ms)
}
}
+static s32 brcmf_config_wowl_pattern(struct brcmf_if *ifp, u8 cmd[4],
+ u8 *pattern, u32 patternsize, u8 *mask,
+ u32 packet_offset)
+{
+ struct brcmf_fil_wowl_pattern_le *filter;
+ u32 masksize;
+ u32 patternoffset;
+ u8 *buf;
+ u32 bufsize;
+ s32 ret;
+
+ masksize = (patternsize + 7) / 8;
+ patternoffset = sizeof(*filter) - sizeof(filter->cmd) + masksize;
+
+ bufsize = sizeof(*filter) + patternsize + masksize;
+ buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ filter = (struct brcmf_fil_wowl_pattern_le *)buf;
+
+ memcpy(filter->cmd, cmd, 4);
+ filter->masksize = cpu_to_le32(masksize);
+ filter->offset = cpu_to_le32(packet_offset);
+ filter->patternoffset = cpu_to_le32(patternoffset);
+ filter->patternsize = cpu_to_le32(patternsize);
+ filter->type = cpu_to_le32(BRCMF_WOWL_PATTERN_TYPE_BITMAP);
+
+ if ((mask) && (masksize))
+ memcpy(buf + sizeof(*filter), mask, masksize);
+ if ((pattern) && (patternsize))
+ memcpy(buf + sizeof(*filter) + masksize, pattern, patternsize);
+
+ ret = brcmf_fil_iovar_data_set(ifp, "wowl_pattern", buf, bufsize);
+
+ kfree(buf);
+ return ret;
+}
+
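The iovar buffer built by brcmf_config_wowl_pattern() packs three regions back to back, and the mask is a bitmap carrying one bit per pattern byte, hence the (patternsize + 7) / 8. A worked sizing example for that layout:

	/* layout: [struct brcmf_fil_wowl_pattern_le][mask bitmap][pattern]
	 * for a 6-byte pattern:
	 *	masksize      = (6 + 7) / 8 = 1    (bits 0..5 used)
	 *	patternoffset = sizeof(*filter) - sizeof(filter->cmd) + 1
	 *	bufsize       = sizeof(*filter) + 6 + 1
	 * i.e. the pattern starts right after the single mask byte, and
	 * offsets are measured past the 4-byte "add"/"clr" command.
	 */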
static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
@@ -2794,10 +2926,11 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
brcmf_dbg(TRACE, "Enter\n");
if (cfg->wowl_enabled) {
+ brcmf_configure_arp_offload(ifp, true);
brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM,
cfg->pre_wowl_pmmode);
- brcmf_fil_iovar_data_set(ifp, "wowl_pattern", "clr", 4);
brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0);
+ brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0);
cfg->wowl_enabled = false;
}
return 0;
@@ -2808,21 +2941,29 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
struct cfg80211_wowlan *wowl)
{
u32 wowl_config;
+ u32 i;
brcmf_dbg(TRACE, "Suspend, wowl config.\n");
+ brcmf_configure_arp_offload(ifp, false);
brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->pre_wowl_pmmode);
brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX);
wowl_config = 0;
if (wowl->disconnect)
- wowl_config |= WL_WOWL_DIS | WL_WOWL_BCN | WL_WOWL_RETR;
- /* Note: if "wowl" target and not "wowlpf" then wowl_bcn_loss
- * should be configured. This paramater is not supported by
- * wowlpf.
- */
+ wowl_config = BRCMF_WOWL_DIS | BRCMF_WOWL_BCN | BRCMF_WOWL_RETR;
if (wowl->magic_pkt)
- wowl_config |= WL_WOWL_MAGIC;
+ wowl_config |= BRCMF_WOWL_MAGIC;
+ if ((wowl->patterns) && (wowl->n_patterns)) {
+ wowl_config |= BRCMF_WOWL_NET;
+ for (i = 0; i < wowl->n_patterns; i++) {
+ brcmf_config_wowl_pattern(ifp, "add",
+ (u8 *)wowl->patterns[i].pattern,
+ wowl->patterns[i].pattern_len,
+ (u8 *)wowl->patterns[i].mask,
+ wowl->patterns[i].pkt_offset);
+ }
+ }
brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
brcmf_bus_wowl_config(cfg->pub->bus_if, true);
@@ -2885,7 +3026,7 @@ brcmf_update_pmklist(struct net_device *ndev,
struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
{
int i, j;
- int pmkid_len;
+ u32 pmkid_len;
pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
@@ -2913,8 +3054,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_if *ifp = netdev_priv(ndev);
struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
s32 err = 0;
- int i;
- int pmkid_len;
+ u32 pmkid_len, i;
brcmf_dbg(TRACE, "Enter\n");
if (!check_vif_up(ifp->vif))
@@ -2953,7 +3093,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_if *ifp = netdev_priv(ndev);
struct pmkid_list pmkid;
s32 err = 0;
- int i, pmkid_len;
+ u32 pmkid_len, i;
brcmf_dbg(TRACE, "Enter\n");
if (!check_vif_up(ifp->vif))
@@ -3314,11 +3454,10 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
}
static s32
-brcmf_configure_wpaie(struct net_device *ndev,
+brcmf_configure_wpaie(struct brcmf_if *ifp,
const struct brcmf_vs_tlv *wpa_ie,
bool is_rsn_ie)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
u16 count;
s32 err = 0;
@@ -3793,6 +3932,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
u16 chanspec;
+ bool mbss;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
@@ -3803,6 +3943,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
settings->inactivity_timeout);
dev_role = ifp->vif->wdev.iftype;
+ mbss = ifp->vif->mbss;
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
@@ -3822,8 +3963,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
}
- brcmf_set_mpc(ifp, 0);
- brcmf_configure_arp_offload(ifp, false);
+ if (!mbss) {
+ brcmf_set_mpc(ifp, 0);
+ brcmf_configure_arp_offload(ifp, false);
+ }
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
@@ -3837,13 +3980,16 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(TRACE, "WPA(2) IE is found\n");
if (wpa_ie != NULL) {
/* WPA IE */
- err = brcmf_configure_wpaie(ndev, wpa_ie, false);
+ err = brcmf_configure_wpaie(ifp, wpa_ie, false);
if (err < 0)
goto exit;
} else {
+ struct brcmf_vs_tlv *tmp_ie;
+
+ tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
+
/* RSN IE */
- err = brcmf_configure_wpaie(ndev,
- (struct brcmf_vs_tlv *)rsn_ie, true);
+ err = brcmf_configure_wpaie(ifp, tmp_ie, true);
if (err < 0)
goto exit;
}
@@ -3854,45 +4000,53 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
- chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
- err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
- if (err < 0) {
- brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
- goto exit;
- }
-
- if (settings->beacon_interval) {
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
- settings->beacon_interval);
+ if (!mbss) {
+ chanspec = chandef_to_chanspec(&cfg->d11inf,
+ &settings->chandef);
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
if (err < 0) {
- brcmf_err("Beacon Interval Set Error, %d\n", err);
+ brcmf_err("Set Channel failed: chspec=%d, %d\n",
+ chanspec, err);
goto exit;
}
- }
- if (settings->dtim_period) {
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
- settings->dtim_period);
- if (err < 0) {
- brcmf_err("DTIM Interval Set Error, %d\n", err);
- goto exit;
+
+ if (settings->beacon_interval) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
+ settings->beacon_interval);
+ if (err < 0) {
+ brcmf_err("Beacon Interval Set Error, %d\n",
+ err);
+ goto exit;
+ }
+ }
+ if (settings->dtim_period) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
+ settings->dtim_period);
+ if (err < 0) {
+ brcmf_err("DTIM Interval Set Error, %d\n", err);
+ goto exit;
+ }
}
- }
- if (dev_role == NL80211_IFTYPE_AP) {
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ if (dev_role == NL80211_IFTYPE_AP) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ goto exit;
+ }
+ brcmf_fil_iovar_int_set(ifp, "apsta", 0);
+ }
+
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
- brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
- brcmf_fil_iovar_int_set(ifp, "apsta", 0);
- }
-
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
- if (err < 0) {
- brcmf_err("SET INFRA error %d\n", err);
- goto exit;
}
if (dev_role == NL80211_IFTYPE_AP) {
+ if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
+ brcmf_fil_iovar_int_set(ifp, "mbss", 1);
+
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
if (err < 0) {
brcmf_err("setting AP mode failed %d\n", err);
@@ -3937,7 +4091,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
exit:
- if (err) {
+ if ((err) && (!mbss)) {
brcmf_set_mpc(ifp, 1);
brcmf_configure_arp_offload(ifp, true);
}
@@ -3958,20 +4112,31 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* first to make sure they get processed by fw. */
msleep(400);
+ if (ifp->vif->mbss) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ return err;
+ }
+
memset(&join_params, 0, sizeof(join_params));
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, sizeof(join_params));
if (err < 0)
brcmf_err("SET SSID error (%d)\n", err);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0)
- brcmf_err("BRCMF_C_UP error %d\n", err);
+ brcmf_err("BRCMF_C_DOWN error %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0)
brcmf_err("setting AP mode failed %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
if (err < 0)
brcmf_err("setting INFRA mode failed %d\n", err);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
+ brcmf_fil_iovar_int_set(ifp, "mbss", 0);
+ /* Bring device back up so it can be used again */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+ if (err < 0)
+ brcmf_err("BRCMF_C_UP error %d\n", err);
} else {
bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
bss_enable.enable = cpu_to_le32(0);
@@ -4004,24 +4169,24 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
static int
brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
- const u8 *mac)
+ struct station_del_parameters *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_scb_val_le scbval;
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
- if (!mac)
+ if (!params->mac)
return -EFAULT;
- brcmf_dbg(TRACE, "Enter %pM\n", mac);
+ brcmf_dbg(TRACE, "Enter %pM\n", params->mac);
if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
if (!check_vif_up(ifp->vif))
return -EIO;
- memcpy(&scbval.ea, mac, ETH_ALEN);
+ memcpy(&scbval.ea, params->mac, ETH_ALEN);
scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scbval));
@@ -4323,7 +4488,9 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
enum nl80211_iftype type,
bool pm_block)
{
+ struct brcmf_cfg80211_vif *vif_walk;
struct brcmf_cfg80211_vif *vif;
+ bool mbss;
brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
sizeof(*vif));
@@ -4339,6 +4506,17 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
brcmf_init_prof(&vif->profile);
+ if (type == NL80211_IFTYPE_AP) {
+ mbss = false;
+ list_for_each_entry(vif_walk, &cfg->vif_list, list) {
+ if (vif_walk->wdev.iftype == NL80211_IFTYPE_AP) {
+ mbss = true;
+ break;
+ }
+ }
+ vif->mbss = mbss;
+ }
+
list_add_tail(&vif->list, &cfg->vif_list);
return vif;
}
@@ -4581,6 +4759,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
@@ -4591,6 +4770,8 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
ndev != cfg_to_ndev(cfg)) {
brcmf_dbg(CONN, "AP mode link down\n");
complete(&cfg->vif_disabled);
+ if (ifp->vif->mbss)
+ brcmf_remove_interface(ifp->drvr, ifp->bssidx);
return 0;
}
@@ -5382,7 +5563,28 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy)
return 0;
}
-static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
+static const struct ieee80211_iface_limit brcmf_iface_limits_mbss[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC)
+ },
+ {
+ .max = 4,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ }
+};
+
+static const struct ieee80211_iface_limit brcmf_iface_limits_sbss[] = {
{
.max = 2,
.types = BIT(NL80211_IFTYPE_STATION) |
@@ -5403,8 +5605,8 @@ static struct ieee80211_iface_combination brcmf_iface_combos[] = {
{
.max_interfaces = BRCMF_IFACE_MAX_CNT,
.num_different_channels = 1,
- .n_limits = ARRAY_SIZE(brcmf_iface_limits),
- .limits = brcmf_iface_limits
+ .n_limits = ARRAY_SIZE(brcmf_iface_limits_sbss),
+ .limits = brcmf_iface_limits_sbss,
}
};
@@ -5446,10 +5648,13 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
}
-
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support brcmf_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = BRCMF_WOWL_MAXPATTERNS,
+ .pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE,
+ .pattern_min_len = 1,
+ .max_pkt_offset = 1500,
};
#endif
@@ -5477,6 +5682,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
ifc_combo = brcmf_iface_combos[0];
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
ifc_combo.num_different_channels = 2;
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
+ ifc_combo.n_limits = ARRAY_SIZE(brcmf_iface_limits_mbss);
+ ifc_combo.limits = brcmf_iface_limits_mbss;
+ }
wiphy->iface_combinations = kmemdup(&ifc_combo,
sizeof(ifc_combo),
GFP_KERNEL);
@@ -5613,7 +5822,8 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp)
return wdev->iftype;
}
-bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, unsigned long state)
+bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
+ unsigned long state)
{
struct brcmf_cfg80211_vif *vif;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
index 6abf94e41d3d..9e98b8d52757 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
@@ -14,8 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wl_cfg80211_h_
-#define _wl_cfg80211_h_
+#ifndef BRCMFMAC_CFG80211_H
+#define BRCMFMAC_CFG80211_H
/* for brcmu_d11inf */
#include <brcmu_d11.h>
@@ -183,6 +183,7 @@ struct vif_saved_ie {
* @pm_block: power-management blocked.
* @list: linked list.
* @mgmt_rx_reg: registered rx mgmt frame types.
+ * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
*/
struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
@@ -194,6 +195,7 @@ struct brcmf_cfg80211_vif {
struct vif_saved_ie saved_ie;
struct list_head list;
u16 mgmt_rx_reg;
+ bool mbss;
};
/* association inform */
@@ -480,7 +482,8 @@ const struct brcmf_tlv *
brcmf_parse_tlvs(const void *buf, int buflen, uint key);
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch);
-bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, unsigned long state);
+bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
+ unsigned long state);
void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_vif *vif);
bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
@@ -493,4 +496,4 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
-#endif /* _wl_cfg80211_h_ */
+#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index 95efde868db8..ddae0b5e56ec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -25,7 +25,7 @@
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <chipcommon.h>
-#include "dhd_dbg.h"
+#include "debug.h"
#include "chip.h"
/* SOC Interconnect types (aka chip types) */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.c b/drivers/net/wireless/brcm80211/brcmfmac/common.c
new file mode 100644
index 000000000000..1861a13e8d03
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
+#include "fwil.h"
+#include "fwil_types.h"
+#include "tracepoint.h"
+
+#define BRCMF_DEFAULT_BCN_TIMEOUT 3
+#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
+#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
+
+/* boost value for RSSI_DELTA in preferred join selection */
+#define BRCMF_JOIN_PREF_RSSI_BOOST 8
+
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+{
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ u8 buf[BRCMF_DCMD_SMLEN];
+ struct brcmf_join_pref_params join_pref_params[2];
+ char *ptr;
+ s32 err;
+
+ /* retrieve mac address */
+ err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
+ sizeof(ifp->mac_addr));
+ if (err < 0) {
+ brcmf_err("Retreiving cur_etheraddr failed, %d\n",
+ err);
+ goto done;
+ }
+ memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ strcpy(buf, "ver");
+ err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
+ if (err < 0) {
+ brcmf_err("Retreiving version information failed, %d\n",
+ err);
+ goto done;
+ }
+ ptr = (char *)buf;
+ strsep(&ptr, "\n");
+
+ /* Print fw version info */
+ brcmf_err("Firmware version = %s\n", buf);
+
+ /* locate firmware version number for ethtool */
+ ptr = strrchr(buf, ' ') + 1;
+ strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+
+ /* set mpc */
+ err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
+ if (err) {
+ brcmf_err("failed setting mpc\n");
+ goto done;
+ }
+
+ /*
+ * Setup timeout if Beacons are lost and roam is off to report
+ * link down
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
+ BRCMF_DEFAULT_BCN_TIMEOUT);
+ if (err) {
+ brcmf_err("bcn_timeout error (%d)\n", err);
+ goto done;
+ }
+
+ /* Enable/Disable built-in roaming to allow ext supplicant to take
+ * care of roaming
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+ if (err) {
+ brcmf_err("roam_off error (%d)\n", err);
+ goto done;
+ }
+
+ /* Setup join_pref to select target by RSSI(with boost on 5GHz) */
+ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+ join_pref_params[0].len = 2;
+ join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+ join_pref_params[0].band = WLC_BAND_5G;
+ join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+ join_pref_params[1].len = 2;
+ join_pref_params[1].rssi_gain = 0;
+ join_pref_params[1].band = 0;
+ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+ sizeof(join_pref_params));
+ if (err)
+ brcmf_err("Set join_pref error (%d)\n", err);
+
+ /* Setup event_msgs, enable E_IF */
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ if (err) {
+ brcmf_err("Get event_msgs error (%d)\n", err);
+ goto done;
+ }
+ setbit(eventmask, BRCMF_E_IF);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ if (err) {
+ brcmf_err("Set event_msgs error (%d)\n", err);
+ goto done;
+ }
+
+ /* Setup default scan channel time */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
+ if (err) {
+ brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
+ err);
+ goto done;
+ }
+
+ /* Setup default scan unassoc time */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
+ if (err) {
+ brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
+ err);
+ goto done;
+ }
+
+ /* do bus specific preinit here */
+ err = brcmf_bus_preinit(ifp->drvr->bus_if);
+done:
+ return err;
+}
+
+#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
+void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ if (brcmf_msg_level & level)
+ pr_debug("%s %pV", func, &vaf);
+ trace_brcmf_dbg(level, func, &vaf);
+ va_end(args);
+}
+#endif
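__brcmf_dbg() above relies on the kernel's struct va_format / %pV mechanism: printk expands the caller's format lazily, so no intermediate buffer is needed. A minimal standalone sketch of the idiom (the function name is hypothetical):

	#include <linux/kernel.h>

	__printf(1, 2)
	static void my_dbg(const char *fmt, ...)
	{
		struct va_format vaf = { .fmt = fmt };
		va_list args;

		va_start(args, fmt);
		vaf.va = &args;
		pr_debug("%pV", &vaf);	/* expands vaf.fmt against vaf.va */
		va_end(args);
	}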
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
index c6d65b8e1e15..77656c711bed 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
@@ -19,7 +19,7 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
+#include "core.h"
#include "commonring.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index fb1043908a23..effe6d7831d9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -22,12 +22,12 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
#include "feature.h"
@@ -836,7 +836,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
return ifp;
}
-void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
+static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
struct brcmf_if *ifp;
@@ -869,6 +869,38 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
}
}
+void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx)
+{
+ if (drvr->iflist[bssidx]) {
+ brcmf_fws_del_interface(drvr->iflist[bssidx]);
+ brcmf_del_if(drvr, bssidx);
+ }
+}
+
+int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
+{
+ int ifidx;
+ int bsscfgidx;
+ bool available;
+ int highest;
+
+ available = false;
+ bsscfgidx = 2;
+ highest = 2;
+ for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
+ if (drvr->iflist[ifidx]) {
+ if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
+ bsscfgidx = highest + 1;
+ else if (drvr->iflist[ifidx]->bssidx > highest)
+ highest = drvr->iflist[ifidx]->bssidx;
+ } else {
+ available = true;
+ }
+ }
+
+ return available ? bsscfgidx : -ENOMEM;
+}
+
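brcmf_get_next_free_bsscfgidx() starts at bsscfg index 2 (lower indices are taken by existing interfaces) and bumps past any index already claimed in iflist, failing with -ENOMEM when no interface slot is free. A short trace of the loop, assuming the primary interface at bssidx 0 and one AP at bssidx 2:

	/* iflist bssidx values: {0, 2}, at least one slot empty
	 *	start:           bsscfgidx = 2, highest = 2
	 *	entry bssidx 0:  no match, not above highest
	 *	entry bssidx 2:  collision -> bsscfgidx = highest + 1 = 3
	 *	empty slot:      available = true
	 * result: 3
	 */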
int brcmf_attach(struct device *dev)
{
struct brcmf_pub *drvr = NULL;
@@ -1033,10 +1065,7 @@ void brcmf_detach(struct device *dev)
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
- if (drvr->iflist[i]) {
- brcmf_fws_del_interface(drvr->iflist[i]);
- brcmf_del_if(drvr, i);
- }
+ brcmf_remove_interface(drvr, i);
brcmf_cfg80211_detach(drvr->config);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index 5e4317dbc2b0..23f74b139cc8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -18,8 +18,8 @@
* Common types *
*/
-#ifndef _BRCMF_H_
-#define _BRCMF_H_
+#ifndef BRCMFMAC_CORE_H
+#define BRCMFMAC_CORE_H
#include "fweh.h"
@@ -83,7 +83,6 @@ struct brcmf_pub {
/* Internal brcmf items */
uint hdrlen; /* Total BRCMF header length (proto + bus) */
uint rxsz; /* Rx buffer size bus module should use */
- u8 wme_dp; /* wme discard priority */
/* Dongle media info */
char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
@@ -176,7 +175,8 @@ char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
char *name, u8 *mac_addr);
-void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx);
+int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
@@ -186,4 +186,4 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
-#endif /* _BRCMF_H_ */
+#endif /* BRCMFMAC_CORE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
index be9f4f829192..9b473d50b005 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
@@ -19,9 +19,9 @@
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
static struct dentry *root_folder;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
index dec40d316c82..eb0b8c47479d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
@@ -14,8 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _BRCMF_DBG_H_
-#define _BRCMF_DBG_H_
+#ifndef BRCMFMAC_DEBUG_H
+#define BRCMFMAC_DEBUG_H
/* message levels */
#define BRCMF_TRACE_VAL 0x00000002
@@ -133,4 +133,4 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
}
#endif
-#endif /* _BRCMF_DBG_H_ */
+#endif /* BRCMFMAC_DEBUG_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
deleted file mode 100644
index d991f8e3d9ec..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
-#include "fwil.h"
-#include "fwil_types.h"
-#include "tracepoint.h"
-
-#define PKTFILTER_BUF_SIZE 128
-#define BRCMF_DEFAULT_BCN_TIMEOUT 3
-#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
-#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
-#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
-
-/* boost value for RSSI_DELTA in preferred join selection */
-#define BRCMF_JOIN_PREF_RSSI_BOOST 8
-
-
-bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
- struct sk_buff *pkt, int prec)
-{
- struct sk_buff *p;
- int eprec = -1; /* precedence to evict from */
- bool discard_oldest;
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_pub *drvr = bus_if->drvr;
-
- /* Fast case, precedence queue is not full and we are also not
- * exceeding total queue length
- */
- if (!pktq_pfull(q, prec) && !pktq_full(q)) {
- brcmu_pktq_penq(q, prec, pkt);
- return true;
- }
-
- /* Determine precedence from which to evict packet, if any */
- if (pktq_pfull(q, prec))
- eprec = prec;
- else if (pktq_full(q)) {
- p = brcmu_pktq_peek_tail(q, &eprec);
- if (eprec > prec)
- return false;
- }
-
- /* Evict if needed */
- if (eprec >= 0) {
- /* Detect queueing to unconfigured precedence */
- discard_oldest = ac_bitmap_tst(drvr->wme_dp, eprec);
- if (eprec == prec && !discard_oldest)
- return false; /* refuse newer (incoming) packet */
- /* Evict packet according to discard policy */
- p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
- brcmu_pktq_pdeq_tail(q, eprec);
- if (p == NULL)
- brcmf_err("brcmu_pktq_penq() failed, oldest %d\n",
- discard_oldest);
-
- brcmu_pkt_buf_free_skb(p);
- }
-
- /* Enqueue */
- p = brcmu_pktq_penq(q, prec, pkt);
- if (p == NULL)
- brcmf_err("brcmu_pktq_penq() failed\n");
-
- return p != NULL;
-}
-
-/* Convert user's input in hex pattern to byte-size mask */
-static int brcmf_c_pattern_atoh(char *src, char *dst)
-{
- int i;
- if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
- brcmf_err("Mask invalid format. Needs to start with 0x\n");
- return -EINVAL;
- }
- src = src + 2; /* Skip past 0x */
- if (strlen(src) % 2 != 0) {
- brcmf_err("Mask invalid format. Length must be even.\n");
- return -EINVAL;
- }
- for (i = 0; *src != '\0'; i++) {
- unsigned long res;
- char num[3];
- strncpy(num, src, 2);
- num[2] = '\0';
- if (kstrtoul(num, 16, &res))
- return -EINVAL;
- dst[i] = (u8)res;
- src += 2;
- }
- return i;
-}
-
-static void
-brcmf_c_pktfilter_offload_enable(struct brcmf_if *ifp, char *arg, int enable,
- int master_mode)
-{
- unsigned long res;
- char *argv;
- char *arg_save = NULL, *arg_org = NULL;
- s32 err;
- struct brcmf_pkt_filter_enable_le enable_parm;
-
- arg_save = kstrdup(arg, GFP_ATOMIC);
- if (!arg_save)
- goto fail;
-
- arg_org = arg_save;
-
- argv = strsep(&arg_save, " ");
-
- if (argv == NULL) {
- brcmf_err("No args provided\n");
- goto fail;
- }
-
- /* Parse packet filter id. */
- enable_parm.id = 0;
- if (!kstrtoul(argv, 0, &res))
- enable_parm.id = cpu_to_le32((u32)res);
-
- /* Enable/disable the specified filter. */
- enable_parm.enable = cpu_to_le32(enable);
-
- err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_enable", &enable_parm,
- sizeof(enable_parm));
- if (err)
- brcmf_err("Set pkt_filter_enable error (%d)\n", err);
-
- /* Control the master mode */
- err = brcmf_fil_iovar_int_set(ifp, "pkt_filter_mode", master_mode);
- if (err)
- brcmf_err("Set pkt_filter_mode error (%d)\n", err);
-
-fail:
- kfree(arg_org);
-}
-
-static void brcmf_c_pktfilter_offload_set(struct brcmf_if *ifp, char *arg)
-{
- struct brcmf_pkt_filter_le *pkt_filter;
- unsigned long res;
- int buf_len;
- s32 err;
- u32 mask_size;
- u32 pattern_size;
- char *argv[8], *buf = NULL;
- int i = 0;
- char *arg_save = NULL, *arg_org = NULL;
-
- arg_save = kstrdup(arg, GFP_ATOMIC);
- if (!arg_save)
- goto fail;
-
- arg_org = arg_save;
-
- buf = kmalloc(PKTFILTER_BUF_SIZE, GFP_ATOMIC);
- if (!buf)
- goto fail;
-
- argv[i] = strsep(&arg_save, " ");
- while (argv[i]) {
- i++;
- if (i >= 8) {
- brcmf_err("Too many parameters\n");
- goto fail;
- }
- argv[i] = strsep(&arg_save, " ");
- }
-
- if (i != 6) {
- brcmf_err("Not enough args provided %d\n", i);
- goto fail;
- }
-
- pkt_filter = (struct brcmf_pkt_filter_le *)buf;
-
- /* Parse packet filter id. */
- pkt_filter->id = 0;
- if (!kstrtoul(argv[0], 0, &res))
- pkt_filter->id = cpu_to_le32((u32)res);
-
- /* Parse filter polarity. */
- pkt_filter->negate_match = 0;
- if (!kstrtoul(argv[1], 0, &res))
- pkt_filter->negate_match = cpu_to_le32((u32)res);
-
- /* Parse filter type. */
- pkt_filter->type = 0;
- if (!kstrtoul(argv[2], 0, &res))
- pkt_filter->type = cpu_to_le32((u32)res);
-
- /* Parse pattern filter offset. */
- pkt_filter->u.pattern.offset = 0;
- if (!kstrtoul(argv[3], 0, &res))
- pkt_filter->u.pattern.offset = cpu_to_le32((u32)res);
-
- /* Parse pattern filter mask. */
- mask_size = brcmf_c_pattern_atoh(argv[4],
- (char *)pkt_filter->u.pattern.mask_and_pattern);
-
- /* Parse pattern filter pattern. */
- pattern_size = brcmf_c_pattern_atoh(argv[5],
- (char *)&pkt_filter->u.pattern.mask_and_pattern[mask_size]);
-
- if (mask_size != pattern_size) {
- brcmf_err("Mask and pattern not the same size\n");
- goto fail;
- }
-
- pkt_filter->u.pattern.size_bytes = cpu_to_le32(mask_size);
- buf_len = offsetof(struct brcmf_pkt_filter_le,
- u.pattern.mask_and_pattern);
- buf_len += mask_size + pattern_size;
-
- err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_add", pkt_filter,
- buf_len);
- if (err)
- brcmf_err("Set pkt_filter_add error (%d)\n", err);
-
-fail:
- kfree(arg_org);
-
- kfree(buf);
-}
-
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
-{
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- u8 buf[BRCMF_DCMD_SMLEN];
- struct brcmf_join_pref_params join_pref_params[2];
- char *ptr;
- s32 err;
-
- /* retreive mac address */
- err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
- sizeof(ifp->mac_addr));
- if (err < 0) {
- brcmf_err("Retreiving cur_etheraddr failed, %d\n",
- err);
- goto done;
- }
- memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
-
- /* query for 'ver' to get version info from firmware */
- memset(buf, 0, sizeof(buf));
- strcpy(buf, "ver");
- err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
- if (err < 0) {
- brcmf_err("Retreiving version information failed, %d\n",
- err);
- goto done;
- }
- ptr = (char *)buf;
- strsep(&ptr, "\n");
-
- /* Print fw version info */
- brcmf_err("Firmware version = %s\n", buf);
-
- /* locate firmware version number for ethtool */
- ptr = strrchr(buf, ' ') + 1;
- strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
-
- /* set mpc */
- err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
- if (err) {
- brcmf_err("failed setting mpc\n");
- goto done;
- }
-
- /*
- * Setup timeout if Beacons are lost and roam is off to report
- * link down
- */
- err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
- BRCMF_DEFAULT_BCN_TIMEOUT);
- if (err) {
- brcmf_err("bcn_timeout error (%d)\n", err);
- goto done;
- }
-
- /* Enable/Disable build-in roaming to allowed ext supplicant to take
- * of romaing
- */
- err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
- if (err) {
- brcmf_err("roam_off error (%d)\n", err);
- goto done;
- }
-
- /* Setup join_pref to select target by RSSI(with boost on 5GHz) */
- join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
- join_pref_params[0].len = 2;
- join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
- join_pref_params[0].band = WLC_BAND_5G;
- join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
- join_pref_params[1].len = 2;
- join_pref_params[1].rssi_gain = 0;
- join_pref_params[1].band = 0;
- err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
- sizeof(join_pref_params));
- if (err)
- brcmf_err("Set join_pref error (%d)\n", err);
-
- /* Setup event_msgs, enable E_IF */
- err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
- if (err) {
- brcmf_err("Get event_msgs error (%d)\n", err);
- goto done;
- }
- setbit(eventmask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
- if (err) {
- brcmf_err("Set event_msgs error (%d)\n", err);
- goto done;
- }
-
- /* Setup default scan channel time */
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
- BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
- if (err) {
- brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
- err);
- goto done;
- }
-
- /* Setup default scan unassoc time */
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
- BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
- if (err) {
- brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
- err);
- goto done;
- }
-
- /* Setup packet filter */
- brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
- brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
- 0, true);
-
- /* do bus specific preinit here */
- err = brcmf_bus_preinit(ifp->drvr->bus_if);
-done:
- return err;
-}
-
-#ifdef CONFIG_BRCM_TRACING
-void __brcmf_err(const char *func, const char *fmt, ...)
-{
- struct va_format vaf = {
- .fmt = fmt,
- };
- va_list args;
-
- va_start(args, fmt);
- vaf.va = &args;
- pr_err("%s: %pV", func, &vaf);
- trace_brcmf_err(func, &vaf);
- va_end(args);
-}
-#endif
-#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
-void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
-{
- struct va_format vaf = {
- .fmt = fmt,
- };
- va_list args;
-
- va_start(args, fmt);
- vaf.va = &args;
- if (brcmf_msg_level & level)
- pr_debug("%s %pV", func, &vaf);
- trace_brcmf_dbg(level, func, &vaf);
- va_end(args);
-}
-#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index aed53acef456..defb7a44e0bc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -17,18 +17,13 @@
#include <linux/netdevice.h>
#include <brcm_hw_ids.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
#include "fwil.h"
#include "feature.h"
/*
- * firmware error code received if iovar is unsupported.
- */
-#define EBRCMF_FEAT_UNSUPPORTED 23
-
-/*
* expand feature list to array of feature strings.
*/
#define BRCMF_FEAT_DEF(_f) \
@@ -102,6 +97,28 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
}
}
+/**
+ * brcmf_feat_iovar_int_set() - determine feature through iovar set.
+ *
+ * @ifp: interface to query.
+ * @id: feature id.
+ * @name: iovar name.
+ */
+static void brcmf_feat_iovar_int_set(struct brcmf_if *ifp,
+ enum brcmf_feat_id id, char *name, u32 val)
+{
+ int err;
+
+ err = brcmf_fil_iovar_int_set(ifp, name, val);
+ if (err == 0) {
+ brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
+ ifp->drvr->feat_flags |= BIT(id);
+ } else {
+ brcmf_dbg(TRACE, "%s feature check failed: %d\n",
+ brcmf_feat_names[id], err);
+ }
+}
+
void brcmf_feat_attach(struct brcmf_pub *drvr)
{
struct brcmf_if *ifp = drvr->iflist[0];
@@ -109,6 +126,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
+ brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
/* set chip related quirks */
switch (drvr->bus_if->chip) {
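
Since brcmf_feat_iovar_int_set() latches a successful probe into
drvr->feat_flags exactly like the getter-based probe, later code can gate
MBSS-specific paths on the flag instead of re-issuing the iovar. A minimal
sketch of such a check (the helper name is illustrative; this patch itself
only sets the flag):

static bool brcmf_feat_is_enabled(struct brcmf_if *ifp,
				  enum brcmf_feat_id id)
{
	/* feat_flags was populated during brcmf_feat_attach() */
	return !!(ifp->drvr->feat_flags & BIT(id));
}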
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
index b9a796d0a44d..f5832e077bb7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
@@ -22,6 +22,7 @@
* MCHAN: multi-channel for concurrent P2P.
*/
#define BRCMF_FEAT_LIST \
+ BRCMF_FEAT_DEF(MBSS) \
BRCMF_FEAT_DEF(MCHAN) \
BRCMF_FEAT_DEF(WOWL)
/*
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 8ea9f283d2b8..1ff787d1a36b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -20,7 +20,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include "dhd_dbg.h"
+#include "debug.h"
#include "firmware.h"
char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
@@ -262,8 +262,7 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
- if (fwctx->code)
- release_firmware(fwctx->code);
+ release_firmware(fwctx->code);
device_release_driver(fwctx->dev);
kfree(fwctx);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 1faa929f5fff..44f3a84d1999 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -19,9 +19,9 @@
#include <linux/etherdevice.h>
#include <brcmu_utils.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
+#include "core.h"
+#include "debug.h"
+#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 44fc85f68f7a..ec62492ffa69 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -18,8 +18,8 @@
#include "brcmu_wifi.h"
#include "brcmu_utils.h"
-#include "dhd.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "debug.h"
#include "tracepoint.h"
#include "fwsignal.h"
#include "fweh.h"
@@ -221,10 +221,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
- if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
- brcmf_fws_del_interface(ifp);
- brcmf_del_if(drvr, ifevent->bssidx);
- }
+ if (ifp && ifevent->action == BRCMF_E_IF_DEL)
+ brcmf_remove_interface(drvr, ifevent->bssidx);
}
/**
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index ded328f80cd1..03f2c406a17b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -22,9 +22,9 @@
#include <linux/netdevice.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
#include "tracepoint.h"
#include "fwil.h"
#include "proto.h"
@@ -32,6 +32,76 @@
#define MAX_HEX_DUMP_LEN 64
+#ifdef DEBUG
+static const char * const brcmf_fil_errstr[] = {
+ "BCME_OK",
+ "BCME_ERROR",
+ "BCME_BADARG",
+ "BCME_BADOPTION",
+ "BCME_NOTUP",
+ "BCME_NOTDOWN",
+ "BCME_NOTAP",
+ "BCME_NOTSTA",
+ "BCME_BADKEYIDX",
+ "BCME_RADIOOFF",
+ "BCME_NOTBANDLOCKED",
+ "BCME_NOCLK",
+ "BCME_BADRATESET",
+ "BCME_BADBAND",
+ "BCME_BUFTOOSHORT",
+ "BCME_BUFTOOLONG",
+ "BCME_BUSY",
+ "BCME_NOTASSOCIATED",
+ "BCME_BADSSIDLEN",
+ "BCME_OUTOFRANGECHAN",
+ "BCME_BADCHAN",
+ "BCME_BADADDR",
+ "BCME_NORESOURCE",
+ "BCME_UNSUPPORTED",
+ "BCME_BADLEN",
+ "BCME_NOTREADY",
+ "BCME_EPERM",
+ "BCME_NOMEM",
+ "BCME_ASSOCIATED",
+ "BCME_RANGE",
+ "BCME_NOTFOUND",
+ "BCME_WME_NOT_ENABLED",
+ "BCME_TSPEC_NOTFOUND",
+ "BCME_ACM_NOTSUPPORTED",
+ "BCME_NOT_WME_ASSOCIATION",
+ "BCME_SDIO_ERROR",
+ "BCME_DONGLE_DOWN",
+ "BCME_VERSION",
+ "BCME_TXFAIL",
+ "BCME_RXFAIL",
+ "BCME_NODEVICE",
+ "BCME_NMODE_DISABLED",
+ "BCME_NONRESIDENT",
+ "BCME_SCANREJECT",
+ "BCME_USAGE_ERROR",
+ "BCME_IOCTL_ERROR",
+ "BCME_SERIAL_PORT_ERR",
+ "BCME_DISABLED",
+ "BCME_DECERR",
+ "BCME_ENCERR",
+ "BCME_MICERR",
+ "BCME_REPLAY",
+ "BCME_IE_NOTFOUND",
+};
+
+static const char *brcmf_fil_get_errstr(u32 err)
+{
+ if (err >= ARRAY_SIZE(brcmf_fil_errstr))
+ return "(unknown)";
+
+ return brcmf_fil_errstr[err];
+}
+#else
+static const char *brcmf_fil_get_errstr(u32 err)
+{
+ return "";
+}
+#endif /* DEBUG */
static s32
brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
@@ -52,11 +122,11 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
if (err >= 0)
- err = 0;
- else
- brcmf_dbg(FIL, "Failed err=%d\n", err);
+ return 0;
- return err;
+ brcmf_dbg(FIL, "Failed: %s (%d)\n",
+ brcmf_fil_get_errstr((u32)(-err)), err);
+ return -EBADE;
}
s32
@@ -66,7 +136,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
mutex_lock(&ifp->drvr->proto_block);
- brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -84,7 +154,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
mutex_lock(&ifp->drvr->proto_block);
err = brcmf_fil_cmd_data(ifp, cmd, data, len, false);
- brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -101,7 +171,7 @@ brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
__le32 data_le = cpu_to_le32(data);
mutex_lock(&ifp->drvr->proto_block);
- brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
mutex_unlock(&ifp->drvr->proto_block);
@@ -118,7 +188,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
mutex_unlock(&ifp->drvr->proto_block);
*data = le32_to_cpu(data_le);
- brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
return err;
}
@@ -154,7 +224,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
mutex_lock(&drvr->proto_block);
- brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+ brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -194,7 +264,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
brcmf_err("Creating iovar failed\n");
}
- brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+ brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -277,7 +347,8 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
mutex_lock(&drvr->proto_block);
- brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+ brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx,
+ ifp->bssidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
@@ -316,7 +387,8 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
err = -EPERM;
brcmf_err("Creating bsscfg failed\n");
}
- brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+ brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx,
+ ifp->bssidx, name, len);
brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
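
For context on the reworked error reporting in brcmf_fil_cmd_data(): the
firmware returns negative BCME_* status codes, so the negated value indexes
brcmf_fil_errstr[] while callers uniformly receive -EBADE. A worked example
using values from the table above:

	/* firmware status -23: brcmf_fil_get_errstr(23) yields
	 * "BCME_UNSUPPORTED", so the log reads
	 *     Failed: BCME_UNSUPPORTED (-23)
	 * and the caller sees -EBADE. This is the same code 23 that
	 * feature.c previously hard-coded as EBRCMF_FEAT_UNSUPPORTED.
	 */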
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 5ff5cd0bb032..50891c02c4c1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -55,59 +55,63 @@
/* WOWL bits */
/* Wakeup on Magic packet: */
-#define WL_WOWL_MAGIC (1 << 0)
+#define BRCMF_WOWL_MAGIC (1 << 0)
/* Wakeup on Netpattern */
-#define WL_WOWL_NET (1 << 1)
+#define BRCMF_WOWL_NET (1 << 1)
/* Wakeup on loss-of-link due to Disassoc/Deauth: */
-#define WL_WOWL_DIS (1 << 2)
+#define BRCMF_WOWL_DIS (1 << 2)
/* Wakeup on retrograde TSF: */
-#define WL_WOWL_RETR (1 << 3)
+#define BRCMF_WOWL_RETR (1 << 3)
/* Wakeup on loss of beacon: */
-#define WL_WOWL_BCN (1 << 4)
+#define BRCMF_WOWL_BCN (1 << 4)
/* Wakeup after test: */
-#define WL_WOWL_TST (1 << 5)
+#define BRCMF_WOWL_TST (1 << 5)
/* Wakeup after PTK refresh: */
-#define WL_WOWL_M1 (1 << 6)
+#define BRCMF_WOWL_M1 (1 << 6)
/* Wakeup after receipt of EAP-Identity Req: */
-#define WL_WOWL_EAPID (1 << 7)
+#define BRCMF_WOWL_EAPID (1 << 7)
/* Wakeind via PME(0) or GPIO(1): */
-#define WL_WOWL_PME_GPIO (1 << 8)
+#define BRCMF_WOWL_PME_GPIO (1 << 8)
/* need tkip phase 1 key to be updated by the driver: */
-#define WL_WOWL_NEEDTKIP1 (1 << 9)
+#define BRCMF_WOWL_NEEDTKIP1 (1 << 9)
/* enable wakeup if GTK fails: */
-#define WL_WOWL_GTK_FAILURE (1 << 10)
+#define BRCMF_WOWL_GTK_FAILURE (1 << 10)
/* support extended magic packets: */
-#define WL_WOWL_EXTMAGPAT (1 << 11)
+#define BRCMF_WOWL_EXTMAGPAT (1 << 11)
/* support ARP/NS/keepalive offloading: */
-#define WL_WOWL_ARPOFFLOAD (1 << 12)
+#define BRCMF_WOWL_ARPOFFLOAD (1 << 12)
/* read protocol version for EAPOL frames: */
-#define WL_WOWL_WPA2 (1 << 13)
+#define BRCMF_WOWL_WPA2 (1 << 13)
/* If the bit is set, use key rotaton: */
-#define WL_WOWL_KEYROT (1 << 14)
+#define BRCMF_WOWL_KEYROT (1 << 14)
/* If the bit is set, frm received was bcast frame: */
-#define WL_WOWL_BCAST (1 << 15)
+#define BRCMF_WOWL_BCAST (1 << 15)
/* If the bit is set, scan offload is enabled: */
-#define WL_WOWL_SCANOL (1 << 16)
+#define BRCMF_WOWL_SCANOL (1 << 16)
/* Wakeup on tcpkeep alive timeout: */
-#define WL_WOWL_TCPKEEP_TIME (1 << 17)
+#define BRCMF_WOWL_TCPKEEP_TIME (1 << 17)
/* Wakeup on mDNS Conflict Resolution: */
-#define WL_WOWL_MDNS_CONFLICT (1 << 18)
+#define BRCMF_WOWL_MDNS_CONFLICT (1 << 18)
/* Wakeup on mDNS Service Connect: */
-#define WL_WOWL_MDNS_SERVICE (1 << 19)
+#define BRCMF_WOWL_MDNS_SERVICE (1 << 19)
/* tcp keepalive got data: */
-#define WL_WOWL_TCPKEEP_DATA (1 << 20)
+#define BRCMF_WOWL_TCPKEEP_DATA (1 << 20)
/* Firmware died in wowl mode: */
-#define WL_WOWL_FW_HALT (1 << 21)
+#define BRCMF_WOWL_FW_HALT (1 << 21)
/* Enable detection of radio button changes: */
-#define WL_WOWL_ENAB_HWRADIO (1 << 22)
+#define BRCMF_WOWL_ENAB_HWRADIO (1 << 22)
/* Offloads detected MIC failure(s): */
-#define WL_WOWL_MIC_FAIL (1 << 23)
+#define BRCMF_WOWL_MIC_FAIL (1 << 23)
/* Wakeup in Unassociated state (Net/Magic Pattern): */
-#define WL_WOWL_UNASSOC (1 << 24)
+#define BRCMF_WOWL_UNASSOC (1 << 24)
/* Wakeup if received matched secured pattern: */
-#define WL_WOWL_SECURE (1 << 25)
+#define BRCMF_WOWL_SECURE (1 << 25)
/* Link Down indication in WoWL mode: */
-#define WL_WOWL_LINKDOWN (1 << 31)
+#define BRCMF_WOWL_LINKDOWN (1 << 31)
+
+#define BRCMF_WOWL_MAXPATTERNS 8
+#define BRCMF_WOWL_MAXPATTERNSIZE 128
+
/* join preference types for join_pref iovar */
enum brcmf_join_pref_types {
@@ -124,6 +128,12 @@ enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_DEV,
};
+enum brcmf_wowl_pattern_type {
+ BRCMF_WOWL_PATTERN_TYPE_BITMAP = 0,
+ BRCMF_WOWL_PATTERN_TYPE_ARP,
+ BRCMF_WOWL_PATTERN_TYPE_NA
+};
+
struct brcmf_fil_p2p_if_le {
u8 addr[ETH_ALEN];
__le16 type;
@@ -484,4 +494,35 @@ struct brcmf_rx_mgmt_data {
__be32 rate;
};
+/**
+ * struct brcmf_fil_wowl_pattern_le - wowl pattern configuration struct.
+ *
+ * @cmd: "add", "del" or "clr".
+ * @masksize: Size of the mask in #of bytes
+ * @offset: Pattern byte offset in packet
+ * @patternoffset: Offset of start of pattern, counted from the masksize field.
+ * @patternsize: Size of the pattern itself in #of bytes
+ * @id: id of the pattern.
+ * @reasonsize: Size of the wakeup reason code
+ * @type: Type of pattern (enum brcmf_wowl_pattern_type)
+ */
+struct brcmf_fil_wowl_pattern_le {
+ u8 cmd[4];
+ __le32 masksize;
+ __le32 offset;
+ __le32 patternoffset;
+ __le32 patternsize;
+ __le32 id;
+ __le32 reasonsize;
+ __le32 type;
+ /* u8 mask[] - Mask follows the structure above */
+ /* u8 pattern[] - Pattern follows the mask is at 'patternoffset' */
+};
+
+struct brcmf_mbss_ssid_le {
+ __le32 bsscfgidx;
+ __le32 SSID_len;
+ unsigned char SSID[32];
+};
+
#endif /* FWIL_TYPES_H_ */
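
The new pattern struct is followed by two variable-length blobs, as its
trailing comments note. Below is a minimal sketch of building an "add"
request under that layout; the iovar name and the patternoffset formula
(counted from the masksize field, per the kerneldoc) are assumptions, and
offset/mask/masksize/pattern/patternsize are caller-supplied:

static int example_wowl_pattern_add(struct brcmf_if *ifp, u32 offset,
				    const u8 *mask, u32 masksize,
				    const u8 *pattern, u32 patternsize)
{
	struct brcmf_fil_wowl_pattern_le *filter;
	size_t bufsize;
	int err;

	bufsize = sizeof(*filter) + masksize + patternsize;
	filter = kzalloc(bufsize, GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	memcpy(filter->cmd, "add", 4);
	filter->offset = cpu_to_le32(offset);
	filter->masksize = cpu_to_le32(masksize);
	/* assumed: offsets are counted from the masksize field */
	filter->patternoffset = cpu_to_le32(sizeof(*filter) -
			offsetof(struct brcmf_fil_wowl_pattern_le, masksize) +
			masksize);
	filter->patternsize = cpu_to_le32(patternsize);
	memcpy((u8 *)(filter + 1), mask, masksize);		/* mask */
	memcpy((u8 *)(filter + 1) + masksize, pattern, patternsize);

	err = brcmf_fil_iovar_data_set(ifp, "wowl_pattern", filter, bufsize);
	kfree(filter);
	return err;
}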
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 183f08d7fc8c..f0dda0ecd23b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -26,15 +26,15 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
+#include "core.h"
+#include "debug.h"
+#include "bus.h"
#include "fwil.h"
#include "fwil_types.h"
#include "fweh.h"
#include "fwsignal.h"
#include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
#include "proto.h"
/**
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 11cc051f97cd..456944a6a2db 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -24,13 +24,13 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
-#include "dhd_bus.h"
+#include "bus.h"
#include "tracepoint.h"
@@ -518,8 +518,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
len : msgbuf->ioctl_resp_ret_len);
}
- if (skb)
- brcmu_pkt_buf_free_skb(skb);
+ brcmu_pkt_buf_free_skb(skb);
return msgbuf->ioctl_resp_status;
}
@@ -1081,8 +1080,17 @@ brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
{
struct brcmf_if *ifp;
+ /* The ifidx is the idx to map to matching netdev/ifp. When receiving
+ * events this is easy because it contains the bssidx which maps
+ * 1-on-1 to the netdev/ifp. But for data frames it is the ifidx that
+ * is received. bssidx 1 is used for p2p0 and no data can be received
+ * or transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0
+ */
+ if (ifidx)
+ (ifidx)++;
ifp = msgbuf->drvr->iflist[ifidx];
if (!ifp || !ifp->ndev) {
+ brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
brcmu_pkt_buf_free_skb(skb);
return;
}
@@ -1355,6 +1363,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
}
INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
+ count = count * sizeof(unsigned long);
msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
if (!msgbuf->flow_map)
goto fail;
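
Two details in the msgbuf hunks above are easiest to see with numbers. For
the flow_map fix, BITS_TO_LONGS() returns a count of longs, not bytes, so
the allocation must be scaled by sizeof(unsigned long); and the rx path now
skips the p2p0 slot when mapping ifidx to iflist[]:

	/* e.g. 256 flowrings on a 64-bit host:
	 *   BITS_TO_LONGS(256) = 4 longs -> 4 * 8 = 32 bytes for kzalloc()
	 * (before the fix only 4 bytes were allocated, under-sizing the
	 * bitmap)
	 *
	 * rx mapping: a data frame arriving with ifidx 1 is delivered via
	 * iflist[2], since bssidx 1 belongs to p2p0 which carries no data
	 */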
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
index 927bffd5be64..c824570ddea3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -21,8 +21,8 @@
#include <linux/mmc/sdio_func.h>
#include <defs.h>
-#include "dhd_dbg.h"
-#include "sdio_host.h"
+#include "debug.h"
+#include "sdio.h"
void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
{
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index d54c58a32faa..effb48ebd864 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -21,12 +21,12 @@
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include <defs.h>
-#include <dhd.h>
-#include <dhd_dbg.h>
+#include "core.h"
+#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
#include "p2p.h"
-#include "wl_cfg80211.h"
+#include "cfg80211.h"
/* parameters used for p2p escan */
#define P2PAPI_SCAN_NPROBES 1
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 16fef3382019..905991fdb7b1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -30,8 +30,8 @@
#include <brcmu_wifi.h>
#include <brcm_hw_ids.h>
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
+#include "debug.h"
+#include "bus.h"
#include "commonring.h"
#include "msgbuf.h"
#include "pcie.h"
@@ -798,12 +798,14 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
brcmf_dbg(PCIE, "Enter\n");
/* is it a v1 or v2 implementation */
devinfo->irq_requested = false;
+ pci_enable_msi(pdev);
if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
if (request_threaded_irq(pdev->irq,
brcmf_pcie_quick_check_isr_v1,
brcmf_pcie_isr_thread_v1,
IRQF_SHARED, "brcmf_pcie_intr",
devinfo)) {
+ pci_disable_msi(pdev);
brcmf_err("Failed to request IRQ %d\n", pdev->irq);
return -EIO;
}
@@ -813,6 +815,7 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
brcmf_pcie_isr_thread_v2,
IRQF_SHARED, "brcmf_pcie_intr",
devinfo)) {
+ pci_disable_msi(pdev);
brcmf_err("Failed to request IRQ %d\n", pdev->irq);
return -EIO;
}
@@ -839,6 +842,7 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
return;
devinfo->irq_requested = false;
free_irq(pdev->irq, devinfo);
+ pci_disable_msi(pdev);
msleep(50);
count = 0;
@@ -1857,6 +1861,8 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
{ /* end: all zeroes */ }
};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
index 62b940723339..26b68c367f57 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
@@ -20,9 +20,9 @@
#include <linux/netdevice.h>
#include <brcmu_wifi.h>
-#include "dhd.h"
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
#include "proto.h"
#include "bcdc.h"
#include "msgbuf.h"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index d20d4e6f391a..0b0d51a61060 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -40,7 +40,7 @@
#include <brcmu_utils.h>
#include <brcm_hw_ids.h>
#include <soc.h>
-#include "sdio_host.h"
+#include "sdio.h"
#include "chip.h"
#include "firmware.h"
@@ -96,8 +96,8 @@ struct rte_console {
#endif /* DEBUG */
#include <chipcommon.h>
-#include "dhd_bus.h"
-#include "dhd_dbg.h"
+#include "bus.h"
+#include "debug.h"
#include "tracepoint.h"
#define TXQLEN 2048 /* bulk tx queue length */
@@ -2762,6 +2762,48 @@ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
return &bus->txq;
}
+static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
+{
+ struct sk_buff *p;
+ int eprec = -1; /* precedence to evict from */
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+ brcmu_pktq_penq(q, prec, pkt);
+ return true;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktq_pfull(q, prec)) {
+ eprec = prec;
+ } else if (pktq_full(q)) {
+ p = brcmu_pktq_peek_tail(q, &eprec);
+ if (eprec > prec)
+ return false;
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ if (eprec == prec)
+ return false; /* refuse newer (incoming) packet */
+ /* Evict packet according to discard policy */
+ p = brcmu_pktq_pdeq_tail(q, eprec);
+ if (p == NULL)
+ brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
+ brcmu_pkt_buf_free_skb(p);
+ }
+
+ /* Enqueue */
+ p = brcmu_pktq_penq(q, prec, pkt);
+ if (p == NULL)
+ brcmf_err("brcmu_pktq_penq() failed\n");
+
+ return p != NULL;
+}
+
static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
@@ -2787,7 +2829,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
spin_lock_bh(&bus->txq_lock);
/* reset bus_flags in packet cb */
*(u16 *)(pkt->cb) = 0;
- if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
+ if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
skb_pull(pkt, bus->tx_hdrlen);
brcmf_err("out of bus->txq !!!\n");
ret = -ENOSR;
@@ -2797,7 +2839,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
if (pktq_len(&bus->txq) >= TXHI) {
bus->txoff = true;
- brcmf_txflowblock(bus->sdiodev->dev, true);
+ brcmf_txflowblock(dev, true);
}
spin_unlock_bh(&bus->txq_lock);
@@ -3948,6 +3990,7 @@ static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.txctl = brcmf_sdio_bus_txctl,
.rxctl = brcmf_sdio_bus_rxctl,
.gettxq = brcmf_sdio_bus_gettxq,
+ .wowl_config = brcmf_sdio_wowl_config
};
static void brcmf_sdio_firmware_callback(struct device *dev,
@@ -4074,7 +4117,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
/* platform specific configuration:
* alignments must be at least 4 bytes for ADMA
- */
+ */
bus->head_align = ALIGNMENT;
bus->sgentry_align = ALIGNMENT;
if (sdiodev->pdata) {
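
The eviction policy in brcmf_sdio_prec_enq() is easiest to follow through
concrete cases (illustrative, derived from the code above):

	/* txq full, incoming prec 2, lowest-prec tail found at eprec 1:
	 *	evict that tail, enqueue the new packet -> returns true
	 * txq full, lowest-prec tail at eprec 3 (> incoming prec 2):
	 *	the new packet is the lowest priority   -> returns false
	 * per-prec queue 2 itself full (eprec == prec == 2):
	 *	refuse the newer, incoming packet       -> returns false
	 */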
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
index f2d06cae366a..8eb42620129c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
@@ -14,8 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _BRCM_SDH_H_
-#define _BRCM_SDH_H_
+#ifndef BRCMFMAC_SDIO_H
+#define BRCMFMAC_SDIO_H
#include <linux/skbuff.h>
#include <linux/firmware.h>
@@ -186,6 +186,7 @@ struct brcmf_sdio_dev {
struct sg_table sgtable;
char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+ bool wowl_enabled;
};
/* sdio core registers */
@@ -334,5 +335,6 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus);
void brcmf_sdio_isr(struct brcmf_sdio *bus);
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
-#endif /* _BRCM_SDH_H_ */
+#endif /* BRCMFMAC_SDIO_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
index b505db48c60d..a10f35c5eb3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
@@ -19,4 +19,19 @@
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "tracepoint.h"
+
+void __brcmf_err(const char *func, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ pr_err("%s: %pV", func, &vaf);
+ trace_brcmf_err(func, &vaf);
+ va_end(args);
+}
+
#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 875d1142c8b0..4572defc280f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -23,13 +23,12 @@
#include <brcmu_utils.h>
#include <brcm_hw_ids.h>
#include <brcmu_wifi.h>
-#include <dhd_bus.h>
-#include <dhd_dbg.h>
-
+#include "bus.h"
+#include "debug.h"
#include "firmware.h"
-#include "usb_rdl.h"
#include "usb.h"
+
#define IOCTL_RESP_TIMEOUT 2000
#define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */
@@ -49,6 +48,71 @@
#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
#define BRCMF_USB_43569_FW_NAME "brcm/brcmfmac43569.bin"
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_MAX_OFFSET 3 /* Max number of file offsets */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx holds uncompressed img */
+#define TRX_RDL_CHUNK 1500 /* size of each dl transfer */
+#define TRX_OFFSETS_DLFWLEN_IDX 0
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
+#define DL_CHECK_CRC 1 /* currently unused */
+#define DL_GO 2 /* execute downloaded image */
+#define DL_START 3 /* initialize dl state */
+#define DL_REBOOT 4 /* reboot the device in 2 seconds */
+#define DL_GETVER 5 /* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset
+ * event to occur in 2 seconds. It is the
+ * responsibility of the downloaded code to
+ * clear this event
+ */
+#define DL_EXEC 7 /* jump to a supplied address */
+#define DL_RESETCFG 8 /* To support single enum on dongle
+ * - Not used by bootloader
+ */
+#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
+ * if resp unavailable
+ */
+
+/* states */
+#define DL_WAITING 0 /* waiting to rx first pkt */
+#define DL_READY 1 /* hdr was good, waiting for more of the
+ * compressed image
+ */
+#define DL_BAD_HDR 2 /* hdr was corrupted */
+#define DL_BAD_CRC 3 /* compressed image was corrupted */
+#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */
+#define DL_START_FAIL 5 /* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM
+ * value
+ */
+#define DL_IMAGE_TOOBIG 7 /* firmware image too big */
+
+
+struct trx_header_le {
+ __le32 magic; /* "HDR0" */
+ __le32 len; /* Length of file including header */
+ __le32 crc32; /* CRC from flag_version to end of file */
+ __le32 flag_version; /* 0:15 flags, 16:31 version */
+ __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of
+ * header
+ */
+};
+
+struct rdl_state_le {
+ __le32 state;
+ __le32 bytes;
+};
+
+struct bootrom_id_le {
+ __le32 chip; /* Chip id */
+ __le32 chiprev; /* Chip rev */
+ __le32 ramsize; /* Size of RAM */
+ __le32 remapbase; /* Current remap base address */
+ __le32 boardtype; /* Type of board */
+ __le32 boardrev; /* Board revision */
+};
+
struct brcmf_usb_image {
struct list_head list;
s8 *fwname;
@@ -93,6 +157,8 @@ struct brcmf_usbdev_info {
u8 ifnum;
struct urb *bulk_urb; /* used for FW download */
+
+ bool wowl_enabled;
};
static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -600,6 +666,16 @@ static int brcmf_usb_up(struct device *dev)
return 0;
}
+static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
+{
+ if (devinfo->ctl_urb)
+ usb_kill_urb(devinfo->ctl_urb);
+ if (devinfo->bulk_urb)
+ usb_kill_urb(devinfo->bulk_urb);
+ brcmf_usb_free_q(&devinfo->tx_postq, true);
+ brcmf_usb_free_q(&devinfo->rx_postq, true);
+}
+
static void brcmf_usb_down(struct device *dev)
{
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
@@ -613,14 +689,7 @@ static void brcmf_usb_down(struct device *dev)
brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
- if (devinfo->ctl_urb)
- usb_kill_urb(devinfo->ctl_urb);
-
- if (devinfo->bulk_urb)
- usb_kill_urb(devinfo->bulk_urb);
- brcmf_usb_free_q(&devinfo->tx_postq, true);
-
- brcmf_usb_free_q(&devinfo->rx_postq, true);
+ brcmf_cancel_all_urbs(devinfo);
}
static void
@@ -785,7 +854,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen);
- bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC);
+ bulkchunk = kmalloc(TRX_RDL_CHUNK, GFP_ATOMIC);
if (bulkchunk == NULL) {
err = -ENOMEM;
goto fail;
@@ -812,10 +881,10 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
/* Wait until the usb device reports it received all
* the bytes we sent */
if ((rdlbytes == sent) && (rdlbytes != dllen)) {
- if ((dllen-sent) < RDL_CHUNK)
+ if ((dllen-sent) < TRX_RDL_CHUNK)
sendlen = dllen-sent;
else
- sendlen = RDL_CHUNK;
+ sendlen = TRX_RDL_CHUNK;
/* simply avoid having to send a ZLP by ensuring we
* never have an even
@@ -980,21 +1049,6 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
kfree(devinfo->rx_reqs);
}
-#define TRX_MAGIC 0x30524448 /* "HDR0" */
-#define TRX_VERSION 1 /* Version 1 */
-#define TRX_MAX_LEN 0x3B0000 /* Max length */
-#define TRX_NO_HEADER 1 /* Do not write TRX header */
-#define TRX_MAX_OFFSET 3 /* Max number of individual files */
-#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed image */
-
-struct trx_header_le {
- __le32 magic; /* "HDR0" */
- __le32 len; /* Length of file including header */
- __le32 crc32; /* CRC from flag_version to end of file */
- __le32 flag_version; /* 0:15 flags, 16:31 version */
- __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of
- * header */
-};
static int check_file(const u8 *headers)
{
@@ -1096,11 +1150,24 @@ error:
return NULL;
}
+static void brcmf_usb_wowl_config(struct device *dev, bool enabled)
+{
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+ brcmf_dbg(USB, "Configuring WOWL, enabled=%d\n", enabled);
+ devinfo->wowl_enabled = enabled;
+ if (enabled)
+ device_set_wakeup_enable(devinfo->dev, true);
+ else
+ device_set_wakeup_enable(devinfo->dev, false);
+}
+
static struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
.stop = brcmf_usb_down,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
+ .wowl_config = brcmf_usb_wowl_config,
};
static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
@@ -1188,6 +1255,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->ops = &brcmf_usb_bus_ops;
bus->proto_type = BRCMF_PROTO_BCDC;
bus->always_use_fws_queue = true;
+#ifdef CONFIG_PM
+ bus->wowl_supported = true;
+#endif
if (!brcmf_usb_dlneeded(devinfo)) {
ret = brcmf_usb_bus_setup(devinfo);
@@ -1341,7 +1411,10 @@ static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
brcmf_dbg(USB, "Enter\n");
devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
- brcmf_detach(&usb->dev);
+ if (devinfo->wowl_enabled)
+ brcmf_cancel_all_urbs(devinfo);
+ else
+ brcmf_detach(&usb->dev);
return 0;
}
@@ -1354,7 +1427,12 @@ static int brcmf_usb_resume(struct usb_interface *intf)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
brcmf_dbg(USB, "Enter\n");
- return brcmf_usb_bus_setup(devinfo);
+ if (!devinfo->wowl_enabled)
+ return brcmf_usb_bus_setup(devinfo);
+
+ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_UP;
+ brcmf_usb_rx_fill_all(devinfo);
+ return 0;
}
static int brcmf_usb_reset_resume(struct usb_interface *intf)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h b/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h
deleted file mode 100644
index 0a35c51c3da2..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb_rdl.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _USB_RDL_H
-#define _USB_RDL_H
-
-/* Control messages: bRequest values */
-#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
-#define DL_CHECK_CRC 1 /* currently unused */
-#define DL_GO 2 /* execute downloaded image */
-#define DL_START 3 /* initialize dl state */
-#define DL_REBOOT 4 /* reboot the device in 2 seconds */
-#define DL_GETVER 5 /* returns the bootrom_id_t struct */
-#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset
- * event to occur in 2 seconds. It is the
- * responsibility of the downloaded code to
- * clear this event
- */
-#define DL_EXEC 7 /* jump to a supplied address */
-#define DL_RESETCFG 8 /* To support single enum on dongle
- * - Not used by bootloader
- */
-#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
- * if resp unavailable
- */
-
-/* states */
-#define DL_WAITING 0 /* waiting to rx first pkt */
-#define DL_READY 1 /* hdr was good, waiting for more of the
- * compressed image */
-#define DL_BAD_HDR 2 /* hdr was corrupted */
-#define DL_BAD_CRC 3 /* compressed image was corrupted */
-#define DL_RUNNABLE 4 /* download was successful,waiting for go cmd */
-#define DL_START_FAIL 5 /* failed to initialize correctly */
-#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM
- * value */
-#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START
- * for rdl) */
-
-struct rdl_state_le {
- __le32 state;
- __le32 bytes;
-};
-
-struct bootrom_id_le {
- __le32 chip; /* Chip id */
- __le32 chiprev; /* Chip rev */
- __le32 ramsize; /* Size of RAM */
- __le32 remapbase; /* Current remap base address */
- __le32 boardtype; /* Type of board */
- __le32 boardrev; /* Board revision */
-};
-
-#define RDL_CHUNK 1500 /* size of each dl transfer */
-
-#define TRX_OFFSETS_DLFWLEN_IDX 0
-#define TRX_OFFSETS_JUMPTO_IDX 1
-#define TRX_OFFSETS_NVM_LEN_IDX 2
-
-#define TRX_OFFSETS_DLBASE_IDX 0
-
-#endif /* _USB_RDL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
index 5960d827508c..50cdf7090198 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -20,10 +20,10 @@
#include <brcmu_wifi.h>
#include "fwil_types.h"
-#include "dhd.h"
+#include "core.h"
#include "p2p.h"
-#include "dhd_dbg.h"
-#include "wl_cfg80211.h"
+#include "debug.h"
+#include "cfg80211.h"
#include "vendor.h"
#include "fwil.h"
@@ -31,8 +31,8 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int len)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_if *ifp;
const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
struct sk_buff *reply;
int ret, payload, ret_len;
@@ -42,6 +42,9 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
cmdhdr->len);
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+ ifp = vif->ifp;
+
len -= sizeof(struct brcmf_vndr_dcmd_hdr);
ret_len = cmdhdr->len;
if (ret_len > 0 || len > 0) {
@@ -63,11 +66,11 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
}
if (cmdhdr->set)
- ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), cmdhdr->cmd,
- dcmd_buf, ret_len);
+ ret = brcmf_fil_cmd_data_set(ifp, cmdhdr->cmd, dcmd_buf,
+ ret_len);
else
- ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), cmdhdr->cmd,
- dcmd_buf, ret_len);
+ ret = brcmf_fil_cmd_data_get(ifp, cmdhdr->cmd, dcmd_buf,
+ ret_len);
if (ret != 0)
goto exit;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
index a5d4add26f41..c9a8b9360ab1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
@@ -30,6 +30,7 @@
#include "main.h"
#include "debug.h"
#include "brcms_trace_events.h"
+#include "phy/phy_int.h"
static struct dentry *root_folder;
@@ -71,48 +72,161 @@ struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr)
}
static
-ssize_t brcms_debugfs_hardware_read(struct file *f, char __user *data,
- size_t count, loff_t *ppos)
+int brcms_debugfs_hardware_read(struct seq_file *s, void *data)
{
- char buf[128];
- int res;
- struct brcms_pub *drvr = f->private_data;
-
- /* only allow read from start */
- if (*ppos > 0)
- return 0;
-
- res = scnprintf(buf, sizeof(buf),
- "board vendor: %x\n"
- "board type: %x\n"
- "board revision: %x\n"
- "board flags: %x\n"
- "board flags2: %x\n"
- "firmware revision: %x\n",
- drvr->wlc->hw->d11core->bus->boardinfo.vendor,
- drvr->wlc->hw->d11core->bus->boardinfo.type,
- drvr->wlc->hw->boardrev,
- drvr->wlc->hw->boardflags,
- drvr->wlc->hw->boardflags2,
- drvr->wlc->ucode_rev
- );
-
- return simple_read_from_buffer(data, count, ppos, buf, res);
+ struct brcms_pub *drvr = s->private;
+ struct brcms_hardware *hw = drvr->wlc->hw;
+ struct bcma_device *core = hw->d11core;
+ struct bcma_bus *bus = core->bus;
+ char boardrev[10];
+
+ seq_printf(s, "chipnum 0x%x\n"
+ "chiprev 0x%x\n"
+ "chippackage 0x%x\n"
+ "corerev 0x%x\n"
+ "boardid 0x%x\n"
+ "boardvendor 0x%x\n"
+ "boardrev %s\n"
+ "boardflags 0x%x\n"
+ "boardflags2 0x%x\n"
+ "ucoderev 0x%x\n"
+ "radiorev 0x%x\n"
+ "phytype 0x%x\n"
+ "phyrev 0x%x\n"
+ "anarev 0x%x\n"
+ "nvramrev %d\n",
+ bus->chipinfo.id, bus->chipinfo.rev, bus->chipinfo.pkg,
+ core->id.rev, bus->boardinfo.type, bus->boardinfo.vendor,
+ brcmu_boardrev_str(hw->boardrev, boardrev),
+ drvr->wlc->hw->boardflags, drvr->wlc->hw->boardflags2,
+ drvr->wlc->ucode_rev, hw->band->radiorev,
+ hw->band->phytype, hw->band->phyrev, hw->band->pi->ana_rev,
+ hw->sromrev);
+ return 0;
+}
+
+static int brcms_debugfs_macstat_read(struct seq_file *s, void *data)
+{
+ struct brcms_pub *drvr = s->private;
+ struct brcms_info *wl = drvr->ieee_hw->priv;
+ struct macstat stats;
+ int i;
+
+ spin_lock_bh(&wl->lock);
+ stats = *(drvr->wlc->core->macstat_snapshot);
+ spin_unlock_bh(&wl->lock);
+
+ seq_printf(s, "txallfrm: %d\n", stats.txallfrm);
+ seq_printf(s, "txrtsfrm: %d\n", stats.txrtsfrm);
+ seq_printf(s, "txctsfrm: %d\n", stats.txctsfrm);
+ seq_printf(s, "txackfrm: %d\n", stats.txackfrm);
+ seq_printf(s, "txdnlfrm: %d\n", stats.txdnlfrm);
+ seq_printf(s, "txbcnfrm: %d\n", stats.txbcnfrm);
+ seq_printf(s, "txfunfl[8]:");
+ for (i = 0; i < ARRAY_SIZE(stats.txfunfl); i++)
+ seq_printf(s, " %d", stats.txfunfl[i]);
+ seq_printf(s, "\ntxtplunfl: %d\n", stats.txtplunfl);
+ seq_printf(s, "txphyerr: %d\n", stats.txphyerr);
+ seq_printf(s, "pktengrxducast: %d\n", stats.pktengrxducast);
+ seq_printf(s, "pktengrxdmcast: %d\n", stats.pktengrxdmcast);
+ seq_printf(s, "rxfrmtoolong: %d\n", stats.rxfrmtoolong);
+ seq_printf(s, "rxfrmtooshrt: %d\n", stats.rxfrmtooshrt);
+ seq_printf(s, "rxinvmachdr: %d\n", stats.rxinvmachdr);
+ seq_printf(s, "rxbadfcs: %d\n", stats.rxbadfcs);
+ seq_printf(s, "rxbadplcp: %d\n", stats.rxbadplcp);
+ seq_printf(s, "rxcrsglitch: %d\n", stats.rxcrsglitch);
+ seq_printf(s, "rxstrt: %d\n", stats.rxstrt);
+ seq_printf(s, "rxdfrmucastmbss: %d\n", stats.rxdfrmucastmbss);
+ seq_printf(s, "rxmfrmucastmbss: %d\n", stats.rxmfrmucastmbss);
+ seq_printf(s, "rxcfrmucast: %d\n", stats.rxcfrmucast);
+ seq_printf(s, "rxrtsucast: %d\n", stats.rxrtsucast);
+ seq_printf(s, "rxctsucast: %d\n", stats.rxctsucast);
+ seq_printf(s, "rxackucast: %d\n", stats.rxackucast);
+ seq_printf(s, "rxdfrmocast: %d\n", stats.rxdfrmocast);
+ seq_printf(s, "rxmfrmocast: %d\n", stats.rxmfrmocast);
+ seq_printf(s, "rxcfrmocast: %d\n", stats.rxcfrmocast);
+ seq_printf(s, "rxrtsocast: %d\n", stats.rxrtsocast);
+ seq_printf(s, "rxctsocast: %d\n", stats.rxctsocast);
+ seq_printf(s, "rxdfrmmcast: %d\n", stats.rxdfrmmcast);
+ seq_printf(s, "rxmfrmmcast: %d\n", stats.rxmfrmmcast);
+ seq_printf(s, "rxcfrmmcast: %d\n", stats.rxcfrmmcast);
+ seq_printf(s, "rxbeaconmbss: %d\n", stats.rxbeaconmbss);
+ seq_printf(s, "rxdfrmucastobss: %d\n", stats.rxdfrmucastobss);
+ seq_printf(s, "rxbeaconobss: %d\n", stats.rxbeaconobss);
+ seq_printf(s, "rxrsptmout: %d\n", stats.rxrsptmout);
+ seq_printf(s, "bcntxcancl: %d\n", stats.bcntxcancl);
+ seq_printf(s, "rxf0ovfl: %d\n", stats.rxf0ovfl);
+ seq_printf(s, "rxf1ovfl: %d\n", stats.rxf1ovfl);
+ seq_printf(s, "rxf2ovfl: %d\n", stats.rxf2ovfl);
+ seq_printf(s, "txsfovfl: %d\n", stats.txsfovfl);
+ seq_printf(s, "pmqovfl: %d\n", stats.pmqovfl);
+ seq_printf(s, "rxcgprqfrm: %d\n", stats.rxcgprqfrm);
+ seq_printf(s, "rxcgprsqovfl: %d\n", stats.rxcgprsqovfl);
+ seq_printf(s, "txcgprsfail: %d\n", stats.txcgprsfail);
+ seq_printf(s, "txcgprssuc: %d\n", stats.txcgprssuc);
+ seq_printf(s, "prs_timeout: %d\n", stats.prs_timeout);
+ seq_printf(s, "rxnack: %d\n", stats.rxnack);
+ seq_printf(s, "frmscons: %d\n", stats.frmscons);
+ seq_printf(s, "txnack: %d\n", stats.txnack);
+ seq_printf(s, "txglitch_nack: %d\n", stats.txglitch_nack);
+ seq_printf(s, "txburst: %d\n", stats.txburst);
+ seq_printf(s, "bphy_rxcrsglitch: %d\n", stats.bphy_rxcrsglitch);
+ seq_printf(s, "phywatchdog: %d\n", stats.phywatchdog);
+ seq_printf(s, "bphy_badplcp: %d\n", stats.bphy_badplcp);
+ return 0;
}
-static const struct file_operations brcms_debugfs_hardware_ops = {
+struct brcms_debugfs_entry {
+ int (*read)(struct seq_file *seq, void *data);
+ struct brcms_pub *drvr;
+};
+
+static int brcms_debugfs_entry_open(struct inode *inode, struct file *f)
+{
+ struct brcms_debugfs_entry *entry = inode->i_private;
+
+ return single_open(f, entry->read, entry->drvr);
+}
+
+static const struct file_operations brcms_debugfs_def_ops = {
.owner = THIS_MODULE,
- .open = simple_open,
- .read = brcms_debugfs_hardware_read
+ .open = brcms_debugfs_entry_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
};
+static int
+brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn,
+ int (*read_fn)(struct seq_file *seq, void *data))
+{
+ struct device *dev = &drvr->wlc->hw->d11core->dev;
+ struct dentry *dentry = drvr->dbgfs_dir;
+ struct brcms_debugfs_entry *entry;
+
+ if (IS_ERR_OR_NULL(dentry))
+ return -ENOENT;
+
+ entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->read = read_fn;
+ entry->drvr = drvr;
+
+ dentry = debugfs_create_file(fn, S_IRUGO, dentry, entry,
+ &brcms_debugfs_def_ops);
+
+ return PTR_ERR_OR_ZERO(dentry);
+}
+
void brcms_debugfs_create_files(struct brcms_pub *drvr)
{
- struct dentry *dentry = drvr->dbgfs_dir;
+ if (IS_ERR_OR_NULL(drvr->dbgfs_dir))
+ return;
- if (!IS_ERR_OR_NULL(dentry))
- debugfs_create_file("hardware", S_IRUGO, dentry,
- drvr, &brcms_debugfs_hardware_ops);
+ brcms_debugfs_add_entry(drvr, "hardware", brcms_debugfs_hardware_read);
+ brcms_debugfs_add_entry(drvr, "macstat", brcms_debugfs_macstat_read);
}
#define __brcms_fn(fn) \
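
The debugfs rework funnels every file through one open helper: single_open()
stores its third argument in seq_file->private, which is how the read
callbacks above recover the brcms_pub context. A minimal sketch of adding a
further entry under this scheme (example_read and the "example" file name
are hypothetical):

static int example_read(struct seq_file *s, void *data)
{
	struct brcms_pub *drvr = s->private;	/* set by single_open() */

	seq_printf(s, "unit %d\n", drvr->unit);
	return 0;
}

	/* registered from brcms_debugfs_create_files(): */
	brcms_debugfs_add_entry(drvr, "example", example_read);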
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 43c71bfaa474..f95b52442281 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -764,7 +764,9 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
return;
}
-static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw)
+static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct brcms_info *wl = hw->priv;
spin_lock_bh(&wl->lock);
@@ -773,7 +775,8 @@ static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw)
return;
}
-static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw)
+static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct brcms_info *wl = hw->priv;
spin_lock_bh(&wl->lock);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 1b474828d5b8..a104d7ac3796 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -445,18 +445,18 @@ static void brcms_c_detach_mfree(struct brcms_c_info *wlc)
kfree(wlc->protection);
kfree(wlc->stf);
kfree(wlc->bandstate[0]);
- kfree(wlc->corestate->macstat_snapshot);
+ if (wlc->corestate)
+ kfree(wlc->corestate->macstat_snapshot);
kfree(wlc->corestate);
- kfree(wlc->hw->bandstate[0]);
+ if (wlc->hw)
+ kfree(wlc->hw->bandstate[0]);
kfree(wlc->hw);
if (wlc->beacon)
dev_kfree_skb_any(wlc->beacon);
if (wlc->probe_resp)
dev_kfree_skb_any(wlc->probe_resp);
- /* free the wlc */
kfree(wlc);
- wlc = NULL;
}
static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit)
@@ -1009,8 +1009,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
if (txh)
trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
sizeof(*txh));
- if (p)
- brcmu_pkt_buf_free_skb(p);
+ brcmu_pkt_buf_free_skb(p);
}
if (dma && queue < NFIFO) {
@@ -3081,7 +3080,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
static void brcms_c_statsupd(struct brcms_c_info *wlc)
{
int i;
- struct macstat macstats;
+ struct macstat *macstats;
#ifdef DEBUG
u16 delta;
u16 rxf0ovfl;
@@ -3092,31 +3091,31 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
if (!wlc->pub->up)
return;
+ macstats = wlc->core->macstat_snapshot;
+
#ifdef DEBUG
/* save last rx fifo 0 overflow count */
- rxf0ovfl = wlc->core->macstat_snapshot->rxf0ovfl;
+ rxf0ovfl = macstats->rxf0ovfl;
/* save last tx fifo underflow count */
for (i = 0; i < NFIFO; i++)
- txfunfl[i] = wlc->core->macstat_snapshot->txfunfl[i];
+ txfunfl[i] = macstats->txfunfl[i];
#endif /* DEBUG */
/* Read mac stats from contiguous shared memory */
- brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, &macstats,
- sizeof(struct macstat), OBJADDR_SHM_SEL);
+ brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, macstats,
+ sizeof(*macstats), OBJADDR_SHM_SEL);
#ifdef DEBUG
/* check for rx fifo 0 overflow */
- delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
+ delta = (u16)(macstats->rxf0ovfl - rxf0ovfl);
if (delta)
brcms_err(wlc->hw->d11core, "wl%d: %u rx fifo 0 overflows!\n",
wlc->pub->unit, delta);
/* check for tx fifo underflows */
for (i = 0; i < NFIFO; i++) {
- delta =
- (u16) (wlc->core->macstat_snapshot->txfunfl[i] -
- txfunfl[i]);
+ delta = macstats->txfunfl[i] - txfunfl[i];
if (delta)
brcms_err(wlc->hw->d11core,
"wl%d: %u tx fifo %d underflows!\n",
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index 0f7e1c7b6f58..906e89ddf319 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -261,6 +261,21 @@ struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
}
EXPORT_SYMBOL(brcmu_pktq_mdeq);
+/* Produce a human-readable string for boardrev */
+char *brcmu_boardrev_str(u32 brev, char *buf)
+{
+ char c;
+
+ if (brev < 0x100) {
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ } else {
+ c = (brev & 0xf000) == 0x1000 ? 'P' : 'A';
+ snprintf(buf, 8, "%c%03x", c, brev & 0xfff);
+ }
+ return buf;
+}
+EXPORT_SYMBOL(brcmu_boardrev_str);
+
#if defined(DEBUG)
/* pretty hex print a pkt buffer chain */
void brcmu_prpkt(const char *msg, struct sk_buff *p0)
@@ -292,4 +307,5 @@ void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, size);
}
EXPORT_SYMBOL(brcmu_dbg_hex_dump);
+
#endif /* defined(DEBUG) */
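
Sample outputs of the new helper, assuming a buffer of at least eight bytes
(the debugfs caller above passes char[10]):

	/* brcmu_boardrev_str(0x22, buf)   -> "2.2"   (brev < 0x100)
	 * brcmu_boardrev_str(0x1203, buf) -> "P203"  (0x1xxx revisions)
	 * brcmu_boardrev_str(0x2301, buf) -> "A301"  (all others >= 0x100)
	 */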
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index af26e0de1e5c..6996fcc144cf 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -68,6 +68,8 @@
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
+#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
+#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 8ba445b3fd72..a043e29f07e2 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -218,4 +218,6 @@ void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
}
#endif
+char *brcmu_boardrev_str(u32 brev, char *buf);
+
#endif /* _BRCMU_UTILS_H_ */
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
index b2fb6c632092..f2e276faca70 100644
--- a/drivers/net/wireless/cw1200/scan.c
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -78,7 +78,7 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
return -EINVAL;
- frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0,
+ frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
if (!frame.skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index edc344334a75..67cad9b05ad8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1363,7 +1363,7 @@ static ssize_t show_cmd_log(struct device *d,
if (!priv->cmdlog)
return 0;
for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
- (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
+ (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
i = (i + 1) % priv->cmdlog_len) {
len +=
snprintf(buf + len, PAGE_SIZE - len,
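
The loop-bound fix above matters because snprintf() returns the number of
bytes it would have written, not the number that fit:

	/* once a line is truncated, len can exceed PAGE_SIZE; the old test
	 * "(PAGE_SIZE - len)" then evaluates to a large non-zero value and
	 * the loop keeps calling snprintf() with an underflowed size, while
	 * "len < PAGE_SIZE" terminates it as intended.
	 */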
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 5ce2f59d3378..b0571618c2ed 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -654,10 +654,6 @@ struct libipw_network {
/* TPC Report - mandatory if spctrm mgmt required */
struct libipw_tpc_report tpc_report;
- /* IBSS DFS - mandatory if spctrm mgmt required and IBSS
- * NOTE: This is variable length and so must be allocated dynamically */
- struct libipw_ibss_dfs *ibss_dfs;
-
/* Channel Switch Announcement - optional if spctrm mgmt required */
struct libipw_csa csa;
@@ -970,7 +966,6 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
/* make sure to set stats->len */
void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
struct libipw_rx_stats *stats);
-void libipw_network_reset(struct libipw_network *network);
/* libipw_geo.c */
const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 5f31b72a4921..60f28740f6af 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -84,25 +84,12 @@ static int libipw_networks_allocate(struct libipw_device *ieee)
return 0;
}
-void libipw_network_reset(struct libipw_network *network)
-{
- if (!network)
- return;
-
- if (network->ibss_dfs) {
- kfree(network->ibss_dfs);
- network->ibss_dfs = NULL;
- }
-}
-
static inline void libipw_networks_free(struct libipw_device *ieee)
{
int i;
- for (i = 0; i < MAX_NETWORK_COUNT; i++) {
- kfree(ieee->networks[i]->ibss_dfs);
+ for (i = 0; i < MAX_NETWORK_COUNT; i++)
kfree(ieee->networks[i]);
- }
}
void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 2d66984079bb..a6877dd6ba73 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1298,13 +1298,6 @@ static int libipw_parse_info_param(struct libipw_info_element
break;
case WLAN_EID_IBSS_DFS:
- if (network->ibss_dfs)
- break;
- network->ibss_dfs = kmemdup(info_element->data,
- info_element->len,
- GFP_ATOMIC);
- if (!network->ibss_dfs)
- return 1;
network->flags |= NETWORK_HAS_IBSS_DFS;
break;
@@ -1335,9 +1328,7 @@ static int libipw_parse_info_param(struct libipw_info_element
static int libipw_handle_assoc_resp(struct libipw_device *ieee, struct libipw_assoc_response
*frame, struct libipw_rx_stats *stats)
{
- struct libipw_network network_resp = {
- .ibss_dfs = NULL,
- };
+ struct libipw_network network_resp = { };
struct libipw_network *network = &network_resp;
struct net_device *dev = ieee->dev;
@@ -1472,9 +1463,6 @@ static void update_network(struct libipw_network *dst,
int qos_active;
u8 old_param;
- libipw_network_reset(dst);
- dst->ibss_dfs = src->ibss_dfs;
-
/* We only update the statistics if they were created by receiving
* the network information on the actual channel the network is on.
*
@@ -1548,9 +1536,7 @@ static void libipw_process_probe_response(struct libipw_device
*stats)
{
struct net_device *dev = ieee->dev;
- struct libipw_network network = {
- .ibss_dfs = NULL,
- };
+ struct libipw_network network = { };
struct libipw_network *target;
struct libipw_network *oldest = NULL;
#ifdef CONFIG_LIBIPW_DEBUG
@@ -1618,7 +1604,6 @@ static void libipw_process_probe_response(struct libipw_device
LIBIPW_DEBUG_SCAN("Expired '%*pE' (%pM) from network list.\n",
target->ssid_len, target->ssid,
target->bssid);
- libipw_network_reset(target);
} else {
/* Otherwise just pull from the free list */
target = list_entry(ieee->network_free_list.next,
@@ -1634,7 +1619,6 @@ static void libipw_process_probe_response(struct libipw_device
"BEACON" : "PROBE RESPONSE");
#endif
memcpy(target, &network, sizeof(*target));
- network.ibss_dfs = NULL;
list_add_tail(&target->list, &ieee->network_list);
} else {
LIBIPW_DEBUG_SCAN("Updating '%*pE' (%pM) via %s.\n",
@@ -1643,7 +1627,6 @@ static void libipw_process_probe_response(struct libipw_device
is_beacon(beacon->header.frame_ctl) ?
"BEACON" : "PROBE RESPONSE");
update_network(target, &network);
- network.ibss_dfs = NULL;
}
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 26fec54dcd03..2748fde4b90c 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6063,7 +6063,7 @@ il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
void
-il4965_mac_channel_switch(struct ieee80211_hw *hw,
+il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch)
{
struct il_priv *il = hw->priv;
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 337dfcf3bbde..3a57f71b8ed5 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -187,8 +187,9 @@ int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 buf_size);
int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void il4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch);
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *ch_switch);
void il4965_led_enable(struct il_priv *il);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 267e48a2915e..ab019b45551b 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -59,6 +59,7 @@ config IWLDVM
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
+ select WANT_DEV_COREDUMP
help
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 751ae1d10b7f..7a34e4d158d1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -966,21 +966,21 @@ struct iwl_rem_sta_cmd {
/* WiFi queues mask */
-#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0))
-#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1))
-#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2))
-#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3))
-#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3))
+#define IWL_SCD_BK_MSK BIT(0)
+#define IWL_SCD_BE_MSK BIT(1)
+#define IWL_SCD_VI_MSK BIT(2)
+#define IWL_SCD_VO_MSK BIT(3)
+#define IWL_SCD_MGMT_MSK BIT(3)
/* PAN queues mask */
-#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4))
-#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5))
-#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6))
-#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7))
-#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7))
-#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8))
+#define IWL_PAN_SCD_BK_MSK BIT(4)
+#define IWL_PAN_SCD_BE_MSK BIT(5)
+#define IWL_PAN_SCD_VI_MSK BIT(6)
+#define IWL_PAN_SCD_VO_MSK BIT(7)
+#define IWL_PAN_SCD_MGMT_MSK BIT(7)
+#define IWL_PAN_SCD_MULTICAST_MSK BIT(8)
-#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
+#define IWL_AGG_TX_QUEUE_MSK 0xffc00
#define IWL_DROP_ALL BIT(1)
@@ -1005,12 +1005,17 @@ struct iwl_rem_sta_cmd {
* 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
* 2: Dump all FIFO
*/
-struct iwl_txfifo_flush_cmd {
+struct iwl_txfifo_flush_cmd_v3 {
__le32 queue_control;
__le16 flush_control;
__le16 reserved;
} __packed;
+struct iwl_txfifo_flush_cmd_v2 {
+ __le16 queue_control;
+ __le16 flush_control;
+} __packed;
+
/*
* REPLY_WEP_KEY = 0x20
*/
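Note: the split into v3 and v2 layouts lets the driver send whichever command size the firmware API expects: v2 is two little-endian 16-bit words (4 bytes), v3 widens queue_control to 32 bits (8 bytes). A standalone sketch that checks the wire sizes; the le16/le32 typedefs are stand-ins for the kernel's __le16/__le32:

#include <assert.h>
#include <stdint.h>

typedef uint16_t le16;	/* stand-ins for the kernel types */
typedef uint32_t le32;

struct flush_cmd_v3 {
	le32 queue_control;
	le16 flush_control;
	le16 reserved;
} __attribute__((packed));

struct flush_cmd_v2 {
	le16 queue_control;
	le16 flush_control;
} __attribute__((packed));

int main(void)
{
	static_assert(sizeof(struct flush_cmd_v3) == 8, "v3 is 8 bytes");
	static_assert(sizeof(struct flush_cmd_v2) == 4, "v2 is 4 bytes");
	return 0;
}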
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 2191621d69c1..1d2223df5cb0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -137,37 +137,38 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
*/
int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
{
- struct iwl_txfifo_flush_cmd flush_cmd;
- struct iwl_host_cmd cmd = {
- .id = REPLY_TXFIFO_FLUSH,
- .len = { sizeof(struct iwl_txfifo_flush_cmd), },
- .data = { &flush_cmd, },
+ struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = {
+ .flush_control = cpu_to_le16(IWL_DROP_ALL),
+ };
+ struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = {
+ .flush_control = cpu_to_le16(IWL_DROP_ALL),
};
- memset(&flush_cmd, 0, sizeof(flush_cmd));
+ u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+ IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK;
- flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
- IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
- IWL_SCD_MGMT_MSK;
if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
- flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
- IWL_PAN_SCD_VI_MSK |
- IWL_PAN_SCD_BE_MSK |
- IWL_PAN_SCD_BK_MSK |
- IWL_PAN_SCD_MGMT_MSK |
- IWL_PAN_SCD_MULTICAST_MSK;
+ queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK |
+ IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK |
+ IWL_PAN_SCD_MGMT_MSK |
+ IWL_PAN_SCD_MULTICAST_MSK;
if (priv->nvm_data->sku_cap_11n_enable)
- flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
+ queue_control |= IWL_AGG_TX_QUEUE_MSK;
if (scd_q_msk)
- flush_cmd.queue_control = cpu_to_le32(scd_q_msk);
-
- IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
- flush_cmd.queue_control);
- flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
-
- return iwl_dvm_send_cmd(priv, &cmd);
+ queue_control = scd_q_msk;
+
+ IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control);
+ flush_cmd_v3.queue_control = cpu_to_le32(queue_control);
+ flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control);
+
+ if (IWL_UCODE_API(priv->fw->ucode_ver) > 2)
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
+ sizeof(flush_cmd_v3),
+ &flush_cmd_v3);
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
+ sizeof(flush_cmd_v2), &flush_cmd_v2);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
@@ -418,8 +419,8 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
{
- return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >>
- BT_UART_MSG_FRAME3SCOESCO_POS;
+ return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
+ BT_UART_MSG_FRAME3SCOESCO_POS;
}
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
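Note: the one-line change to iwlagn_bt_traffic_is_sco() above is an operator-precedence fix: in C, >> binds tighter than &, so the old expression masked the already-shifted value. A standalone sketch with illustrative bit values (not the real BT_UART register layout):

#include <stdio.h>
#include <stdint.h>

#define SCO_POS	3
#define SCO_MSK	(1u << SCO_POS)

int main(void)
{
	uint8_t frame3 = SCO_MSK;		/* SCO bit set */

	/* old parse: & is lower precedence, so the mask hits the
	 * already-shifted value: 8 & (8 >> 3) == 0 */
	unsigned int buggy = SCO_MSK & frame3 >> SCO_POS;
	/* fixed parse: mask first, then shift the bit down to bit 0 */
	unsigned int fixed = (SCO_MSK & frame3) >> SCO_POS;

	printf("buggy=%u fixed=%u\n", buggy, fixed);	/* buggy=0 fixed=1 */
	return 0;
}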
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cae692ff1013..47e64e8b9517 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -941,6 +941,7 @@ static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
}
static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index b04b8858c690..e5be2d21868f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -73,12 +73,12 @@
#define IWL3160_UCODE_API_MAX 10
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 9
-#define IWL3160_UCODE_API_OK 9
+#define IWL7260_UCODE_API_OK 10
+#define IWL3160_UCODE_API_OK 10
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 8
-#define IWL3160_UCODE_API_MIN 8
+#define IWL7260_UCODE_API_MIN 9
+#define IWL3160_UCODE_API_MIN 9
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -89,6 +89,8 @@
#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265_NVM_VERSION 0x0a1d
#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
+#define IWL7265D_NVM_VERSION 0x0c11
#define IWL7260_FW_PRE "iwlwifi-7260-"
#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -102,6 +104,9 @@
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_FW_PRE "iwlwifi-7265D-"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
+
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
static const struct iwl_base_params iwl7000_base_params = {
@@ -132,8 +137,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.base_params = &iwl7000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \
- .non_shared_ant = ANT_A
-
+ .non_shared_ant = ANT_A, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl7260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7260",
@@ -267,7 +272,38 @@ const struct iwl_cfg iwl7265_n_cfg = {
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
+const struct iwl_cfg iwl7265d_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 7265",
+ .fw_name_pre = IWL7265D_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7265_ht_params,
+ .nvm_ver = IWL7265D_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
+const struct iwl_cfg iwl7265d_2n_cfg = {
+ .name = "Intel(R) Dual Band Wireless N 7265",
+ .fw_name_pre = IWL7265D_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7265_ht_params,
+ .nvm_ver = IWL7265D_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
+const struct iwl_cfg iwl7265d_n_cfg = {
+ .name = "Intel(R) Wireless N 7265",
+ .fw_name_pre = IWL7265D_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7265_ht_params,
+ .nvm_ver = IWL7265D_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
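Note: the MODULE_FIRMWARE lines above paste a name prefix, the stringified API number, and a ".ucode" suffix into the filename the driver will request, e.g. iwlwifi-7265D-10.ucode for API 10. A standalone sketch of the same token pasting:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define FW_PRE			"iwlwifi-7265D-"
#define MODULE_FW(api)		FW_PRE __stringify(api) ".ucode"

int main(void)
{
	puts(MODULE_FW(10));	/* prints: iwlwifi-7265D-10.ucode */
	return 0;
}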
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index d2b7234b1c73..bf0a95cb7153 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -72,10 +72,10 @@
#define IWL8000_UCODE_API_MAX 10
/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK 8
+#define IWL8000_UCODE_API_OK 10
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 8
+#define IWL8000_UCODE_API_MIN 9
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
@@ -91,6 +91,10 @@
/* Max SDIO RX aggregation size of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO 28
+/* Max A-MPDU exponent for HT and VHT */
+#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K
+#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO IEEE80211_VHT_MAX_AMPDU_32K
+
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -104,6 +108,7 @@ static const struct iwl_base_params iwl8000_base_params = {
};
static const struct iwl_ht_params iwl8000_ht_params = {
+ .stbc = true,
.ldpc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
@@ -118,6 +123,7 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
+ .d0i3 = true, \
.non_shared_ant = ANT_A
const struct iwl_cfg iwl8260_2n_cfg = {
@@ -136,6 +142,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
@@ -148,6 +155,23 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
+ .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
+ .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
+};
+
+const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
+ .name = "Intel(R) Dual Band Wireless-AC 4165",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
+ .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+ .bt_shared_single_ant = true,
+ .disable_dummy_notification = true,
+ .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
+ .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 2ef83a39ff10..3a4b9c7fc083 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -87,6 +87,16 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
};
+static inline bool iwl_has_secure_boot(u32 hw_rev,
+ enum iwl_device_family family)
+{
+	/* true only for family 8000 B0 */
+	if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
+		return true;
+
+	return false;
+}
+
/*
* LED mode
* IWL_LED_DEFAULT: use device default
@@ -246,6 +256,11 @@ struct iwl_pwr_tx_backoff {
* @nvm_hw_section_num: the ID of the HW NVM section
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
+ * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
+ * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the
+ * station can receive in HT
+ * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
+ * station can receive in VHT
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -285,6 +300,9 @@ struct iwl_cfg {
const char *default_nvm_file;
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
+ unsigned int max_tx_agg_size;
+ unsigned int max_ht_ampdu_exponent;
+ unsigned int max_vht_ampdu_exponent;
};
/*
@@ -346,9 +364,14 @@ extern const struct iwl_cfg iwl3165_2ac_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;
+extern const struct iwl_cfg iwl7265d_2ac_cfg;
+extern const struct iwl_cfg iwl7265d_2n_cfg;
+extern const struct iwl_cfg iwl7265d_n_cfg;
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
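Note: iwl_has_secure_boot() above keys off the device family and the step bits (0xC) of the hardware revision. A standalone re-creation with a local enum, just to show the truth table; the mask semantics follow the helper as written:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum family { FAMILY_7000, FAMILY_8000 };

/* local re-creation of the helper added above */
static bool has_secure_boot(uint32_t hw_rev, enum family family)
{
	/* true only for family 8000 with a non-zero step field */
	return family == FAMILY_8000 && (hw_rev & 0xC);
}

int main(void)
{
	printf("8000 A-step: %d\n", has_secure_boot(0x0, FAMILY_8000)); /* 0 */
	printf("8000 B-step: %d\n", has_secure_boot(0x4, FAMILY_8000)); /* 1 */
	printf("7000 B-step: %d\n", has_secure_boot(0x4, FAMILY_7000)); /* 0 */
	return 0;
}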
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 3f6f015285e5..aff63c3f5bf8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -129,6 +129,8 @@
#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
+#define CSR_MBOX_SET_REG (CSR_BASE + 0x88)
+
#define CSR_LED_REG (CSR_BASE+0x094)
#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */
@@ -184,6 +186,8 @@
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
+#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5)
+
#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
@@ -305,23 +309,24 @@ enum {
};
-#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
-#define CSR_HW_REV_TYPE_5300 (0x0000020)
-#define CSR_HW_REV_TYPE_5350 (0x0000030)
-#define CSR_HW_REV_TYPE_5100 (0x0000050)
-#define CSR_HW_REV_TYPE_5150 (0x0000040)
-#define CSR_HW_REV_TYPE_1000 (0x0000060)
-#define CSR_HW_REV_TYPE_6x00 (0x0000070)
-#define CSR_HW_REV_TYPE_6x50 (0x0000080)
-#define CSR_HW_REV_TYPE_6150 (0x0000084)
-#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
-#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
-#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
-#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
-#define CSR_HW_REV_TYPE_2x00 (0x0000100)
-#define CSR_HW_REV_TYPE_105 (0x0000110)
-#define CSR_HW_REV_TYPE_135 (0x0000120)
-#define CSR_HW_REV_TYPE_NONE (0x00001F0)
+#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
+#define CSR_HW_REV_TYPE_5300 (0x0000020)
+#define CSR_HW_REV_TYPE_5350 (0x0000030)
+#define CSR_HW_REV_TYPE_5100 (0x0000050)
+#define CSR_HW_REV_TYPE_5150 (0x0000040)
+#define CSR_HW_REV_TYPE_1000 (0x0000060)
+#define CSR_HW_REV_TYPE_6x00 (0x0000070)
+#define CSR_HW_REV_TYPE_6x50 (0x0000080)
+#define CSR_HW_REV_TYPE_6150 (0x0000084)
+#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
+#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
+#define CSR_HW_REV_TYPE_2x00 (0x0000100)
+#define CSR_HW_REV_TYPE_105 (0x0000110)
+#define CSR_HW_REV_TYPE_135 (0x0000120)
+#define CSR_HW_REV_TYPE_7265D (0x0000210)
+#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 0a70bcd241f5..684254553558 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -143,7 +143,7 @@ do { \
#define IWL_DL_INFO 0x00000001
#define IWL_DL_MAC80211 0x00000002
#define IWL_DL_HCMD 0x00000004
-#define IWL_DL_STATE 0x00000008
+#define IWL_DL_TDLS 0x00000008
/* 0x000000F0 - 0x00000010 */
#define IWL_DL_QUOTA 0x00000010
#define IWL_DL_TE 0x00000020
@@ -180,6 +180,7 @@ do { \
#define IWL_DL_TX_QUEUES 0x80000000
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 0f1084f09caa..38de1513e4de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -78,9 +78,6 @@
#include "iwl-config.h"
#include "iwl-modparams.h"
-/* private includes */
-#include "iwl-fw-file.h"
-
/******************************************************************************
*
* module boiler plate
@@ -187,6 +184,11 @@ static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
static void iwl_dealloc_ucode(struct iwl_drv *drv)
{
int i;
+
+ kfree(drv->fw.dbg_dest_tlv);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
+ kfree(drv->fw.dbg_conf_tlv[i]);
+
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
}
@@ -248,6 +250,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
/*
* Starting 8000B - FW name format has changed. This overwrites the
* previous name and uses the new format.
+ *
+ * TODO:
+ * Once there is only one supported step for 8000 family - delete this!
*/
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
char rev_step[2] = {
@@ -258,6 +263,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP)
rev_step[0] = 0;
+ /*
+ * If hw_rev wasn't set yet - default as B-step. If it IS A-step
+ * we'll reload that FW later instead.
+ */
+ if (drv->trans->hw_rev == 0)
+ rev_step[0] = 'B';
+
snprintf(drv->firmware_name, sizeof(drv->firmware_name),
"%s%s-%s.ucode", name_pre, rev_step, tag);
}
@@ -301,6 +313,11 @@ struct iwl_firmware_pieces {
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+ /* FW debug data parsed for driver usage */
+ struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+ size_t dbg_conf_tlv_len[FW_DBG_MAX];
};
/*
@@ -574,6 +591,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
char buildstr[25];
u32 build;
int num_of_cpus;
+ bool usniffer_images = false;
+ bool usniffer_req = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -807,19 +826,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
tlv_len);
drv->fw.mvm_fw = true;
- drv->fw.img[IWL_UCODE_REGULAR].is_secure = true;
break;
case IWL_UCODE_TLV_SECURE_SEC_INIT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
tlv_len);
drv->fw.mvm_fw = true;
- drv->fw.img[IWL_UCODE_INIT].is_secure = true;
break;
case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
tlv_len);
drv->fw.mvm_fw = true;
- drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true;
break;
case IWL_UCODE_TLV_NUM_OF_CPU:
if (tlv_len != sizeof(u32))
@@ -849,12 +865,79 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->n_scan_channels =
le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_FW_DBG_DEST: {
+ struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
+
+ if (pieces->dbg_dest_tlv) {
+ IWL_ERR(drv,
+ "dbg destination ignored, already exists\n");
+ break;
+ }
+
+ pieces->dbg_dest_tlv = dest;
+ IWL_INFO(drv, "Found debug destination: %s\n",
+ get_fw_dbg_mode_string(dest->monitor_mode));
+
+ drv->fw.dbg_dest_reg_num =
+ tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv,
+ reg_ops);
+ drv->fw.dbg_dest_reg_num /=
+ sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
+
+ break;
+ }
+ case IWL_UCODE_TLV_FW_DBG_CONF: {
+ struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
+
+ if (!pieces->dbg_dest_tlv) {
+ IWL_ERR(drv,
+ "Ignore dbg config %d - no destination configured\n",
+ conf->id);
+ break;
+ }
+
+ if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
+ IWL_ERR(drv,
+ "Skip unknown configuration: %d\n",
+ conf->id);
+ break;
+ }
+
+ if (pieces->dbg_conf_tlv[conf->id]) {
+ IWL_ERR(drv,
+ "Ignore duplicate dbg config %d\n",
+ conf->id);
+ break;
+ }
+
+ if (conf->usniffer)
+ usniffer_req = true;
+
+ IWL_INFO(drv, "Found debug configuration: %d\n",
+ conf->id);
+
+ pieces->dbg_conf_tlv[conf->id] = conf;
+ pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
+ break;
+ }
+ case IWL_UCODE_TLV_SEC_RT_USNIFFER:
+ usniffer_images = true;
+ iwl_store_ucode_sec(pieces, tlv_data,
+ IWL_UCODE_REGULAR_USNIFFER,
+ tlv_len);
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
}
}
+ if (usniffer_req && !usniffer_images) {
+ IWL_ERR(drv,
+ "user selected to work with usniffer but usniffer image isn't available in ucode package\n");
+ return -EINVAL;
+ }
+
if (len) {
IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
@@ -992,13 +1075,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
struct iwl_ucode_header *ucode;
struct iwlwifi_opmode_table *op;
int err;
- struct iwl_firmware_pieces pieces;
+ struct iwl_firmware_pieces *pieces;
const unsigned int api_max = drv->cfg->ucode_api_max;
unsigned int api_ok = drv->cfg->ucode_api_ok;
const unsigned int api_min = drv->cfg->ucode_api_min;
u32 api_ver;
int i;
bool load_module = false;
+ u32 hw_rev = drv->trans->hw_rev;
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
@@ -1008,7 +1092,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (!api_ok)
api_ok = api_max;
- memset(&pieces, 0, sizeof(pieces));
+ pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
+ if (!pieces)
+ return;
if (!ucode_raw) {
if (drv->fw_index <= api_ok)
@@ -1031,10 +1117,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
ucode = (struct iwl_ucode_header *)ucode_raw->data;
if (ucode->ver)
- err = iwl_parse_v1_v2_firmware(drv, ucode_raw, &pieces);
+ err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
else
- err = iwl_parse_tlv_firmware(drv, ucode_raw, &pieces,
- &fw->ucode_capa);
+ err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces,
+ &fw->ucode_capa);
if (err)
goto try_again;
@@ -1074,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* In mvm uCode there is no difference between data and instructions
* sections.
*/
- if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg))
+ if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg))
goto try_again;
/* Allocate ucode buffers for card's bus-master loading ... */
@@ -1083,9 +1169,33 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
- if (iwl_alloc_ucode(drv, &pieces, i))
+ if (iwl_alloc_ucode(drv, pieces, i))
goto out_free_fw;
+ if (pieces->dbg_dest_tlv) {
+ drv->fw.dbg_dest_tlv =
+ kmemdup(pieces->dbg_dest_tlv,
+ sizeof(*pieces->dbg_dest_tlv) +
+ sizeof(pieces->dbg_dest_tlv->reg_ops[0]) *
+ drv->fw.dbg_dest_reg_num, GFP_KERNEL);
+
+ if (!drv->fw.dbg_dest_tlv)
+ goto out_free_fw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
+ if (pieces->dbg_conf_tlv[i]) {
+ drv->fw.dbg_conf_tlv_len[i] =
+ pieces->dbg_conf_tlv_len[i];
+ drv->fw.dbg_conf_tlv[i] =
+ kmemdup(pieces->dbg_conf_tlv[i],
+ drv->fw.dbg_conf_tlv_len[i],
+ GFP_KERNEL);
+ if (!drv->fw.dbg_conf_tlv[i])
+ goto out_free_fw;
+ }
+ }
+
/* Now that we can no longer fail, copy information */
/*
@@ -1093,20 +1203,20 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* for each event, which is of mode 1 (including timestamp) for all
* new microcodes that include this information.
*/
- fw->init_evtlog_ptr = pieces.init_evtlog_ptr;
- if (pieces.init_evtlog_size)
- fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
+ fw->init_evtlog_ptr = pieces->init_evtlog_ptr;
+ if (pieces->init_evtlog_size)
+ fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
else
fw->init_evtlog_size =
drv->cfg->base_params->max_event_log_size;
- fw->init_errlog_ptr = pieces.init_errlog_ptr;
- fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
- if (pieces.inst_evtlog_size)
- fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
+ fw->init_errlog_ptr = pieces->init_errlog_ptr;
+ fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
+ if (pieces->inst_evtlog_size)
+ fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
else
fw->inst_evtlog_size =
drv->cfg->base_params->max_event_log_size;
- fw->inst_errlog_ptr = pieces.inst_errlog_ptr;
+ fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
/*
* figure out the offset of chain noise reset and gain commands
@@ -1165,10 +1275,55 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
op->name, err);
#endif
}
+
+ /*
+ * We may have loaded the wrong FW file in 8000 HW family if it is an
+ * A-step card, and if drv->trans->hw_rev wasn't properly read when
+ * the FW file had been loaded. (This might happen in SDIO.) In such a
+ * case - unload and reload the correct file.
+ *
+ * TODO:
+ * Once there is only one supported step for 8000 family - delete this!
+ */
+ if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP &&
+ drv->trans->hw_rev != hw_rev) {
+ char firmware_name[32];
+
+ /* Free previous FW resources */
+ if (drv->op_mode)
+ _iwl_op_mode_stop(drv);
+ iwl_dealloc_ucode(drv);
+
+ /* Build name of correct-step FW */
+		snprintf(firmware_name, sizeof(firmware_name), "%s",
+			 strrchr(drv->firmware_name, '-'));
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name),
+ "%s%s", drv->cfg->fw_name_pre, firmware_name);
+
+ /* Clear data before loading correct FW */
+ list_del(&drv->list);
+
+ /* Request correct FW file this time */
+ IWL_DEBUG_INFO(drv, "attempting to load A-step FW %s\n",
+ drv->firmware_name);
+ err = request_firmware(&ucode_raw, drv->firmware_name,
+ drv->trans->dev);
+ if (err) {
+ IWL_ERR(drv, "Failed swapping FW!\n");
+ goto out_unbind;
+ }
+
+ /* Redo callback function - this time with right FW */
+ iwl_req_fw_callback(ucode_raw, context);
+ }
+
+ kfree(pieces);
return;
try_again:
/* try next, if any */
	release_firmware(ucode_raw);
	if (iwl_request_firmware(drv, false))
		goto out_unbind;
+	kfree(pieces);
@@ -1179,6 +1334,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
out_unbind:
+ kfree(pieces);
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
}
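Note: the kmemdup() calls added to iwl_req_fw_callback() copy structures that end in a flexible array, so the duplicated length is the fixed header plus count * element size. A standalone sketch of that sizing arithmetic, with malloc/memcpy standing in for kmemdup() and a simplified (unpacked) TLV layout:

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct reg_op {
	uint8_t op, reserved[3];
	uint32_t addr, val;
};

struct dest_tlv {
	uint8_t version, monitor_mode, reserved[2];
	struct reg_op reg_ops[];	/* flexible array member */
};

/* malloc/memcpy stand in for kmemdup(): length is header + n entries */
static struct dest_tlv *dup_dest_tlv(const struct dest_tlv *src, size_t n)
{
	size_t len = sizeof(*src) + n * sizeof(src->reg_ops[0]);
	struct dest_tlv *dst = malloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}

int main(void)
{
	struct dest_tlv *src = calloc(1, sizeof(*src) +
					 2 * sizeof(struct reg_op));
	struct dest_tlv *copy = src ? dup_dest_tlv(src, 2) : NULL;

	free(copy);
	free(src);
	return 0;
}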
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 74b796dc4242..41ff85de7334 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -764,7 +764,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
if (iwlwifi_mod_params.amsdu_size_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
ht_info->mcs.rx_mask[0] = 0xFF;
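Note: ampdu_factor here is the 802.11n maximum A-MPDU length exponent: the advertised limit is 2^(13 + factor) - 1 bytes, so IEEE80211_HT_MAX_AMPDU_64K (factor 3) means 65535 bytes and the 32K SDIO setting (factor 2) means 32767. A quick arithmetic sketch:

#include <stdio.h>

int main(void)
{
	for (int factor = 0; factor <= 3; factor++)
		printf("factor %d -> %u bytes\n",
		       factor, (1u << (13 + factor)) - 1);
	/* factor 2 -> 32767 (32K), factor 3 -> 65535 (64K) */
	return 0;
}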
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index e30a41d04c8b..20a8a64c9fe3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -81,6 +81,7 @@
* @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
*/
enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_SRAM = 0,
@@ -90,6 +91,8 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4,
IWL_FW_ERROR_DUMP_FW_MONITOR = 5,
IWL_FW_ERROR_DUMP_PRPH = 6,
+ IWL_FW_ERROR_DUMP_TXF = 7,
+ IWL_FW_ERROR_DUMP_FH_REGS = 8,
IWL_FW_ERROR_DUMP_MAX,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 401f7be36b93..f2a047f6bb3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -131,6 +131,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_API_CHANGES_SET = 29,
IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
+ IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
+ IWL_UCODE_TLV_FW_DBG_DEST = 38,
+ IWL_UCODE_TLV_FW_DBG_CONF = 39,
};
struct iwl_ucode_tlv {
@@ -179,4 +182,309 @@ struct iwl_ucode_capa {
__le32 api_capa;
} __packed;
+/**
+ * enum iwl_ucode_tlv_flag - ucode API flags
+ * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ * was a separate TLV but moved here to save space.
+ * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * treats good CRC threshold as a boolean
+ * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
+ * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ * offload profile config command.
+ * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in same bindings.
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
+ */
+enum iwl_ucode_tlv_flag {
+ IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
+ IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
+ IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+ IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
+ IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
+ IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
+ IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
+ IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
+ IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
+ IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
+ IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
+ IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
+ IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
+ IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
+};
+
+/**
+ * enum iwl_ucode_tlv_api - ucode api
+ * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
+ * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
+ * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
+ * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
+ * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
+ * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
+ * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
+ * longer than the passive one, which is essential for fragmented scan.
+ */
+enum iwl_ucode_tlv_api {
+ IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
+ IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
+ IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
+ IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
+ IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
+ IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+};
+
+/**
+ * enum iwl_ucode_tlv_capa - ucode capabilities
+ * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
+ * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
+ * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
+ * tx power value into TPC Report action frame and Link Measurement Report
+ * action frame
+ * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+ * channel in DS parameter set element in probe requests.
+ * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
+ * probe requests.
+ * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ * which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ */
+enum iwl_ucode_tlv_capa {
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
+ IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
+};
+
+/* The default calibrate table size if not specified by firmware file */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
+
+/* The default max probe length if not specified by the firmware file */
+#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
+
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define IWL_UCODE_SECTION_MAX 12
+#define IWL_API_ARRAY_SIZE 1
+#define IWL_CAPABILITIES_ARRAY_SIZE 1
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwl_tlv_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
+enum iwl_fw_phy_cfg {
+ FW_PHY_CFG_RADIO_TYPE_POS = 0,
+ FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
+ FW_PHY_CFG_RADIO_STEP_POS = 2,
+ FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
+ FW_PHY_CFG_RADIO_DASH_POS = 4,
+ FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
+ FW_PHY_CFG_TX_CHAIN_POS = 16,
+ FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
+ FW_PHY_CFG_RX_CHAIN_POS = 20,
+ FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
+};
+
+#define IWL_UCODE_MAX_CS 1
+
+/**
+ * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for a future use)
+ * @hdr_len: a size of MPDU security header
+ * @pn_len: a size of PN
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: mic length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+ */
+struct iwl_fw_cipher_scheme {
+ __le32 cipher;
+ u8 flags;
+ u8 hdr_len;
+ u8 pn_len;
+ u8 pn_off;
+ u8 key_idx_off;
+ u8 key_idx_mask;
+ u8 key_idx_shift;
+ u8 mic_len;
+ u8 hw_cipher;
+} __packed;
+
+enum iwl_fw_dbg_reg_operator {
+ CSR_ASSIGN,
+ CSR_SETBIT,
+ CSR_CLEARBIT,
+
+ PRPH_ASSIGN,
+ PRPH_SETBIT,
+ PRPH_CLEARBIT,
+};
+
+/**
+ * struct iwl_fw_dbg_reg_op - an operation on a register
+ *
+ * @op: %enum iwl_fw_dbg_reg_operator
+ * @addr: offset of the register
+ * @val: value
+ */
+struct iwl_fw_dbg_reg_op {
+ u8 op;
+ u8 reserved[3];
+ __le32 addr;
+ __le32 val;
+} __packed;
+
+/**
+ * enum iwl_fw_dbg_monitor_mode - available monitor recording modes
+ *
+ * @SMEM_MODE: monitor stores the data in SMEM
+ * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
+ * @MARBH_MODE: monitor stores the data in MARBH buffer
+ */
+enum iwl_fw_dbg_monitor_mode {
+ SMEM_MODE = 0,
+ EXTERNAL_MODE = 1,
+ MARBH_MODE = 2,
+};
+
+/**
+ * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
+ *
+ * @version: version of the TLV - currently 0
+ * @monitor_mode: %enum iwl_fw_dbg_monitor_mode
+ * @base_reg: addr of the base addr register (PRPH)
+ * @end_reg: addr of the end addr register (PRPH)
+ * @write_ptr_reg: the addr of the reg of the write pointer
+ * @wrap_count: the addr of the reg of the wrap_count
+ * @base_shift: shift right of the base addr reg
+ * @end_shift: shift right of the end addr reg
+ * @reg_ops: array of registers operations
+ *
+ * This parses IWL_UCODE_TLV_FW_DBG_DEST
+ */
+struct iwl_fw_dbg_dest_tlv {
+ u8 version;
+ u8 monitor_mode;
+ u8 reserved[2];
+ __le32 base_reg;
+ __le32 end_reg;
+ __le32 write_ptr_reg;
+ __le32 wrap_count;
+ u8 base_shift;
+ u8 end_shift;
+ struct iwl_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
+struct iwl_fw_dbg_conf_hcmd {
+ u8 id;
+ u8 reserved;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger - a TLV that describes a debug configuration
+ *
+ * @enabled: is this trigger enabled
+ * @reserved: reserved for future use
+ * @len: length, in bytes, of the %trigger field
+ * @trigger: pointer to a trigger struct
+ */
+struct iwl_fw_dbg_trigger {
+ u8 enabled;
+ u8 reserved;
+ u8 len;
+ u8 trigger[0];
+} __packed;
+
+/**
+ * enum iwl_fw_dbg_conf - configurations available
+ *
+ * @FW_DBG_CUSTOM: take this configuration from alive
+ * Note that the trigger is NO-OP for this configuration
+ */
+enum iwl_fw_dbg_conf {
+ FW_DBG_CUSTOM = 0,
+
+ /* must be last */
+ FW_DBG_MAX,
+ FW_DBG_INVALID = 0xff,
+};
+
+/**
+ * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration
+ *
+ * @id: %enum iwl_fw_dbg_conf
+ * @usniffer: should the uSniffer image be used
+ * @num_of_hcmds: how many HCMDs to send are present here
+ * @hcmd: a variable length host command to be sent to apply the configuration.
+ * If there is more than one HCMD to send, they will appear one after the
+ * other and be sent in the order that they appear in.
+ * This parses IWL_UCODE_TLV_FW_DBG_CONF
+ */
+struct iwl_fw_dbg_conf_tlv {
+ u8 id;
+ u8 usniffer;
+ u8 reserved;
+ u8 num_of_hcmds;
+ struct iwl_fw_dbg_conf_hcmd hcmd;
+
+ /* struct iwl_fw_dbg_trigger sits after all variable length hcmds */
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
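Note: the parser derives dbg_dest_reg_num from this layout: an IWL_UCODE_TLV_FW_DBG_DEST payload is the fixed iwl_fw_dbg_dest_tlv header followed by reg_ops entries, so the count is (tlv_len - offsetof(header, reg_ops)) / sizeof(entry). A standalone sketch of that arithmetic; the local structs are unpacked, so absolute offsets differ from the __packed kernel ones, but the computation is the same:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_op {
	uint8_t op, reserved[3];
	uint32_t addr, val;
};

struct dest_tlv {
	uint8_t version, monitor_mode, reserved[2];
	uint32_t base_reg, end_reg, write_ptr_reg, wrap_count;
	uint8_t base_shift, end_shift;
	struct reg_op reg_ops[];
};

int main(void)
{
	/* a payload carrying three register operations */
	size_t tlv_len = offsetof(struct dest_tlv, reg_ops) +
			 3 * sizeof(struct reg_op);
	size_t n = (tlv_len - offsetof(struct dest_tlv, reg_ops)) /
		   sizeof(struct reg_op);

	printf("reg_ops entries: %zu\n", n);	/* prints 3 */
	return 0;
}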
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index b894a84e8393..e6dc3b870949 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -70,112 +70,6 @@
#include "iwl-fw-file.h"
/**
- * enum iwl_ucode_tlv_flag - ucode API flags
- * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
- * was a separate TLV but moved here to save space.
- * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
- * treats good CRC threshold as a boolean
- * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
- * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
- * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
- * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
- * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
- * offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
- * (rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
- * from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
- * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in different bindings.
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in same bindings.
- * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
- * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
- * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
- * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
- */
-enum iwl_ucode_tlv_flag {
- IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
- IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
- IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
- IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
- IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
- IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
- IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
- IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
- IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
- IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
- IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
- IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
- IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
- IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
-};
-
-/**
- * enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
- * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
- * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
- * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
- * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
- * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
- * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
- * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
- * longer than the passive one, which is essential for fragmented scan.
- */
-enum iwl_ucode_tlv_api {
- IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
- IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
- IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
- IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
- IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
- IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
- IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
-};
-
-/**
- * enum iwl_ucode_tlv_capa - ucode capabilities
- * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
- * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
- * tx power value into TPC Report action frame and Link Measurement Report
- * action frame
- * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports adding DS params
- * element in probe requests.
- * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
- * probe requests.
- * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
- * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
- * which also implies support for the scheduler configuration command
- * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
- */
-enum iwl_ucode_tlv_capa {
- IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
- IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
- IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
- IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
- IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
- IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
- IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
-};
-
-/* The default calibrate table size if not specified by firmware file */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
-#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
-
-/* The default max probe length if not specified by the firmware file */
-#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
-
-/**
* enum iwl_ucode_type
*
* The type of ucode.
@@ -183,11 +77,13 @@ enum iwl_ucode_tlv_capa {
* @IWL_UCODE_REGULAR: Normal runtime ucode
* @IWL_UCODE_INIT: Initial ucode
* @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
+ * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image
*/
enum iwl_ucode_type {
IWL_UCODE_REGULAR,
IWL_UCODE_INIT,
IWL_UCODE_WOWLAN,
+ IWL_UCODE_REGULAR_USNIFFER,
IWL_UCODE_TYPE_MAX,
};
@@ -202,14 +98,6 @@ enum iwl_ucode_sec {
IWL_UCODE_SECTION_DATA,
IWL_UCODE_SECTION_INST,
};
-/*
- * For 16.0 uCode and above, there is no differentiation between sections,
- * just an offset to the HW address.
- */
-#define IWL_UCODE_SECTION_MAX 12
-#define IWL_API_ARRAY_SIZE 1
-#define IWL_CAPABILITIES_ARRAY_SIZE 1
-#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
struct iwl_ucode_capabilities {
u32 max_probe_length;
@@ -229,7 +117,6 @@ struct fw_desc {
struct fw_img {
struct fw_desc sec[IWL_UCODE_SECTION_MAX];
- bool is_secure;
bool is_dual_cpus;
};
@@ -238,66 +125,6 @@ struct iwl_sf_region {
u32 size;
};
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
-
-/*
- * Calibration control struct.
- * Sent as part of the phy configuration command.
- * @flow_trigger: bitmap for which calibrations to perform according to
- * flow triggers.
- * @event_trigger: bitmap for which calibrations to perform according to
- * event triggers.
- */
-struct iwl_tlv_calib_ctrl {
- __le32 flow_trigger;
- __le32 event_trigger;
-} __packed;
-
-enum iwl_fw_phy_cfg {
- FW_PHY_CFG_RADIO_TYPE_POS = 0,
- FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
- FW_PHY_CFG_RADIO_STEP_POS = 2,
- FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
- FW_PHY_CFG_RADIO_DASH_POS = 4,
- FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
- FW_PHY_CFG_TX_CHAIN_POS = 16,
- FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
- FW_PHY_CFG_RX_CHAIN_POS = 20,
- FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
-};
-
-#define IWL_UCODE_MAX_CS 1
-
-/**
- * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
- * @cipher: a cipher suite selector
- * @flags: cipher scheme flags (currently reserved for a future use)
- * @hdr_len: a size of MPDU security header
- * @pn_len: a size of PN
- * @pn_off: an offset of pn from the beginning of the security header
- * @key_idx_off: an offset of key index byte in the security header
- * @key_idx_mask: a bit mask of key_idx bits
- * @key_idx_shift: bit shift needed to get key_idx
- * @mic_len: mic length in bytes
- * @hw_cipher: a HW cipher index used in host commands
- */
-struct iwl_fw_cipher_scheme {
- __le32 cipher;
- u8 flags;
- u8 hdr_len;
- u8 pn_len;
- u8 pn_off;
- u8 key_idx_off;
- u8 key_idx_mask;
- u8 key_idx_shift;
- u8 mic_len;
- u8 hw_cipher;
-} __packed;
-
/**
* struct iwl_fw_cscheme_list - a cipher scheme list
* @size: a number of entries
@@ -324,6 +151,11 @@ struct iwl_fw_cscheme_list {
* @inst_errlog_ptr: error log offfset for runtime ucode.
* @mvm_fw: indicates this is MVM firmware
* @cipher_scheme: optional external cipher scheme.
+ * @human_readable: human readable version
+ * @dbg_dest_tlv: points to the destination TLV for debug
+ * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
+ * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_fw {
u32 ucode_ver;
@@ -348,6 +180,68 @@ struct iwl_fw {
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
+
+ struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+ size_t dbg_conf_tlv_len[FW_DBG_MAX];
+
+ u8 dbg_dest_reg_num;
};
+static inline const char *get_fw_dbg_mode_string(int mode)
+{
+ switch (mode) {
+ case SMEM_MODE:
+ return "SMEM";
+ case EXTERNAL_MODE:
+ return "EXTERNAL_DRAM";
+ case MARBH_MODE:
+ return "MARBH";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const struct iwl_fw_dbg_trigger *
+iwl_fw_dbg_conf_get_trigger(const struct iwl_fw *fw, u8 id)
+{
+ const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+ u8 *ptr;
+ int i;
+
+ if (!conf_tlv)
+ return NULL;
+
+	ptr = (void *)&conf_tlv->hcmd;
+	for (i = 0; i < conf_tlv->num_of_hcmds; i++) {
+		const struct iwl_fw_dbg_conf_hcmd *hcmd = (void *)ptr;
+
+		ptr += sizeof(*hcmd) + le16_to_cpu(hcmd->len);
+	}
+
+ return (const struct iwl_fw_dbg_trigger *)ptr;
+}
+
+static inline bool
+iwl_fw_dbg_conf_enabled(const struct iwl_fw *fw, u8 id)
+{
+ const struct iwl_fw_dbg_trigger *trigger =
+ iwl_fw_dbg_conf_get_trigger(fw, id);
+
+ if (!trigger)
+ return false;
+
+ return trigger->enabled;
+}
+
+static inline bool
+iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
+{
+ const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+
+ if (!conf_tlv)
+ return false;
+
+ return conf_tlv->usniffer;
+}
+
#endif /* __iwl_fw_h__ */
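Note: iwl_fw_dbg_conf_get_trigger() above has to step over num_of_hcmds variable-length host commands before it reaches the trigger stored behind them, reading each record's own length field as it goes. A standalone sketch of that walk over a 4-byte header (id, reserved, 16-bit len) plus payload; endian conversion (le16_to_cpu) is omitted here:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_LEN	4	/* u8 id, u8 reserved, 16-bit len */

static const uint8_t *skip_records(const uint8_t *p, int n)
{
	while (n--) {
		uint16_t len;

		memcpy(&len, p + 2, sizeof(len));  /* this record's len */
		p += HDR_LEN + len;
	}
	return p;	/* first byte past the last record */
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	uint16_t len = 4;	/* one record with a 4-byte payload */

	memcpy(buf + 2, &len, sizeof(len));
	printf("trigger offset: %td\n",
	       (ptrdiff_t)(skip_records(buf, 1) - buf));	/* prints 8 */
	return 0;
}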
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index c302e7468559..06e02fcd6f7b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -325,6 +325,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
{
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
+ unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
+ IEEE80211_VHT_MAX_AMPDU_1024K);
vht_cap->vht_supported = true;
@@ -332,7 +334,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
- 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ max_ampdu_exponent <<
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (cfg->ht_params->ldpc)
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b6d666ee8359..17de6d46222a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -138,7 +138,8 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
- * @enter_d0i3: configure the fw to enter d0i3. May sleep.
+ * @enter_d0i3: configure the fw to enter d0i3. Return 1 to indicate that
+ *	d0i3 entry was aborted (e.g. due to a held reference). May sleep.
* @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/
struct iwl_op_mode_ops {
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 1560f4576c7d..2df51eab1348 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -322,6 +322,7 @@ enum secure_boot_config_reg {
LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
};
+#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0 (0xA01E30)
#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
enum secure_boot_status_reg {
@@ -333,6 +334,7 @@ enum secure_boot_status_reg {
LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
};
+#define FH_UCODE_LOAD_STATUS (0x1AF0)
#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
enum secure_load_status_reg {
LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
@@ -352,7 +354,7 @@ enum secure_load_status_reg {
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
-#define LMPM_SECURE_TIME_OUT (100)
+#define LMPM_SECURE_TIME_OUT (100) /* 10 micro */
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
@@ -368,4 +370,10 @@ enum secure_load_status_reg {
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
+/* FW chicken bits */
+#define LMPM_CHICK 0xA01FF8
+enum {
+ LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
+};
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index d8fc548c0d6c..028408a6ecba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -534,10 +534,10 @@ struct iwl_trans_ops {
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
+ void (*suspend)(struct iwl_trans *trans);
+ void (*resume)(struct iwl_trans *trans);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
-#endif
};
/**
@@ -574,6 +574,9 @@ enum iwl_trans_state {
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
* @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
+ * @dbg_dest_tlv: points to the destination TLV for debug
+ * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -605,6 +608,10 @@ struct iwl_trans {
u64 dflt_pwr_limit;
+ const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
+ u8 dbg_dest_reg_num;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -704,7 +711,18 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
trans->ops->unref(trans);
}
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline void iwl_trans_suspend(struct iwl_trans *trans)
+{
+ if (trans->ops->suspend)
+ trans->ops->suspend(trans);
+}
+
+static inline void iwl_trans_resume(struct iwl_trans *trans)
+{
+ if (trans->ops->resume)
+ trans->ops->resume(trans);
+}
+
static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans)
{
@@ -712,7 +730,6 @@ iwl_trans_dump_data(struct iwl_trans *trans)
return NULL;
return trans->ops->dump_data(trans);
}
-#endif
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index da2ffb785194..a3bfda45d9e6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -72,8 +72,6 @@
#include "mvm.h"
#include "iwl-debug.h"
-#define BT_ANTENNA_COUPLING_THRESHOLD (30)
-
const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
[BT_KILL_MSK_DEFAULT] = 0xfffffc00,
[BT_KILL_MSK_NEVER] = 0xffffffff,
@@ -302,11 +300,6 @@ static const __le64 iwl_ci_mask[][3] = {
},
};
-static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
- cpu_to_le32(0x2e402280),
- cpu_to_le32(0x7711a751),
-};
-
struct corunning_block_luts {
u8 range;
__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
@@ -605,7 +598,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
bt_cmd->max_kill = cpu_to_le32(5);
bt_cmd->bt4_antenna_isolation_thr =
- cpu_to_le32(BT_ANTENNA_COUPLING_THRESHOLD);
+ cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS);
bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15);
bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15);
bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
@@ -638,8 +631,8 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost,
sizeof(iwl_bt_prio_boost));
- memcpy(&bt_cmd->multiprio_lut, iwl_bt_mprio_lut,
- sizeof(iwl_bt_mprio_lut));
+ bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
+ bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
send_cmd:
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
@@ -1144,6 +1137,22 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return lut_type != BT_COEX_LOOSE_LUT;
}
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
+{
+ /* there is no other antenna, shared antenna is always available */
+ if (mvm->cfg->bt_shared_single_ant)
+ return true;
+
+ if (ant & mvm->cfg->non_shared_ant)
+ return true;
+
+ if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
+
+ return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC;
+}
+
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
{
/* there is no other antenna, shared antenna is always available */
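The new iwl_mvm_bt_coex_is_ant_avail() generalizes the shared-antenna check to an arbitrary antenna mask. A standalone sketch of the same decision logic, under the assumption that antennas are bitmask-encoded as in the driver (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Model of iwl_mvm_bt_coex_is_ant_avail(): a non-shared antenna is
 * always usable; the shared antenna is free only while BT activity
 * grading stays below BT_HIGH_TRAFFIC. */
enum { BT_OFF, BT_ON_NO_CONNECTION, BT_LOW_TRAFFIC, BT_HIGH_TRAFFIC };

static bool ant_avail(bool shared_single_ant, unsigned int non_shared_ant,
		      unsigned int ant, unsigned int activity_grading)
{
	if (shared_single_ant)		/* no other antenna to contend for */
		return true;
	if (ant & non_shared_ant)	/* requested antenna is BT-free */
		return true;
	return activity_grading < BT_HIGH_TRAFFIC;
}

int main(void)
{
	/* antenna B (bit 1) is the non-shared one in this example */
	printf("%d\n", ant_avail(false, 0x2, 0x1, BT_HIGH_TRAFFIC)); /* 0 */
	printf("%d\n", ant_avail(false, 0x2, 0x2, BT_HIGH_TRAFFIC)); /* 1 */
	return 0;
}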
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 8a1d2f33d5b7..b3210cfbecc8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -102,8 +102,6 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#undef EVENT_PRIO_ANT
-#define BT_ANTENNA_COUPLING_THRESHOLD (30)
-
static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
@@ -290,11 +288,6 @@ static const __le64 iwl_ci_mask[][3] = {
},
};
-static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
- cpu_to_le32(0x2e402280),
- cpu_to_le32(0x7711a751),
-};
-
struct corunning_block_luts {
u8 range;
__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
@@ -593,7 +586,8 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
}
bt_cmd->max_kill = 5;
- bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ bt_cmd->bt4_antenna_isolation_thr =
+ IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
bt_cmd->bt4_tx_rx_max_freq0 = 15;
@@ -618,7 +612,9 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
BT_VALID_TXRX_MAX_FREQ_0 |
- BT_VALID_SYNC_TO_SCO);
+ BT_VALID_SYNC_TO_SCO |
+ BT_VALID_TTC |
+ BT_VALID_RRC);
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
@@ -634,6 +630,12 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
}
+ if (IWL_MVM_BT_COEX_TTC)
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
+
+ if (IWL_MVM_BT_COEX_RRC)
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
+
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@@ -649,8 +651,8 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
sizeof(iwl_bt_prio_boost));
- memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
- sizeof(iwl_bt_mprio_lut));
+ bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
+ bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
send_cmd:
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -830,6 +832,9 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
if (!vif->bss_conf.assoc)
smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
IWL_DEBUG_COEX(data->mvm,
"mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status, bt_activity_grading,
@@ -1162,6 +1167,12 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
return lut_type != BT_COEX_LOOSE_LUT;
}
+bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant)
+{
+ u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
+ return ag < BT_HIGH_TRAFFIC;
+}
+
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
{
u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
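The notification iterator now also leaves SMPS automatic when the firmware reports reduced Rx chains (RRC) on the vif's PHY context; rrc_enabled is read as a per-PHY-context bitmap. A small sketch of that bit test (names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Bit N of rrc_enabled set means the firmware runs reduced Rx
 * chains on PHY context N, as in the new iterator check above. */
static bool rrc_active_on_phy(unsigned char rrc_enabled, unsigned int phy_id)
{
	return rrc_enabled & (1u << phy_id);
}

int main(void)
{
	unsigned char rrc_enabled = 0x02;	/* RRC on PHY context 1 only */

	printf("phy0: %d\n", rrc_active_on_phy(rrc_enabled, 0)); /* 0 */
	printf("phy1: %d\n", rrc_active_on_phy(rrc_enabled, 1)); /* 1 */
	return 0;
}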
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index d4dfbe4cb66d..3bd93476ec1c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -92,8 +92,15 @@
#define IWL_MVM_BT_COEX_SYNC2SCO 1
#define IWL_MVM_BT_COEX_CORUNNING 0
#define IWL_MVM_BT_COEX_MPLUT 1
+#define IWL_MVM_BT_COEX_RRC 1
+#define IWL_MVM_BT_COEX_TTC 1
+#define IWL_MVM_BT_COEX_MPLUT_REG0 0x28412201
+#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
+#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
+#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
#define IWL_MVM_QUOTA_THRESHOLD 8
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
+#define IWL_MVM_RS_DISABLE_MIMO 0
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index c17be0fb7283..744de262373e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -601,33 +601,6 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
return ret;
}
-struct iwl_d3_iter_data {
- struct iwl_mvm *mvm;
- struct ieee80211_vif *vif;
- bool error;
-};
-
-static void iwl_mvm_d3_iface_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_d3_iter_data *data = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return;
-
- if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
- return;
-
- if (data->vif) {
- IWL_ERR(data->mvm, "More than one managed interface active!\n");
- data->error = true;
- return;
- }
-
- data->vif = vif;
-}
-
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *ap_sta)
{
@@ -783,130 +756,81 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
-static int
-iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
- const struct iwl_wowlan_config_cmd_v3 *cmd)
+static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
- /* start only with the v2 part of the command */
- u16 cmd_len = sizeof(cmd->common);
-
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
- cmd_len = sizeof(*cmd);
-
- return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
- cmd_len, cmd);
-}
-
-static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan,
- bool test)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_d3_iter_data suspend_iter_data = {
- .mvm = mvm,
- };
- struct ieee80211_vif *vif;
- struct iwl_mvm_vif *mvmvif;
- struct ieee80211_sta *ap_sta;
- struct iwl_mvm_sta *mvm_ap_sta;
- struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
- struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
- struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
- struct iwl_d3_manager_config d3_cfg_cmd_data = {
- /*
- * Program the minimum sleep time to 10 seconds, as many
- * platforms have issues processing a wakeup signal while
- * still being in the process of suspending.
- */
- .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
- };
- struct iwl_host_cmd d3_cfg_cmd = {
- .id = D3_CONFIG_CMD,
- .flags = CMD_WANT_SKB,
- .data[0] = &d3_cfg_cmd_data,
- .len[0] = sizeof(d3_cfg_cmd_data),
- };
- struct wowlan_key_data key_data = {
- .use_rsc_tsc = false,
- .tkip = &tkip_cmd,
- .use_tkip = false,
- };
- int ret;
- int len __maybe_unused;
-
- if (!wowlan) {
- /*
- * mac80211 shouldn't get here, but for D3 test
- * it doesn't warrant a warning
- */
- WARN_ON(!test);
- return -EINVAL;
- }
-
- key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
- if (!key_data.rsc_tsc)
- return -ENOMEM;
+ iwl_mvm_cancel_scan(mvm);
- mutex_lock(&mvm->mutex);
+ iwl_trans_stop_device(mvm->trans);
- /* see if there's only a single BSS vif and it's associated */
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_iface_iterator, &suspend_iter_data);
+ /*
+ * Set the HW restart bit -- this is mostly true as we're
+ * going to load new firmware and reprogram that, though
+ * the reprogramming is going to be manual to avoid adding
+ * all the MACs that aren't supported.
+ * We don't have to clear up everything though because the
+ * reprogramming is manual. When we resume, we'll actually
+ * go through a proper restart sequence again to switch
+ * back to the runtime firmware image.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
- if (suspend_iter_data.error || !suspend_iter_data.vif) {
- ret = 1;
- goto out_noreset;
- }
+ /* We reprogram keys and shouldn't allocate new key indices */
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
- vif = suspend_iter_data.vif;
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ mvm->ptk_ivlen = 0;
+ mvm->ptk_icvlen = 0;
+ mvm->gtk_ivlen = 0;
+ mvm->gtk_icvlen = 0;
- ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(ap_sta)) {
- ret = -EINVAL;
- goto out_noreset;
- }
+ return iwl_mvm_load_d3_fw(mvm);
+}
- mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+static int
+iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+ struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct ieee80211_sta *ap_sta)
+{
+ int ret;
+ struct iwl_mvm_sta *mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
- /* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */
+ /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
- wowlan_config_cmd.common.is_11n_connection =
+ wowlan_config_cmd->is_11n_connection =
ap_sta->ht_cap.ht_supported;
/* Query the last used seqno and set it */
ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
if (ret < 0)
- goto out_noreset;
- wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret);
+ return ret;
+
+ wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
- iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common);
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
if (wowlan->disconnect)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
if (wowlan->magic_pkt)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
if (wowlan->gtk_rekey_failure)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
if (wowlan->eap_identity_req)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
if (wowlan->four_way_handshake)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
if (wowlan->n_patterns)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
if (wowlan->rfkill_release)
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
if (wowlan->tcp) {
@@ -914,44 +838,43 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* Set the "link change" (really "link lost") flag as well
* since that implies losing the TCP connection.
*/
- wowlan_config_cmd.common.wakeup_filter |=
+ wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
}
- iwl_mvm_cancel_scan(mvm);
-
- iwl_trans_stop_device(mvm->trans);
-
- /*
- * Set the HW restart bit -- this is mostly true as we're
- * going to load new firmware and reprogram that, though
- * the reprogramming is going to be manual to avoid adding
- * all the MACs that aren't support.
- * We don't have to clear up everything though because the
- * reprogramming is manual. When we resume, we'll actually
- * go through a proper restart sequence again to switch
- * back to the runtime firmware image.
- */
- set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
-
- /* We reprogram keys and shouldn't allocate new key indices */
- memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+ return 0;
+}
- mvm->ptk_ivlen = 0;
- mvm->ptk_icvlen = 0;
- mvm->ptk_ivlen = 0;
- mvm->ptk_icvlen = 0;
+static int
+iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+ struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct ieee80211_sta *ap_sta)
+{
+ struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+ struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ struct wowlan_key_data key_data = {
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ int ret;
- ret = iwl_mvm_load_d3_fw(mvm);
+ ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
- goto out;
+ return ret;
ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
if (ret)
- goto out;
+ return ret;
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc)
+ return -ENOMEM;
if (!iwlwifi_mod_params.sw_crypto) {
/*
@@ -1010,7 +933,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
}
}
- ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+ sizeof(*wowlan_config_cmd),
+ wowlan_config_cmd);
if (ret)
goto out;
@@ -1023,8 +948,153 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
goto out;
ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
+
+out:
+ kfree(key_data.rsc_tsc);
+ return ret;
+}
+
+static int
+iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ int ret;
+
+ ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
- goto out;
+ return ret;
+
+ /* rfkill release can be either for wowlan or netdetect */
+ if (wowlan->rfkill_release)
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
+ return -EBUSY;
+
+ /* save the sched scan matchsets... */
+ if (nd_config->n_match_sets) {
+ mvm->nd_match_sets = kmemdup(nd_config->match_sets,
+ sizeof(*nd_config->match_sets) *
+ nd_config->n_match_sets,
+ GFP_KERNEL);
+ if (mvm->nd_match_sets)
+ mvm->n_nd_match_sets = nd_config->n_match_sets;
+ }
+
+ /* ...and the sched scan channels for later reporting */
+ mvm->nd_channels = kmemdup(nd_config->channels,
+ sizeof(*nd_config->channels) *
+ nd_config->n_channels,
+ GFP_KERNEL);
+ if (mvm->nd_channels)
+ mvm->n_nd_channels = nd_config->n_channels;
+
+ return 0;
+}
+
+static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
+{
+ kfree(mvm->nd_match_sets);
+ mvm->nd_match_sets = NULL;
+ mvm->n_nd_match_sets = 0;
+ kfree(mvm->nd_channels);
+ mvm->nd_channels = NULL;
+ mvm->n_nd_channels = 0;
+}
+
+static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan,
+ bool test)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *vif = NULL;
+ struct iwl_mvm_vif *mvmvif = NULL;
+ struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {
+ /*
+ * Program the minimum sleep time to 10 seconds, as many
+ * platforms have issues processing a wakeup signal while
+ * still being in the process of suspending.
+ */
+ .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
+ };
+ struct iwl_host_cmd d3_cfg_cmd = {
+ .id = D3_CONFIG_CMD,
+ .flags = CMD_WANT_SKB,
+ .data[0] = &d3_cfg_cmd_data,
+ .len[0] = sizeof(d3_cfg_cmd_data),
+ };
+ int ret;
+ int len __maybe_unused;
+
+ if (!wowlan) {
+ /*
+ * mac80211 shouldn't get here, but for D3 test
+ * it doesn't warrant a warning
+ */
+ WARN_ON(!test);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif)) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
+ /* if we're not associated, this must be netdetect */
+ if (!wowlan->nd_config && !mvm->nd_config) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ ret = iwl_mvm_netdetect_config(
+ mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif);
+ if (ret)
+ goto out;
+
+ mvm->net_detect = true;
+ } else {
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta)) {
+ ret = -EINVAL;
+ goto out_noreset;
+ }
+
+ ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+ vif, mvmvif, ap_sta);
+ if (ret)
+ goto out_noreset;
+ ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+ vif, mvmvif, ap_sta);
+ if (ret)
+ goto out;
+
+ mvm->net_detect = false;
+ }
ret = iwl_mvm_power_update_device(mvm);
if (ret)
@@ -1057,11 +1127,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
- if (ret < 0)
+ if (ret < 0) {
ieee80211_restart_hw(mvm->hw);
+ iwl_mvm_free_nd(mvm);
+ }
out_noreset:
- kfree(key_data.rsc_tsc);
-
mutex_unlock(&mvm->mutex);
return ret;
@@ -1071,6 +1141,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ iwl_trans_suspend(mvm->trans);
if (iwl_mvm_is_d0i3_supported(mvm)) {
mutex_lock(&mvm->d0i3_suspend_mutex);
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
@@ -1449,9 +1520,8 @@ out:
return true;
}
-/* releases the MVM mutex */
-static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static struct iwl_wowlan_status *
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
struct error_table_start {
@@ -1463,19 +1533,15 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
- struct iwl_wowlan_status_data status;
- struct iwl_wowlan_status *fw_status;
- int ret, len, status_size, i;
- bool keep;
- struct ieee80211_sta *ap_sta;
- struct iwl_mvm_sta *mvm_ap_sta;
+ struct iwl_wowlan_status *status, *fw_status;
+ int ret, len, status_size;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
- IWL_INFO(mvm, "error table is valid (%d)\n",
- err_info.valid);
+ IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
+ err_info.valid, err_info.error_id);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
@@ -1483,7 +1549,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
- goto out_unlock;
+ return ERR_PTR(-EIO);
}
/* only for tracing for now */
@@ -1494,22 +1560,53 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "failed to query status (%d)\n", ret);
- goto out_unlock;
+ return ERR_PTR(ret);
}
/* RF-kill already asserted again... */
- if (!cmd.resp_pkt)
- goto out_unlock;
+ if (!cmd.resp_pkt) {
+ ret = -ERFKILL;
+ goto out_free_resp;
+ }
status_size = sizeof(*fw_status);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ ret = -EIO;
goto out_free_resp;
}
- fw_status = (void *)cmd.resp_pkt->data;
+ status = (void *)cmd.resp_pkt->data;
+ if (len != (status_size +
+ ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ fw_status = kmemdup(status, len, GFP_KERNEL);
+
+out_free_resp:
+ iwl_free_resp(&cmd);
+ return ret ? ERR_PTR(ret) : fw_status;
+}
+
+/* releases the MVM mutex */
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_wowlan_status_data status;
+ struct iwl_wowlan_status *fw_status;
+ int i;
+ bool keep;
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
+
+ fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ if (IS_ERR_OR_NULL(fw_status))
+ goto out_unlock;
status.pattern_number = le16_to_cpu(fw_status->pattern_number);
for (i = 0; i < 8; i++)
@@ -1522,17 +1619,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
le32_to_cpu(fw_status->wake_packet_bufsize);
status.wake_packet = fw_status->wake_packet;
- if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- goto out_free_resp;
- }
-
/* still at hard-coded place 0 for D3 image */
ap_sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[0],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta))
- goto out_free_resp;
+ goto out_free;
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
@@ -1549,16 +1641,151 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
- iwl_free_resp(&cmd);
+ kfree(fw_status);
return keep;
- out_free_resp:
- iwl_free_resp(&cmd);
- out_unlock:
+out_free:
+ kfree(fw_status);
+out_unlock:
mutex_unlock(&mvm->mutex);
return false;
}
+struct iwl_mvm_nd_query_results {
+ u32 matched_profiles;
+ struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+};
+
+static int
+iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_query_results *results)
+{
+ struct iwl_scan_offload_profiles_query *query;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int ret, len;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
+ return ret;
+ }
+
+ /* RF-kill already asserted again... */
+ if (!cmd.resp_pkt) {
+ ret = -ERFKILL;
+ goto out_free_resp;
+ }
+
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len < sizeof(*query)) {
+ IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ query = (void *)cmd.resp_pkt->data;
+
+ results->matched_profiles = le32_to_cpu(query->matched_profiles);
+ memcpy(results->matches, query->matches, sizeof(results->matches));
+
+out_free_resp:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct cfg80211_wowlan_nd_info *net_detect = NULL;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ struct iwl_mvm_nd_query_results query;
+ struct iwl_wowlan_status *fw_status;
+ unsigned long matched_profiles;
+ u32 reasons = 0;
+ int i, j, n_matches, ret;
+
+ fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ if (!IS_ERR_OR_NULL(fw_status))
+ reasons = le32_to_cpu(fw_status->wakeup_reasons);
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
+ goto out;
+
+ ret = iwl_mvm_netdetect_query_results(mvm, &query);
+ if (ret || !query.matched_profiles) {
+ wakeup_report = NULL;
+ goto out;
+ }
+
+ matched_profiles = query.matched_profiles;
+ if (mvm->n_nd_match_sets) {
+ n_matches = hweight_long(matched_profiles);
+ } else {
+ IWL_ERR(mvm, "no net detect match information available\n");
+ n_matches = 0;
+ }
+
+ net_detect = kzalloc(sizeof(*net_detect) +
+ (n_matches * sizeof(net_detect->matches[0])),
+ GFP_KERNEL);
+ if (!net_detect || !n_matches)
+ goto out_report_nd;
+
+ for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+ struct iwl_scan_offload_profile_match *fw_match;
+ struct cfg80211_wowlan_nd_match *match;
+ int n_channels = 0;
+
+ fw_match = &query.matches[i];
+
+ for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
+ n_channels += hweight8(fw_match->matching_channels[j]);
+
+ match = kzalloc(sizeof(*match) +
+ (n_channels * sizeof(*match->channels)),
+ GFP_KERNEL);
+ if (!match)
+ goto out_report_nd;
+
+ net_detect->matches[net_detect->n_matches++] = match;
+
+ match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
+ memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
+ match->ssid.ssid_len);
+
+ if (mvm->n_nd_channels < n_channels)
+ continue;
+
+ for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
+ if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
+ match->channels[match->n_channels++] =
+ mvm->nd_channels[j]->center_freq;
+ }
+
+out_report_nd:
+ wakeup.net_detect = net_detect;
+out:
+ iwl_mvm_free_nd(mvm);
+
+ mutex_unlock(&mvm->mutex);
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+
+ if (net_detect) {
+ for (i = 0; i < net_detect->n_matches; i++)
+ kfree(net_detect->matches[i]);
+ kfree(net_detect);
+ }
+}
+
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1592,9 +1819,6 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
- struct iwl_d3_iter_data resume_iter_data = {
- .mvm = mvm,
- };
struct ieee80211_vif *vif = NULL;
int ret;
enum iwl_d3_status d3_status;
@@ -1603,15 +1827,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
/* get the BSS vif pointer again */
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_iface_iterator, &resume_iter_data);
-
- if (WARN_ON(resume_iter_data.error || !resume_iter_data.vif))
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
goto out_unlock;
- vif = resume_iter_data.vif;
-
ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
if (ret)
goto out_unlock;
@@ -1624,11 +1843,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
- keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+ if (mvm->net_detect) {
+ iwl_mvm_query_netdetect_reasons(mvm, vif);
+ } else {
+ keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (keep)
- mvm->keep_vif = vif;
+ if (keep)
+ mvm->keep_vif = vif;
#endif
+ }
/* has unlocked the mutex, so skip that */
goto out;
@@ -1643,6 +1866,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
return 1;
}
@@ -1650,18 +1874,10 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- if (iwl_mvm_is_d0i3_supported(mvm)) {
- bool exit_now;
+ iwl_trans_resume(mvm->trans);
- mutex_lock(&mvm->d0i3_suspend_mutex);
- __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
- exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
- &mvm->d0i3_suspend_flags);
- mutex_unlock(&mvm->d0i3_suspend_mutex);
- if (exit_now)
- _iwl_mvm_exit_d0i3(mvm);
+ if (iwl_mvm_is_d0i3_supported(mvm))
return 0;
- }
return __iwl_mvm_resume(mvm, false);
}
@@ -1741,7 +1957,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
int remaining_time = 10;
mvm->d3_test_active = false;
+ rtnl_lock();
__iwl_mvm_resume(mvm, true);
+ rtnl_unlock();
iwl_abort_notification_waits(&mvm->notif_wait);
ieee80211_restart_hw(mvm->hw);
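Much of the new net-detect code above decodes firmware bitmaps into cfg80211 structures; the densest part is the per-match channel bitmap, where byte j/8, bit j%8 marks the j'th channel of the offloaded request. A self-contained model of that decode (buffer contents illustrative):

#include <stdio.h>

#define MATCHING_CHANNELS_LEN 5	/* mirrors SCAN_OFFLOAD_MATCHING_CHANNELS_LEN */

/* Standalone model of the channel-bitmap walk in
 * iwl_mvm_query_netdetect_reasons(). */
int main(void)
{
	unsigned char matching_channels[MATCHING_CHANNELS_LEN] = { 0x05, 0x80 };
	int j;

	for (j = 0; j < MATCHING_CHANNELS_LEN * 8; j++)
		if (matching_channels[j / 8] & (1u << (j % 8)))
			printf("channel index %d matched\n", j);
	/* prints indices 0, 2 and 15 for the bitmap above */
	return 0;
}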
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 50527a9bb267..33bf915cd7ea 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -121,78 +121,6 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
return ret;
}
-static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
-{
- struct iwl_mvm *mvm = inode->i_private;
- int ret;
-
- if (!mvm)
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
- if (!mvm->fw_error_dump) {
- ret = -ENODATA;
- goto out;
- }
-
- file->private_data = mvm->fw_error_dump;
- mvm->fw_error_dump = NULL;
- ret = 0;
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_dump_ptrs *dump_ptrs = (void *)file->private_data;
- ssize_t bytes_read = 0;
- ssize_t bytes_read_trans = 0;
-
- if (*ppos < dump_ptrs->op_mode_len)
- bytes_read +=
- simple_read_from_buffer(user_buf, count, ppos,
- dump_ptrs->op_mode_ptr,
- dump_ptrs->op_mode_len);
-
- if (bytes_read < 0 || *ppos < dump_ptrs->op_mode_len)
- return bytes_read;
-
- if (dump_ptrs->trans_ptr) {
- *ppos -= dump_ptrs->op_mode_len;
- bytes_read_trans =
- simple_read_from_buffer(user_buf + bytes_read,
- count - bytes_read, ppos,
- dump_ptrs->trans_ptr->data,
- dump_ptrs->trans_ptr->len);
- *ppos += dump_ptrs->op_mode_len;
-
- if (bytes_read_trans >= 0)
- bytes_read += bytes_read_trans;
- else if (!bytes_read)
- /* propagate the failure */
- return bytes_read_trans;
- }
-
- return bytes_read;
-
-}
-
-static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
- struct file *file)
-{
- struct iwl_mvm_dump_ptrs *dump_ptrs = (void *)file->private_data;
-
- vfree(dump_ptrs->op_mode_ptr);
- vfree(dump_ptrs->trans_ptr);
- kfree(dump_ptrs);
-
- return 0;
-}
-
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1008,7 +936,11 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
if (scan_rx_ant & ~mvm->fw->valid_rx_ant)
return -EINVAL;
- mvm->scan_rx_ant = scan_rx_ant;
+ if (mvm->scan_rx_ant != scan_rx_ant) {
+ mvm->scan_rx_ant = scan_rx_ant;
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ iwl_mvm_config_scan(mvm);
+ }
return count;
}
@@ -1250,6 +1182,118 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
return ret;
}
+
+#define MAX_NUM_ND_MATCHSETS 10
+
+static ssize_t iwl_dbgfs_netdetect_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ const char *seps = ",\n";
+ char *buf_ptr = buf;
+ char *value_str = NULL;
+ int ret, i;
+
+ /* TODO: don't free if write is being called several times in one go */
+ if (mvm->nd_config) {
+ kfree(mvm->nd_config->match_sets);
+ kfree(mvm->nd_config);
+ mvm->nd_config = NULL;
+ }
+
+ mvm->nd_config = kzalloc(sizeof(*mvm->nd_config) +
+ (11 * sizeof(struct ieee80211_channel *)),
+ GFP_KERNEL);
+ if (!mvm->nd_config) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ mvm->nd_config->n_channels = 11;
+ mvm->nd_config->scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ mvm->nd_config->interval = 5;
+ mvm->nd_config->min_rssi_thold = -80;
+ for (i = 0; i < mvm->nd_config->n_channels; i++)
+ mvm->nd_config->channels[i] = &mvm->nvm_data->channels[i];
+
+ mvm->nd_config->match_sets =
+ kcalloc(MAX_NUM_ND_MATCHSETS,
+ sizeof(*mvm->nd_config->match_sets),
+ GFP_KERNEL);
+ if (!mvm->nd_config->match_sets) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ while ((value_str = strsep(&buf_ptr, seps)) &&
+ strlen(value_str)) {
+ struct cfg80211_match_set *set;
+
+ if (mvm->nd_config->n_match_sets >= MAX_NUM_ND_MATCHSETS) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ set = &mvm->nd_config->match_sets[mvm->nd_config->n_match_sets];
+ set->ssid.ssid_len = strlen(value_str);
+
+ if (set->ssid.ssid_len > IEEE80211_MAX_SSID_LEN) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ memcpy(set->ssid.ssid, value_str, set->ssid.ssid_len);
+
+ mvm->nd_config->n_match_sets++;
+ }
+
+ ret = count;
+
+ if (mvm->nd_config->n_match_sets)
+ goto out;
+
+out_free:
+ if (mvm->nd_config)
+ kfree(mvm->nd_config->match_sets);
+ kfree(mvm->nd_config);
+ mvm->nd_config = NULL;
+out:
+ return ret;
+}
+
+static ssize_t
+iwl_dbgfs_netdetect_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ size_t bufsz;
+ ssize_t ret;
+ char *buf;
+ int i, n_match_sets, pos = 0;
+
+ n_match_sets = mvm->nd_config ? mvm->nd_config->n_match_sets : 0;
+
+ bufsz = n_match_sets * (IEEE80211_MAX_SSID_LEN + 1) + 1;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < n_match_sets; i++) {
+ if (pos +
+ mvm->nd_config->match_sets[i].ssid.ssid_len + 2 > bufsz) {
+ ret = -EIO;
+ goto out;
+ }
+
+ memcpy(buf + pos, mvm->nd_config->match_sets[i].ssid.ssid,
+ mvm->nd_config->match_sets[i].ssid.ssid_len);
+ pos += mvm->nd_config->match_sets[i].ssid.ssid_len;
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+out:
+ kfree(buf);
+ return ret;
+}
#endif
#define PRINT_MVM_REF(ref) do { \
@@ -1295,6 +1339,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_NMI);
PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
+ PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1415,12 +1460,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
-static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
- .open = iwl_dbgfs_fw_error_dump_open,
- .read = iwl_dbgfs_fw_error_dump_read,
- .release = iwl_dbgfs_fw_error_dump_release,
-};
-
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
@@ -1428,6 +1467,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(netdetect, 384);
#endif
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
@@ -1446,7 +1486,6 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
- MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
@@ -1487,6 +1526,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
mvm->debugfs_dir, &mvm->d3_wake_sysassert))
goto err;
+ MVM_DEBUGFS_ADD_FILE(netdetect, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
#endif
if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR,
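The netdetect write handler above accepts a comma- or newline-separated SSID list and turns each non-empty token into one match set. A standalone model of that tokenization (input string illustrative):

#define _DEFAULT_SOURCE	/* for strsep() */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "ssid-one,ssid-two\nssid-three";
	char *buf_ptr = buf;
	char *value_str;

	/* same strsep() loop shape as iwl_dbgfs_netdetect_write() */
	while ((value_str = strsep(&buf_ptr, ",\n")) && strlen(value_str))
		printf("match set: '%s' (len %zu)\n",
		       value_str, strlen(value_str));
	return 0;
}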
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 816883f9ff94..f3b11897991e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -84,6 +84,8 @@
* @BT_COEX_SYNC2SCO:
* @BT_COEX_CORUNNING:
* @BT_COEX_MPLUT:
+ * @BT_COEX_TTC:
+ * @BT_COEX_RRC:
*
* The COEX_MODE must be set for each command. Even if it is not changed.
*/
@@ -100,6 +102,8 @@ enum iwl_bt_coex_flags {
BT_COEX_SYNC2SCO = BIT(7),
BT_COEX_CORUNNING = BIT(8),
BT_COEX_MPLUT = BIT(9),
+ BT_COEX_TTC = BIT(20),
+ BT_COEX_RRC = BIT(21),
};
/*
@@ -127,6 +131,8 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
BT_VALID_SYNC_TO_SCO = BIT(18),
+ BT_VALID_TTC = BIT(20),
+ BT_VALID_RRC = BIT(21),
};
/**
@@ -506,7 +512,8 @@ struct iwl_bt_coex_profile_notif_old {
u8 bt_agg_traffic_load;
u8 bt_ci_compliance;
u8 ttc_enabled;
- __le16 reserved;
+ u8 rrc_enabled;
+ u8 reserved;
__le32 primary_ch_lut;
__le32 secondary_ch_lut;
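Splitting the old __le16 reserved field into rrc_enabled plus a u8 reserved keeps the notification layout unchanged. A trimmed, illustrative stand-in that checks this with offsetof (the real struct has many more fields):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Both variants keep primary_ch_lut at the same offset and the same
 * total size, so the change is wire-compatible. */
struct notif_old {
	uint8_t ttc_enabled;
	uint16_t reserved;
	uint32_t primary_ch_lut;
} __attribute__((packed));

struct notif_new {
	uint8_t ttc_enabled;
	uint8_t rrc_enabled;
	uint8_t reserved;
	uint32_t primary_ch_lut;
} __attribute__((packed));

int main(void)
{
	printf("old: size %zu, lut at %zu\n", sizeof(struct notif_old),
	       offsetof(struct notif_old, primary_ch_lut));
	printf("new: size %zu, lut at %zu\n", sizeof(struct notif_new),
	       offsetof(struct notif_new, primary_ch_lut));
	return 0;
}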
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index e74cdf2132f8..6d3bea5c59d1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -241,16 +241,12 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
-struct iwl_wowlan_config_cmd_v2 {
+struct iwl_wowlan_config_cmd {
__le32 wakeup_filter;
__le16 non_qos_seq;
__le16 qos_seq[8];
u8 wowlan_ba_teardown_tids;
u8 is_11n_connection;
-} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
-
-struct iwl_wowlan_config_cmd_v3 {
- struct iwl_wowlan_config_cmd_v2 common;
u8 offloading_tid;
u8 reserved[3];
} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 2fd8ad4633e0..430020047b77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -370,7 +370,7 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_DEBUG_FLAG_DEFAULT 0
#define IWL_BF_DEBUG_FLAG_D0I3 0
-#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_DEFAULT 0
#define IWL_BF_ESCAPE_TIMER_D0I3 0
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 1354c68f6468..1f2acf47bfb2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -794,4 +794,301 @@ struct iwl_periodic_scan_complete {
__le32 reserved;
} __packed;
+/* UMAC Scan API */
+
+/**
+ * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
+ * @size: size of the command (not including header)
+ * @reserved0: for future use and alignment
+ * @ver: API version number
+ */
+struct iwl_mvm_umac_cmd_hdr {
+ __le16 size;
+ u8 reserved0;
+ u8 ver;
+} __packed;
+
+#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8
+
+enum scan_config_flags {
+ SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
+ SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1),
+ SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2),
+ SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3),
+ SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8),
+ SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9),
+ SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10),
+ SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11),
+ SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12),
+ SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13),
+ SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14),
+ SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15),
+ SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16),
+ SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17),
+ SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18),
+ SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
+ SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
+ SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
+
+ /* Bits 26-31 are for num of channels in channel_array */
+#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
+};
+
+enum scan_config_rates {
+ /* OFDM basic rates */
+ SCAN_CONFIG_RATE_6M = BIT(0),
+ SCAN_CONFIG_RATE_9M = BIT(1),
+ SCAN_CONFIG_RATE_12M = BIT(2),
+ SCAN_CONFIG_RATE_18M = BIT(3),
+ SCAN_CONFIG_RATE_24M = BIT(4),
+ SCAN_CONFIG_RATE_36M = BIT(5),
+ SCAN_CONFIG_RATE_48M = BIT(6),
+ SCAN_CONFIG_RATE_54M = BIT(7),
+ /* CCK basic rates */
+ SCAN_CONFIG_RATE_1M = BIT(8),
+ SCAN_CONFIG_RATE_2M = BIT(9),
+ SCAN_CONFIG_RATE_5M = BIT(10),
+ SCAN_CONFIG_RATE_11M = BIT(11),
+
+ /* Bits 16-27 are for supported rates */
+#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
+};
+
+enum iwl_channel_flags {
+ IWL_CHANNEL_FLAG_EBS = BIT(0),
+ IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1),
+ IWL_CHANNEL_FLAG_EBS_ADD = BIT(2),
+ IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
+};
+
+/**
+ * struct iwl_scan_config
+ * @hdr: umac command header
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell_active: default dwell time for active scan
+ * @dwell_passive: default dwell time for passive scan
+ * @dwell_fragmented: default dwell time for fragmented scan
+ * @reserved: for future use and alignment
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwl_channel_flags
+ * @channel_array: default supported channels
+ */
+struct iwl_scan_config {
+ struct iwl_mvm_umac_cmd_hdr hdr;
+ __le32 flags;
+ __le32 tx_chains;
+ __le32 rx_chains;
+ __le32 legacy_rates;
+ __le32 out_of_channel_time;
+ __le32 suspend_time;
+ u8 dwell_active;
+ u8 dwell_passive;
+ u8 dwell_fragmented;
+ u8 reserved;
+ u8 mac_addr[ETH_ALEN];
+ u8 bcast_sta_id;
+ u8 channel_flags;
+ u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
+
+/**
+ * iwl_umac_scan_flags
+ * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ * can be preempted by other scan requests with higher priority.
+ * The low priority scan is aborted.
+ * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ * when scan starts.
+ */
+enum iwl_umac_scan_flags {
+ IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0),
+ IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1),
+};
+
+enum iwl_umac_scan_uid_offsets {
+ IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0,
+ IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8,
+};
+
+enum iwl_umac_scan_general_flags {
+ IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
+ IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
+ IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
+ IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
+ IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
+ IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
+ IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
+ IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9)
+};
+
+/**
+ * struct iwl_scan_channel_cfg_umac
+ * @flags: bitmap - bits 0-19: directed scan to the i'th SSID.
+ * @channel_num: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two scan iterations on one channel.
+ */
+struct iwl_scan_channel_cfg_umac {
+ __le32 flags;
+ u8 channel_num;
+ u8 iter_count;
+ __le16 iter_interval;
+} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
+
+/**
+ * struct iwl_scan_umac_schedule
+ * @interval: interval in seconds between scan iterations
+ * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
+ * @reserved: for alignment and future use
+ */
+struct iwl_scan_umac_schedule {
+ __le16 interval;
+ u8 iter_count;
+ u8 reserved;
+} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwl_scan_req_umac_tail {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwl_scan_umac_schedule schedule[2];
+ __le16 delay;
+ __le16 reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_1 */
+ struct iwl_scan_probe_req preq;
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwl_scan_req_umac
+ * @hdr: umac command header
+ * @flags: &enum iwl_umac_scan_flags
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @general_flags: &enum iwl_umac_scan_general_flags
+ * @reserved1: for future use and alignment
+ * @active_dwell: dwell time for active scan
+ * @passive_dwell: dwell time for passive scan
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @max_out_time: max out of serving channel time
+ * @suspend_time: max suspend time
+ * @scan_priority: scan internal prioritization &enum iwl_scan_priority
+ * @channel_flags: &enum iwl_channel_flags
+ * @n_channels: num of channels in scan request
+ * @reserved2: for future use and alignment
+ * @data: &struct iwl_scan_channel_cfg_umac and
+ * &struct iwl_scan_req_umac_tail
+ */
+struct iwl_scan_req_umac {
+ struct iwl_mvm_umac_cmd_hdr hdr;
+ __le32 flags;
+ __le32 uid;
+ __le32 ooc_priority;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
+ __le32 general_flags;
+ u8 reserved1;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ __le32 max_out_time;
+ __le32 suspend_time;
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved2;
+ u8 data[];
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_umac_scan_abort
+ * @hdr: umac command header
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @flags: reserved
+ */
+struct iwl_umac_scan_abort {
+ struct iwl_mvm_umac_cmd_hdr hdr;
+ __le32 uid;
+ __le32 flags;
+} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_umac_scan_complete
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @last_schedule: last scheduling line
+ * @last_iter: last scan iteration number
+ * @status: &enum iwl_scan_offload_complete_status
+ * @ebs_status: &enum iwl_scan_ebs_status
+ * @time_from_last_iter: time elapsed from last iteration
+ * @reserved: for future use
+ */
+struct iwl_umac_scan_complete {
+ __le32 uid;
+ u8 last_schedule;
+ u8 last_iter;
+ u8 status;
+ u8 ebs_status;
+ __le32 time_from_last_iter;
+ __le32 reserved;
+} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+/**
+ * struct iwl_scan_offload_profile_match - match information
+ * @bssid: matched bssid
+ * @channel: channel where the match occurred
+ * @energy:
+ * @matching_feature:
+ * @matching_channels: bitmap of channels that matched, referencing
+ * the channels passed in the scan offload request
+ */
+struct iwl_scan_offload_profile_match {
+ u8 bssid[ETH_ALEN];
+ __le16 reserved;
+ u8 channel;
+ u8 energy;
+ u8 matching_feature;
+ u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwl_scan_offload_profiles_query - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwl_scan_offload_profiles_query {
+ __le32 matched_profiles;
+ __le32 last_scan_age;
+ __le32 n_scans_done;
+ __le32 gp2_d0u;
+ __le32 gp2_invoked;
+ u8 resume_while_scanning;
+ u8 self_recovery;
+ __le16 reserved;
+ struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
#endif
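Several fields of the UMAC scan config pack more than one value into a single word: bits 26-31 of flags carry the channel count, and legacy_rates can carry the basic-rate set plus the same set shifted into the supported-rates bits via SCAN_CONFIG_SUPPORTED_RATE(). A standalone sketch of one plausible packing, mirroring the macros above (the specific values chosen are illustrative):

#include <stdio.h>

#define SCAN_CONFIG_FLAG_ACTIVATE	(1u << 0)
#define SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS (1u << 13)
#define SCAN_CONFIG_FLAG_SET_LEGACY_RATES  (1u << 14)
#define SCAN_CONFIG_N_CHANNELS(n)	((unsigned)(n) << 26)

#define SCAN_CONFIG_RATE_6M	(1u << 0)
#define SCAN_CONFIG_RATE_1M	(1u << 8)
#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)

int main(void)
{
	unsigned int n_channels = 39;
	unsigned int rates = SCAN_CONFIG_RATE_6M | SCAN_CONFIG_RATE_1M;

	/* channel count lives in the top bits of the same flags word */
	unsigned int flags = SCAN_CONFIG_FLAG_ACTIVATE |
			     SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
			     SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
			     SCAN_CONFIG_N_CHANNELS(n_channels);

	/* advertise the basic rates as supported rates too */
	unsigned int legacy_rates = rates | SCAN_CONFIG_SUPPORTED_RATE(rates);

	printf("flags=0x%08x legacy_rates=0x%08x\n", flags, legacy_rates);
	return 0;
}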
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index c62575d86bcd..88af6dd2ceaa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -106,6 +106,12 @@ enum {
DBG_CFG = 0x9,
ANTENNA_COUPLING_NOTIFICATION = 0xa,
+ /* UMAC scan commands */
+ SCAN_CFG_CMD = 0xc,
+ SCAN_REQ_UMAC = 0xd,
+ SCAN_ABORT_UMAC = 0xe,
+ SCAN_COMPLETE_UMAC = 0xf,
+
/* station table */
ADD_STA_KEY = 0x17,
ADD_STA = 0x18,
@@ -122,6 +128,11 @@ enum {
/* global key */
WEP_KEY = 0x20,
+ /* TDLS */
+ TDLS_CHANNEL_SWITCH_CMD = 0x27,
+ TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
+ TDLS_CONFIG_CMD = 0xa7,
+
/* MAC and Binding commands */
MAC_CONTEXT_CMD = 0x28,
TIME_EVENT_CMD = 0x29, /* both CMD and response */
@@ -190,6 +201,8 @@ enum {
/* Power - new power table command */
MAC_PM_POWER_TABLE = 0xa9,
+ MFUART_LOAD_NOTIFICATION = 0xb1,
+
REPLY_RX_PHY_CMD = 0xc0,
REPLY_RX_MPDU_CMD = 0xc1,
BA_NOTIF = 0xc5,
@@ -236,11 +249,9 @@ enum {
WOWLAN_TX_POWER_PER_DB = 0xe6,
/* and for NetDetect */
- NET_DETECT_CONFIG_CMD = 0x54,
- NET_DETECT_PROFILES_QUERY_CMD = 0x56,
- NET_DETECT_PROFILES_CMD = 0x57,
- NET_DETECT_HOTSPOTS_CMD = 0x58,
- NET_DETECT_HOTSPOTS_QUERY_CMD = 0x59,
+ SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
+ SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58,
+ SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59,
REPLY_MAX = 0xff,
};
@@ -1201,6 +1212,21 @@ struct iwl_missed_beacons_notif {
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
/**
+ * struct iwl_mfuart_load_notif - mfuart image version & status
+ * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ */
+struct iwl_mfuart_load_notif {
+ __le32 installed_ver;
+ __le32 external_ver;
+ __le32 status;
+ __le32 duration;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_1 */
+
+/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
* ( SET_CALIB_DEFAULT_CMD = 0x8e )
* @calib_index: the calibration to set value for
@@ -1589,7 +1615,7 @@ enum iwl_sf_scenario {
#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
/* smart FIFO default values */
-#define SF_W_MARK_SISO 4096
+#define SF_W_MARK_SISO 6144
#define SF_W_MARK_MIMO2 8192
#define SF_W_MARK_MIMO3 6144
#define SF_W_MARK_LEGACY 4096
@@ -1711,4 +1737,145 @@ struct iwl_scd_txq_cfg_cmd {
u8 flags;
} __packed;
+/***********************************
+ * TDLS API
+ ***********************************/
+
+/* Type of TDLS request */
+enum iwl_tdls_channel_switch_type {
+ TDLS_SEND_CHAN_SW_REQ = 0,
+ TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
+ TDLS_MOVE_CH,
+}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
+
+/**
+ * Switch timing sub-element in a TDLS channel-switch command
+ * @frame_timestamp: GP2 timestamp of channel-switch request/response packet
+ * received from peer
+ * @max_offchan_duration: How many microseconds out of a DTIM are given
+ * to the TDLS off-channel communication. For instance if the DTIM is
+ * 200TU and the TDLS peer is to be given 25% of the time, the value
+ * given will be 50TU, or 50 * 1024 if translated into microseconds.
+ * @switch_time: switch time the peer sent in its channel switch timing IE
+ * @switch_timeout: switch timeout the peer sent in its channel switch timing IE
+ */
+struct iwl_tdls_channel_switch_timing {
+ __le32 frame_timestamp; /* GP2 time of peer packet Rx */
+ __le32 max_offchan_duration; /* given in micro-seconds */
+ __le32 switch_time; /* given in micro-seconds */
+ __le32 switch_timeout; /* given in micro-seconds */
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
+
+#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
+
+/**
+ * TDLS channel switch frame template
+ *
+ * A template representing a TDLS channel-switch request or response frame
+ *
+ * @switch_time_offset: offset to the channel switch timing IE in the template
+ * @tx_cmd: Tx parameters for the frame
+ * @data: frame data
+ */
+struct iwl_tdls_channel_switch_frame {
+ __le32 switch_time_offset;
+ struct iwl_tx_cmd tx_cmd;
+ u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
+
+/**
+ * TDLS channel switch command
+ *
+ * The command is sent to initiate a channel switch and also in response to
+ * incoming TDLS channel-switch request/response packets from remote peers.
+ *
+ * @switch_type: see &enum iwl_tdls_channel_switch_type
+ * @peer_sta_id: station id of TDLS peer
+ * @ci: channel we switch to
+ * @timing: timing related data for command
+ * @frame: channel-switch request/response template, depending on switch_type
+ */
+struct iwl_tdls_channel_switch_cmd {
+ u8 switch_type;
+ __le32 peer_sta_id;
+ struct iwl_fw_channel_info ci;
+ struct iwl_tdls_channel_switch_timing timing;
+ struct iwl_tdls_channel_switch_frame frame;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
+
+/**
+ * TDLS channel switch start notification
+ *
+ * @status: non-zero on success
+ * @offchannel_duration: duration given in microseconds
+ * @sta_id: station id of the peer the channel-switch is performed with
+ */
+struct iwl_tdls_channel_switch_notif {
+ __le32 status;
+ __le32 offchannel_duration;
+ __le32 sta_id;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
+
+/**
+ * TDLS station info
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
+ * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
+ * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
+ */
+struct iwl_tdls_sta_info {
+ u8 sta_id;
+ u8 tx_to_peer_tid;
+ __le16 tx_to_peer_ssn;
+ __le32 is_initiator;
+} __packed; /* TDLS_STA_INFO_VER_1 */
+
+/**
+ * TDLS basic config command
+ *
+ * @id_and_color: MAC id and color being configured
+ * @tdls_peer_count: amount of currently connected TDLS peers
+ * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
+ * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
+ * @sta_info: per-station info. Only the first tdls_peer_count entries are set
+ * @pti_req_data_offset: offset of network-level data for the PTI template
+ * @pti_req_tx_cmd: Tx parameters for PTI request template
+ * @pti_req_template: PTI request template data
+ */
+struct iwl_tdls_config_cmd {
+ __le32 id_and_color; /* mac id and color */
+ u8 tdls_peer_count;
+ u8 tx_to_ap_tid;
+ __le16 tx_to_ap_ssn;
+ struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
+
+ __le32 pti_req_data_offset;
+ struct iwl_tx_cmd pti_req_tx_cmd;
+ u8 pti_req_template[0];
+} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * TDLS per-station config information from FW
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
+ * the peer
+ */
+struct iwl_tdls_config_sta_info_res {
+ __le16 sta_id;
+ __le16 tx_to_peer_last_seq;
+} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
+
+/**
+ * TDLS config information from FW
+ *
+ * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
+ * @sta_info: per-station TDLS config information
+ */
+struct iwl_tdls_config_res {
+ __le32 tx_to_ap_last_seq;
+ struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
+} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
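The max_offchan_duration description implies a simple conversion: take the peer's share of the DTIM interval in TU and multiply by 1024 to get microseconds. A worked sketch of that arithmetic (helper name and the 25% share are illustrative, taken from the example in the comment):

#include <stdio.h>

/* 25% of a 200 TU DTIM is 50 TU; one TU is 1024 microseconds. */
static unsigned int offchan_duration_usec(unsigned int dtim_tu,
					  unsigned int percent)
{
	unsigned int share_tu = dtim_tu * percent / 100;

	return share_tu * 1024;	/* TU -> microseconds */
}

int main(void)
{
	printf("%u usec\n", offchan_duration_usec(200, 25)); /* 51200 */
	return 0;
}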
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index eb03943f8463..d0fa6e9ed590 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -186,7 +186,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
static const u8 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
- fw = iwl_get_ucode_image(mvm, ucode_type);
+ if (ucode_type == IWL_UCODE_REGULAR &&
+ iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_CUSTOM) &&
+ iwl_fw_dbg_conf_enabled(mvm->fw, FW_DBG_CUSTOM))
+ fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
+ else
+ fw = iwl_get_ucode_image(mvm, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
@@ -227,6 +232,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
st_fwrd_space.addr = mvm->sf_space.addr;
st_fwrd_space.size = mvm->sf_space.size;
ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
+ return ret;
+ }
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
@@ -390,6 +399,42 @@ out:
return ret;
}
+static int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm,
+ enum iwl_fw_dbg_conf conf_id)
+{
+ u8 *ptr;
+ int ret = 0;
+ int i;
+
+ if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
+ "Invalid configuration %d\n", conf_id))
+ return -EINVAL;
+
+ if (!mvm->fw->dbg_conf_tlv[conf_id])
+ return -EINVAL;
+
+ if (mvm->fw_dbg_conf != FW_DBG_INVALID)
+ IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
+ mvm->fw_dbg_conf);
+
+ /* Send all HCMDs for configuring the FW debug */
+ ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
+ for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
+ struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
+ le16_to_cpu(cmd->len), cmd->data);
+ if (ret)
+ return ret;
+
+ ptr += sizeof(*cmd);
+ ptr += le16_to_cpu(cmd->len);
+ }
+
+ mvm->fw_dbg_conf = conf_id;
+ return ret;
+}
+
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@@ -441,6 +486,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
+ mvm->fw_dbg_conf = FW_DBG_INVALID;
+ iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
+
ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;
@@ -462,6 +510,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@@ -501,6 +551,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ ret = iwl_mvm_config_scan(mvm);
+ if (ret)
+ goto error;
+ }
+
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -587,3 +643,19 @@ int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
le32_to_cpu(radio_version->radio_dash));
return 0;
}
+
+int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mvm,
+ "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->installed_ver),
+ le32_to_cpu(mfuart_notif->external_ver),
+ le32_to_cpu(mfuart_notif->status),
+ le32_to_cpu(mfuart_notif->duration));
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 0c5c0b0e23f5..f6d86ccce6a8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -83,11 +83,15 @@ struct iwl_mvm_mac_iface_iterator_data {
struct ieee80211_vif *vif;
unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
- u32 used_hw_queues;
enum iwl_tsf_id preferred_tsf;
bool found_vif;
};
+struct iwl_mvm_hw_queues_iface_iterator_data {
+ struct ieee80211_vif *exclude_vif;
+ unsigned long used_hw_queues;
+};
+
static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -197,8 +201,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
/*
* Get the mask of the queues used by the vif
*/
-u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
{
u32 qmask = 0, ac;
@@ -214,6 +217,54 @@ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
return qmask;
}
+static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
+
+ /* exclude the given vif */
+ if (vif == data->exclude_vif)
+ return;
+
+ data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
+}
+
+static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* Mark the queues used by the sta */
+ data->used_hw_queues |= mvmsta->tfd_queue_msk;
+}
+
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *exclude_vif)
+{
+ struct iwl_mvm_hw_queues_iface_iterator_data data = {
+ .exclude_vif = exclude_vif,
+ .used_hw_queues =
+ BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+ BIT(mvm->aux_queue) |
+ BIT(IWL_MVM_CMD_QUEUE),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* mark all VIF used hw queues */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_iface_hw_queues_iter, &data);
+
+ /* don't assign the same hw queues as TDLS stations */
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_mac_sta_hw_queues_iter,
+ &data);
+
+ return data.used_hw_queues;
+}
+
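iwl_mvm_get_used_hw_queues() folds three sources into one bitmask: queues that are always reserved, queues claimed by every interface except the excluded one, and TFD queues claimed by TDLS stations. A rough stand-alone sketch of the same accumulation follows; the queue numbers are illustrative, not the driver's real assignments.

#include <stdio.h>

#define CMD_QUEUE	9	/* illustrative fixed assignments */
#define AUX_QUEUE	15
#define OFFCHAN_QUEUE	8

static unsigned long used_hw_queues(const unsigned long *vif_masks,
				    int n_vifs, int exclude_idx,
				    const unsigned long *sta_masks,
				    int n_stas)
{
	unsigned long used = (1UL << CMD_QUEUE) | (1UL << AUX_QUEUE) |
			     (1UL << OFFCHAN_QUEUE);
	int i;

	for (i = 0; i < n_vifs; i++)	/* all VIFs but the excluded one */
		if (i != exclude_idx)
			used |= vif_masks[i];
	for (i = 0; i < n_stas; i++)	/* TDLS stations' TFD queues */
		used |= sta_masks[i];
	return used;
}

int main(void)
{
	unsigned long vifs[] = { 0x0f, 0x30 };
	unsigned long stas[] = { 0x40 };

	/* excludes vifs[0]: 0x8300 | 0x30 | 0x40 = 0x8370 */
	printf("used mask: 0x%lx\n", used_hw_queues(vifs, 2, 0, stas, 1));
	return 0;
}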
static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -226,9 +277,6 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
return;
}
- /* Mark the queues used by the vif */
- data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(data->mvm, vif);
-
/* Mark MAC IDs as used by clearing the available bit, and
* (below) mark TSFs as used if their existing use is not
* compatible with the new interface type.
@@ -275,10 +323,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
/* no preference yet */
.preferred_tsf = NUM_TSF_IDS,
- .used_hw_queues =
- BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(mvm->aux_queue) |
- BIT(IWL_MVM_CMD_QUEUE),
.found_vif = false,
};
u32 ac;
@@ -317,6 +361,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_mac_iface_iterator, &data);
+ used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
+
/*
* In the case we're getting here during resume, it's similar to
* firmware restart, and with RESUME_ALL the iterator will find
@@ -366,8 +412,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0;
}
- used_hw_queues = data.used_hw_queues;
-
/* Find available queues, and allocate them to the ACs */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(&used_hw_queues,
@@ -1219,17 +1263,25 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
- struct ieee80211_vif *csa_vif, u32 gp2)
+ struct ieee80211_vif *csa_vif, u32 gp2,
+ bool tx_success)
{
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(csa_vif);
+ /* Don't start the countdown from a failed beacon */
+ if (!tx_success && !mvmvif->csa_countdown)
+ return;
+
+ mvmvif->csa_countdown = true;
+
if (!ieee80211_csa_is_complete(csa_vif)) {
int c = ieee80211_csa_update_counter(csa_vif);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
if (csa_vif->p2p &&
- !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2) {
+ !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
+ tx_success) {
u32 rel_time = (c + 1) *
csa_vif->bss_conf.beacon_int -
IWL_MVM_CHANNEL_SWITCH_TIME_GO;
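The tx_success gating above means a failed beacon can never start the CSA countdown, but once the countdown is running it keeps ticking across failures. A toy model of that state machine is sketched below; the counter decrement stands in for ieee80211_csa_update_counter(), which this sketch does not call.

#include <stdbool.h>
#include <stdio.h>

struct csa_state {
	bool countdown;		/* has the countdown started? */
	int counter;		/* beacons left until the switch */
};

static void beacon_tx_done(struct csa_state *s, bool tx_success)
{
	/* a failed beacon never starts the countdown */
	if (!tx_success && !s->countdown)
		return;

	s->countdown = true;
	if (s->counter > 0)
		s->counter--;
}

int main(void)
{
	struct csa_state s = { .countdown = false, .counter = 3 };

	beacon_tx_done(&s, false);	/* ignored: not started yet */
	beacon_tx_done(&s, true);	/* starts countdown, 3 -> 2 */
	beacon_tx_done(&s, false);	/* still ticks once started, 2 -> 1 */
	printf("countdown=%d counter=%d\n", s.countdown, s.counter);
	return 0;
}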
@@ -1252,38 +1304,30 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
struct iwl_mvm_tx_resp *beacon_notify_hdr;
struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif;
- u64 tsf;
+ u16 status;
lockdep_assert_held(&mvm->mutex);
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_CAPA_EXTENDED_BEACON) {
- struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
-
- beacon_notify_hdr = &beacon->beacon_notify_hdr;
- tsf = le64_to_cpu(beacon->tsf);
- mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
- } else {
- struct iwl_beacon_notif *beacon = (void *)pkt->data;
-
- beacon_notify_hdr = &beacon->beacon_notify_hdr;
- tsf = le64_to_cpu(beacon->tsf);
- }
+ beacon_notify_hdr = &beacon->beacon_notify_hdr;
+ mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+ status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
- le16_to_cpu(beacon_notify_hdr->status.status) &
- TX_STATUS_MSK,
- beacon_notify_hdr->failure_frame, tsf,
+ status, beacon_notify_hdr->failure_frame,
+ le64_to_cpu(beacon->tsf),
mvm->ap_last_beacon_gp2,
le32_to_cpu(beacon_notify_hdr->initial_rate));
csa_vif = rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
if (unlikely(csa_vif && csa_vif->csa_active))
- iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2);
+ iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
+ (status == TX_STATUS_SUCCESS));
tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
lockdep_is_held(&mvm->mutex));
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index b6d2683da3a9..31a5b3f4266c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -69,6 +69,7 @@
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
+#include <linux/devcoredump.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>
@@ -253,6 +254,26 @@ static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
spin_unlock_bh(&mvm->refs_lock);
}
+bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
+{
+ int i;
+ bool taken = false;
+
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return true;
+
+ spin_lock_bh(&mvm->refs_lock);
+ for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
+ if (mvm->refs[i]) {
+ taken = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&mvm->refs_lock);
+
+ return taken;
+}
+
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
iwl_mvm_ref(mvm, ref_type);
@@ -302,7 +323,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
- hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
hw->rate_control_algorithm = "iwl-mvm-rs";
/*
@@ -315,15 +337,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
- IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 &&
!iwlwifi_mod_params.uapsd_disable) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
}
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
+ mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+ hw->wiphy->features |=
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ }
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -343,8 +369,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW)
- hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
@@ -402,7 +427,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_P2P_GO_OPPPS |
NL80211_FEATURE_DYNAMIC_SMPS |
- NL80211_FEATURE_STATIC_SMPS;
+ NL80211_FEATURE_STATIC_SMPS |
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
@@ -440,7 +466,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_RFKILL_RELEASE;
+ WIPHY_WOWLAN_RFKILL_RELEASE |
+ WIPHY_WOWLAN_NET_DETECT;
if (!iwlwifi_mod_params.sw_crypto)
mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -449,6 +476,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
hw->wiphy->wowlan = &mvm->wowlan;
}
@@ -463,6 +491,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
return ret;
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
+ IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ }
+
+ if (mvm->fw->ucode_capa.capa[0] &
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
+ hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+ }
+
ret = ieee80211_register_hw(mvm->hw);
if (ret)
iwl_mvm_leds_exit(mvm);
@@ -679,10 +718,51 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
}
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen)
+{
+ const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
+ ssize_t bytes_read;
+ ssize_t bytes_read_trans;
+
+ if (offset < dump_ptrs->op_mode_len) {
+ bytes_read = min_t(ssize_t, count,
+ dump_ptrs->op_mode_len - offset);
+ memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
+ bytes_read);
+ offset += bytes_read;
+ count -= bytes_read;
+
+ if (count == 0)
+ return bytes_read;
+ } else {
+ bytes_read = 0;
+ }
+
+ if (!dump_ptrs->trans_ptr)
+ return bytes_read;
+
+ offset -= dump_ptrs->op_mode_len;
+ bytes_read_trans = min_t(ssize_t, count,
+ dump_ptrs->trans_ptr->len - offset);
+ memcpy(buffer + bytes_read,
+ (u8 *)dump_ptrs->trans_ptr->data + offset,
+ bytes_read_trans);
+
+ return bytes_read + bytes_read_trans;
+}
+
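The read callback above serves one virtual file from two concatenated buffers, translating the global offset into a per-buffer offset and splitting the copy across the boundary when needed. The same logic in isolation, as a plain user-space sketch:

#include <string.h>
#include <sys/types.h>

static ssize_t read_two_segments(char *out, off_t off, size_t count,
				 const char *a, size_t a_len,
				 const char *b, size_t b_len)
{
	ssize_t n = 0;

	if (off < (off_t)a_len) {
		size_t chunk = a_len - off;

		if (chunk > count)
			chunk = count;
		memcpy(out, a + off, chunk);	/* part from buffer A */
		n = chunk;
		off += chunk;
		count -= chunk;
		if (!count)
			return n;
	}
	off -= a_len;				/* now relative to B */
	if ((size_t)off < b_len) {
		size_t chunk = b_len - off;

		if (chunk > count)
			chunk = count;
		memcpy(out + n, b + off, chunk);	/* rest from B */
		n += chunk;
	}
	return n;
}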
+static void iwl_mvm_free_coredump(const void *data)
+{
+ const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
+
+ vfree(fw_error_dump->op_mode_ptr);
+ vfree(fw_error_dump->trans_ptr);
+ kfree(fw_error_dump);
+}
+
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
- static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL };
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
@@ -695,10 +775,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- if (mvm->fw_error_dump)
- return;
-
- fw_error_dump = kzalloc(sizeof(*mvm->fw_error_dump), GFP_KERNEL);
+ fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
return;
@@ -773,16 +850,19 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (fw_error_dump->trans_ptr)
file_len += fw_error_dump->trans_ptr->len;
dump_file->file_len = cpu_to_le32(file_len);
- mvm->fw_error_dump = fw_error_dump;
- /* notify the userspace about the error we had */
- kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env);
+ dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
+ GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
}
-#endif
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
- iwl_mvm_fw_error_dump(mvm);
+ /* Clear the D3 reconfig flag; we only need it to avoid dumping a
+ * firmware coredump on reconfiguration, and we shouldn't do that
+ * on the D3->D0 transition.
+ */
+ if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
+ iwl_mvm_fw_error_dump(mvm);
iwl_trans_stop_device(mvm->trans);
@@ -803,6 +883,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+ memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -859,9 +940,8 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
return ret;
}
-static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
+static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
mutex_lock(&mvm->mutex);
@@ -876,9 +956,50 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
/* allow transport/FW low power modes */
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ /*
+ * If we have TDLS peers, remove them. We don't know the last seqno/PN
+ * of packets the FW sent out, so we must reconnect.
+ */
+ iwl_mvm_teardown_tdls_peers(mvm);
+
mutex_unlock(&mvm->mutex);
}
+static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
+{
+ bool exit_now;
+
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+ &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+ if (exit_now) {
+ IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+ _iwl_mvm_exit_d0i3(mvm);
+ }
+}
+
+static void
+iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ switch (reconfig_type) {
+ case IEEE80211_RECONFIG_TYPE_RESTART:
+ iwl_mvm_restart_complete(mvm);
+ break;
+ case IEEE80211_RECONFIG_TYPE_SUSPEND:
+ iwl_mvm_resume_complete(mvm);
+ break;
+ }
+}
+
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
@@ -1087,7 +1208,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- u32 tfd_msk = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
if (tfd_msk) {
mutex_lock(&mvm->mutex);
@@ -1383,6 +1504,9 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
.cmd = cmd,
};
+ if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
+ return false;
+
memset(cmd, 0, sizeof(*cmd));
cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
cmd->max_macs = ARRAY_SIZE(cmd->macs);
@@ -1835,9 +1959,11 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -EINVAL;
- ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
- if (ret)
- return ret;
+ if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
+ if (ret)
+ return ret;
+ }
mutex_lock(&mvm->mutex);
@@ -1848,7 +1974,9 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
+ else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
else
ret = iwl_mvm_scan_request(mvm, vif, req);
@@ -2065,6 +2193,15 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
out_unlock:
mutex_unlock(&mvm->mutex);
+ if (sta->tdls && ret == 0) {
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+ else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+ }
+
return ret;
}
@@ -2147,9 +2284,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
- if (ret)
- return ret;
+ if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
+ if (ret)
+ return ret;
+ }
mutex_lock(&mvm->mutex);
@@ -2169,27 +2308,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
goto out;
}
- mvm->scan_status = IWL_MVM_SCAN_SCHED;
-
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
- ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
- if (ret)
- goto err;
- }
-
- ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
if (ret)
- goto err;
-
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
- ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
- else
- ret = iwl_mvm_sched_scan_start(mvm, req);
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
- if (!ret)
- goto out;
-err:
- mvm->scan_status = IWL_MVM_SCAN_NONE;
out:
mutex_unlock(&mvm->mutex);
return ret;
@@ -2207,6 +2329,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
iwl_mvm_wait_for_async_handlers(mvm);
return ret;
+
}
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
@@ -2235,12 +2358,16 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- /*
- * Support for TX only, at least for now, so accept
- * the key and do nothing else. Then mac80211 will
- * pass it for TX but we don't have to use it for RX.
+ /* For non-client mode, only use WEP keys for TX as we probably
+ * don't have a station yet anyway and would then have to keep
+ * track of the keys, linking them to each of the clients/peers
+ * as they appear. For now, don't do that; WEP offload doesn't
+ * really matter much for performance, but we need it for some
+ * other offload features in client mode.
*/
- return 0;
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+ break;
default:
/* currently FW supports only one optional cipher scheme */
if (hw->n_cipher_schemes &&
@@ -2563,7 +2690,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(mvm, "enter\n");
mutex_lock(&mvm->mutex);
- iwl_mvm_stop_p2p_roc(mvm);
+ iwl_mvm_stop_roc(mvm);
mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
@@ -2676,8 +2803,8 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
switch (vif->type) {
case NL80211_IFTYPE_AP:
- /* Unless it's a CSA flow we have nothing to do here */
- if (vif->csa_active) {
+ /* only needed if we're switching chanctx (i.e. during CSA) */
+ if (switching_chanctx) {
mvmvif->ap_ibss_active = true;
break;
}
@@ -2721,23 +2848,32 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
}
/* Handle binding during CSA */
- if ((vif->type == NL80211_IFTYPE_AP) ||
- (switching_chanctx && (vif->type == NL80211_IFTYPE_STATION))) {
+ if (vif->type == NL80211_IFTYPE_AP) {
iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
- if (vif->csa_active && vif->type == NL80211_IFTYPE_STATION) {
- struct iwl_mvm_sta *mvmsta;
+ if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
+ u32 duration = 2 * vif->bss_conf.beacon_int;
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
- mvmvif->ap_sta_id);
+ /* iwl_mvm_protect_session() reads directly from the
+ * device (the system time), so make sure it is
+ * available.
+ */
+ ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
+ if (ret)
+ goto out_remove_binding;
- if (WARN_ON(!mvmsta))
- goto out;
+ /* Protect the session to make sure we hear the first
+ * beacon on the new channel.
+ */
+ iwl_mvm_protect_session(mvm, vif, duration, duration,
+ vif->bss_conf.beacon_int / 2,
+ true);
- /* TODO: only re-enable after the first beacon */
- iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
+
+ iwl_mvm_update_quotas(mvm, NULL);
}
goto out;
@@ -2771,7 +2907,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_vif *disabled_vif = NULL;
- struct iwl_mvm_sta *mvmsta;
lockdep_assert_held(&mvm->mutex);
@@ -2786,9 +2921,11 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
break;
case NL80211_IFTYPE_AP:
/* This part is triggered only during CSA */
- if (!vif->csa_active || !mvmvif->ap_ibss_active)
+ if (!switching_chanctx || !mvmvif->ap_ibss_active)
goto out;
+ mvmvif->csa_countdown = false;
+
/* Set CS bit on all the stations */
iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
@@ -2803,12 +2940,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
disabled_vif = vif;
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
- mvmvif->ap_sta_id);
-
- if (!WARN_ON(!mvmsta))
- iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
-
iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
break;
default:
@@ -2834,18 +2965,12 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
- struct ieee80211_vif_chanctx_switch *vifs,
- int n_vifs,
- enum ieee80211_chanctx_switch_mode mode)
+static int
+iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
+ struct ieee80211_vif_chanctx_switch *vifs)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- /* we only support SWAP_CONTEXTS and with a single-vif right now */
- if (mode != CHANCTX_SWMODE_SWAP_CONTEXTS || n_vifs > 1)
- return -EOPNOTSUPP;
-
mutex_lock(&mvm->mutex);
__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
@@ -2874,15 +2999,51 @@ out_remove:
__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
out_reassign:
- ret = __iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx);
- if (ret) {
+ if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
goto out_restart;
}
- ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+ if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+ true)) {
+ IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+ goto out_restart;
+ }
+
+ goto out;
+
+out_restart:
+ /* things keep failing, better restart the hw */
+ iwl_mvm_nic_restart(mvm, false);
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int
+iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
+ struct ieee80211_vif_chanctx_switch *vifs)
+{
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
+
+ ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
true);
if (ret) {
+ IWL_ERR(mvm,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ goto out;
+
+out_reassign:
+ if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+ true)) {
IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
goto out_restart;
}
@@ -2895,6 +3056,34 @@ out_restart:
out:
mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ /* we only support a single-vif right now */
+ if (n_vifs > 1)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
+ break;
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
return ret;
}
@@ -2980,27 +3169,134 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
-static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_chan_def *chandef)
+static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ /* By implementing this operation, we prevent mac80211 from
+ * starting its own channel switch timer, so that we can call
+ * ieee80211_chswitch_done() ourselves at the right time
+ * (which is when the absence time event starts).
+ */
+
+ IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
+ "dummy channel switch op\n");
+}
+
+static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *csa_vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 apply_time;
+ int ret;
mutex_lock(&mvm->mutex);
- csa_vif = rcu_dereference_protected(mvm->csa_vif,
- lockdep_is_held(&mvm->mutex));
- if (WARN(csa_vif && csa_vif->csa_active,
- "Another CSA is already in progress"))
+ IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
+ chsw->chandef.center_freq1);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ csa_vif =
+ rcu_dereference_protected(mvm->csa_vif,
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ONCE(csa_vif && csa_vif->csa_active,
+ "Another CSA is already in progress")) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ rcu_assign_pointer(mvm->csa_vif, vif);
+
+ if (WARN_ONCE(mvmvif->csa_countdown,
+ "Previous CSA countdown didn't complete")) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* Schedule the time event to a bit before beacon 1,
+ * to make sure we're in the new channel when the
+ * GO/AP arrives.
+ */
+ apply_time = chsw->device_timestamp +
+ ((vif->bss_conf.beacon_int * (chsw->count - 1) -
+ IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
+
+ if (chsw->block_tx)
+ iwl_mvm_csa_client_absent(mvm, vif);
+
+ iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
+ apply_time);
+ if (mvmvif->bf_data.bf_enabled) {
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_unlock;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ mvmvif->ps_disabled = true;
+
+ ret = iwl_mvm_power_update_ps(mvm);
+ if (ret)
goto out_unlock;
- IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
- chandef->center_freq1);
- rcu_assign_pointer(mvm->csa_vif, vif);
+ /* we won't be on this channel any longer */
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
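For the station case, the apply_time arithmetic above schedules the absence time event (count - 1) beacon intervals ahead, pulled in by IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT TUs and converted to microseconds (1 TU = 1024 usec). A worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned device_timestamp = 1000000;	/* usec, assumed */
	unsigned beacon_int = 100;		/* TU */
	unsigned count = 5;			/* beacons until the switch */
	unsigned sw_time_client = 10;		/* TU, per the define above */

	unsigned apply_time = device_timestamp +
		((beacon_int * (count - 1) - sw_time_client) * 1024);

	/* 1000000 + (100*4 - 10)*1024 = 1000000 + 399360 = 1399360 */
	printf("apply_time = %u usec\n", apply_time);
	return 0;
}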
+static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct iwl_mvm_sta *mvmsta;
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
+ mvmvif->ap_sta_id);
+
+ if (WARN_ON(!mvmsta)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_unlock;
+
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ mvmvif->ps_disabled = false;
+
+ ret = iwl_mvm_power_update_ps(mvm);
out_unlock:
mutex_unlock(&mvm->mutex);
+
+ return ret;
}
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
@@ -3009,33 +3305,52 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_sta *sta;
+ int i;
+ u32 msk = 0;
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
mutex_lock(&mvm->mutex);
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
- if (WARN_ON_ONCE(!mvmsta))
- goto done;
+ /* flush the AP-station and all TDLS peers */
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->vif != vif)
+ continue;
+
+ /* make sure only TDLS peers or the AP are flushed */
+ WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
+
+ msk |= mvmsta->tfd_queue_msk;
+ }
if (drop) {
- if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true))
+ if (iwl_mvm_flush_tx_path(mvm, msk, true))
IWL_ERR(mvm, "flush request fail\n");
+ mutex_unlock(&mvm->mutex);
} else {
- iwl_trans_wait_tx_queue_empty(mvm->trans,
- mvmsta->tfd_queue_msk);
+ mutex_unlock(&mvm->mutex);
+
+ /* this can take a while, and we may need/want other operations
+ * to succeed while doing this, so do it without the mutex held
+ */
+ iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
}
-done:
- mutex_unlock(&mvm->mutex);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.start = iwl_mvm_mac_start,
- .restart_complete = iwl_mvm_mac_restart_complete,
+ .reconfig_complete = iwl_mvm_mac_reconfig_complete,
.stop = iwl_mvm_mac_stop,
.add_interface = iwl_mvm_mac_add_interface,
.remove_interface = iwl_mvm_mac_remove_interface,
@@ -3076,7 +3391,13 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.set_tim = iwl_mvm_set_tim,
- .channel_switch_beacon = iwl_mvm_channel_switch_beacon,
+ .channel_switch = iwl_mvm_channel_switch,
+ .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .post_channel_switch = iwl_mvm_post_channel_switch,
+
+ .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
+ .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
+ .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 845429c88cf4..d24660fb4ef2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -87,12 +87,18 @@
/* A TimeUnit is 1024 microsecond */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
-/* This value represents the number of TUs before CSA "beacon 0" TBTT
- * when the CSA time-event needs to be scheduled to start. It must be
- * big enough to ensure that we switch in time.
+/* For GO, this value represents the number of TUs before CSA "beacon
+ * 0" TBTT when the CSA time-event needs to be scheduled to start. It
+ * must be big enough to ensure that we switch in time.
*/
#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40
+/* For client, this value represents the number of TUs before CSA
+ * "beacon 1" TBTT, instead. This is because we don't know when the
+ * GO/AP will be in the new channel, so we switch early enough.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10
+
/*
* This value (in TUs) is used to fine tune the CSA NoA end time which should
* be just before "beacon 0" TBTT.
@@ -269,6 +275,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_NMI,
IWL_MVM_REF_TM_CMD,
IWL_MVM_REF_EXIT_WORK,
+ IWL_MVM_REF_PROTECT_CSA,
/* update debugfs.c when changing this */
@@ -288,7 +295,6 @@ enum iwl_bt_force_ant_mode {
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
* @ba_enabled: indicates if beacon abort is enabled
-* @last_beacon_signal: last beacon rssi signal in dbm
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
* @bt_coex_min_thold: minimum threshold for BT coex
@@ -399,6 +405,9 @@ struct iwl_mvm_vif {
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
+
+ /* Indicates that CSA countdown may be started */
+ bool csa_countdown;
};
static inline struct iwl_mvm_vif *
@@ -519,6 +528,13 @@ enum {
#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100
#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
+enum iwl_mvm_tdls_cs_state {
+ IWL_MVM_TDLS_SW_IDLE = 0,
+ IWL_MVM_TDLS_SW_REQ_SENT,
+ IWL_MVM_TDLS_SW_REQ_RCVD,
+ IWL_MVM_TDLS_SW_ACTIVE,
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -578,6 +594,7 @@ struct iwl_mvm {
struct work_struct sta_drained_wk;
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
+ u32 tfd_drained[IWL_MVM_STATION_COUNT];
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -588,6 +605,10 @@ struct iwl_mvm {
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+ /* UMAC scan tracking */
+ u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS];
+ u8 scan_seq_num, sched_scan_seq_num;
+
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -649,7 +670,7 @@ struct iwl_mvm {
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
struct work_struct fw_error_dump_wk;
- struct iwl_mvm_dump_ptrs *fw_error_dump;
+ enum iwl_fw_dbg_conf fw_dbg_conf;
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
@@ -660,6 +681,15 @@ struct iwl_mvm {
#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan;
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+
+ /* sched scan settings for net detect */
+ struct cfg80211_sched_scan_request *nd_config;
+ struct ieee80211_scan_ies nd_ies;
+ struct cfg80211_match_set *nd_match_sets;
+ int n_nd_match_sets;
+ struct ieee80211_channel **nd_channels;
+ int n_nd_channels;
+ bool net_detect;
#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
bool d3_test_active;
@@ -732,6 +762,28 @@ struct iwl_mvm {
u32 ap_last_beacon_gp2;
u8 low_latency_agg_frame_limit;
+
+ /* TDLS channel switch data */
+ struct {
+ struct delayed_work dwork;
+ enum iwl_mvm_tdls_cs_state state;
+
+ /*
+ * Current cs sta - might be different from periodic cs peer
+ * station. Value is meaningless when the cs-state is idle.
+ */
+ u8 cur_sta_id;
+
+ /* TDLS periodic channel-switch peer */
+ struct {
+ u8 sta_id;
+ u8 op_class;
+ bool initiator; /* are we the link initiator */
+ struct cfg80211_chan_def chandef;
+ struct sk_buff *skb; /* ch sw template */
+ u32 ch_sw_tm_ie;
+ } peer;
+ } tdls_cs;
};
/* Extract MVM priv from op_mode and _hw */
@@ -748,6 +800,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
+ IWL_MVM_STATUS_D3_RECONFIG,
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -756,6 +809,26 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
}
+/* Must be called with rcu_read_lock() held; the lock can only be
+ * released once mvmsta is no longer needed.
+ */
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return NULL;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* This can happen if the station has been removed right now */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
+
static inline struct iwl_mvm_sta *
iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
{
@@ -829,6 +902,16 @@ int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, u8 sta_id);
+void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag);
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
@@ -885,6 +968,8 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
/* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -898,6 +983,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
/* MAC (virtual interface) programming */
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -906,8 +993,7 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool force_assoc_off, const u8 *bssid_override);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
@@ -918,6 +1004,8 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *exclude_vif);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -928,6 +1016,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
struct ieee80211_vif *disabled_vif);
/* Scanning */
+int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
@@ -950,6 +1039,10 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
+int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies);
int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -964,6 +1057,17 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies);
+/* UMAC scan */
+int iwl_mvm_config_scan(struct iwl_mvm *mvm);
+int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies);
+int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -1043,7 +1147,7 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
- struct iwl_wowlan_config_cmd_v2 *cmd);
+ struct iwl_wowlan_config_cmd *cmd);
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
@@ -1053,6 +1157,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
@@ -1068,12 +1173,14 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
enum ieee80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
+bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
@@ -1189,6 +1296,10 @@ bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
+void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
+int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@@ -1200,18 +1311,37 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool added_vif);
/* TDLS */
+
+/*
+ * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
+ * This TID is marked as used vs the AP and all connected TDLS peers.
+ */
+#define IWL_MVM_TDLS_FW_TID 4
+
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool sta_added);
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params);
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
-#else
-static inline void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) {}
-#endif
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index af074563e770..d55fd8e3654c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -339,11 +339,15 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
} *file_sec;
const u8 *eof, *temp;
int max_section_size;
+ const __le32 *dword_buff;
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
+#define NVM_HEADER_0 (0x2A504C54)
+#define NVM_HEADER_1 (0x4E564D2A)
+#define NVM_HEADER_SIZE (4 * sizeof(u32))
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
@@ -372,12 +376,6 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
mvm->nvm_file_name, fw_entry->size);
- if (fw_entry->size < sizeof(*file_sec)) {
- IWL_ERR(mvm, "NVM file too small\n");
- ret = -EINVAL;
- goto out;
- }
-
if (fw_entry->size > MAX_NVM_FILE_LEN) {
IWL_ERR(mvm, "NVM file too large\n");
ret = -EINVAL;
@@ -385,8 +383,25 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
}
eof = fw_entry->data + fw_entry->size;
-
- file_sec = (void *)fw_entry->data;
+ dword_buff = (__le32 *)fw_entry->data;
+
+ /* Some NVM files contain a header. The header is identified
+ * by two dwords, as follows:
+ * dword[0] = 0x2A504C54
+ * dword[1] = 0x4E564D2A
+ *
+ * This header must be skipped when providing the NVM data to the FW.
+ */
+ if (fw_entry->size > NVM_HEADER_SIZE &&
+ dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
+ dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
+ file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
+ IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
+ IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
+ le32_to_cpu(dword_buff[3]));
+ } else {
+ file_sec = (void *)fw_entry->data;
+ }
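A compact stand-alone version of the header check above: it assumes a little-endian host (the driver compares against cpu_to_le32 values) and a 16-byte header of { magic0, magic1, version, mfg date }, returning a pointer past the header when the magic dwords match.

#include <stdint.h>
#include <stddef.h>

static const uint8_t *nvm_payload(const uint8_t *data, size_t size,
				  size_t *payload_size)
{
	const uint32_t *dw = (const uint32_t *)data;
	const size_t hdr = 4 * sizeof(uint32_t);	/* 16 bytes */

	if (size > hdr && dw[0] == 0x2A504C54 && dw[1] == 0x4E564D2A) {
		/* dw[2] and dw[3] carry version and mfg date */
		*payload_size = size - hdr;
		return data + hdr;		/* skip the header */
	}
	*payload_size = size;			/* no header present */
	return data;
}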
while (true) {
if (file_sec->data > eof) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c
index adcbf4c8edd8..68b0169c8892 100644
--- a/drivers/net/wireless/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/iwlwifi/mvm/offloading.c
@@ -67,7 +67,7 @@
#include "mvm.h"
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
- struct iwl_wowlan_config_cmd_v2 *cmd)
+ struct iwl_wowlan_config_cmd *cmd)
{
int i;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 5b719ee8e789..97dfba50c682 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -244,6 +244,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
false),
+ RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
+ true),
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -254,6 +256,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
+ RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
+
+ RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
+ true),
+ RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+
};
#undef RX_HANDLER
#define CMD(x) [x] = #x
@@ -317,11 +325,9 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(WOWLAN_KEK_KCK_MATERIAL),
CMD(WOWLAN_GET_STATUSES),
CMD(WOWLAN_TX_POWER_PER_DB),
- CMD(NET_DETECT_CONFIG_CMD),
- CMD(NET_DETECT_PROFILES_QUERY_CMD),
- CMD(NET_DETECT_PROFILES_CMD),
- CMD(NET_DETECT_HOTSPOTS_CMD),
- CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
+ CMD(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
+ CMD(SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD),
+ CMD(SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD),
CMD(CARD_STATE_NOTIFICATION),
CMD(MISSED_BEACONS_NOTIFICATION),
CMD(BT_COEX_PRIO_TABLE),
@@ -344,6 +350,13 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
CMD(ANTENNA_COUPLING_NOTIFICATION),
CMD(SCD_QUEUE_CFG),
+ CMD(SCAN_CFG_CMD),
+ CMD(SCAN_REQ_UMAC),
+ CMD(SCAN_ABORT_UMAC),
+ CMD(SCAN_COMPLETE_UMAC),
+ CMD(TDLS_CHANNEL_SWITCH_CMD),
+ CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
+ CMD(TDLS_CONFIG_CMD),
};
#undef CMD
@@ -403,6 +416,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (cfg->max_rx_agg_size)
hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+ if (cfg->max_tx_agg_size)
+ hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+
op_mode = hw->priv;
op_mode->ops = &iwl_mvm_ops;
@@ -439,6 +455,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk);
+ INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock);
@@ -479,6 +496,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+ trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
+ trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
+ memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+ sizeof(trans->dbg_conf_tlv));
/* set up notification wait support */
iwl_notification_wait_init(&mvm->notif_wait);
@@ -522,7 +543,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
err = iwl_run_init_mvm_ucode(mvm, true);
- iwl_trans_stop_device(trans);
+ if (!err || !iwlmvm_mod_params.init_dbg)
+ iwl_trans_stop_device(trans);
mutex_unlock(&mvm->mutex);
/* returns 0 if successful, 1 if success but in rfkill */
if (err < 0 && !iwlmvm_mod_params.init_dbg) {
@@ -531,16 +553,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
}
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
- scan_size = sizeof(struct iwl_scan_req_unified_lmac) +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels +
- sizeof(struct iwl_scan_probe_req);
- else
- scan_size = sizeof(struct iwl_scan_cmd) +
- mvm->fw->ucode_capa.max_probe_length +
- mvm->fw->ucode_capa.n_scan_channels *
- sizeof(struct iwl_scan_channel);
+ scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
if (!mvm->scan_cmd)
@@ -585,16 +598,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
- if (mvm->fw_error_dump) {
- vfree(mvm->fw_error_dump->op_mode_ptr);
- vfree(mvm->fw_error_dump->trans_ptr);
- kfree(mvm->fw_error_dump);
- }
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
+ if (mvm->nd_config) {
+ kfree(mvm->nd_config->match_sets);
+ kfree(mvm->nd_config);
+ mvm->nd_config = NULL;
+ }
#endif
iwl_trans_op_mode_leave(mvm->trans);
@@ -991,7 +1004,7 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
}
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
- struct iwl_wowlan_config_cmd_v3 *cmd,
+ struct iwl_wowlan_config_cmd *cmd,
struct iwl_d0i3_iter_data *iter_data)
{
struct ieee80211_sta *ap_sta;
@@ -1007,14 +1020,14 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
goto out;
mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
- cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
+ cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
cmd->offloading_tid = iter_data->offloading_tid;
/*
* The d0i3 uCode takes care of the nonqos counters,
* so configure only the qos seq ones.
*/
- iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
out:
rcu_read_unlock();
}
@@ -1026,14 +1039,11 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
struct iwl_d0i3_iter_data d0i3_iter_data = {
.mvm = mvm,
};
- struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
- .common = {
- .wakeup_filter =
- cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
- IWL_WOWLAN_WAKEUP_BEACON_MISS |
- IWL_WOWLAN_WAKEUP_LINK_CHANGE |
- IWL_WOWLAN_WAKEUP_BCN_FILTERING),
- },
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING),
};
struct iwl_d3_manager_config d3_cfg_cmd = {
.min_sleep_time = cpu_to_le32(1000),
@@ -1045,6 +1055,19 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
synchronize_net();
+ /*
+ * iwl_mvm_ref_sync takes a reference before checking the flag,
+ * so by verifying that no reference is held we prevent a state
+ * in which iwl_mvm_ref_sync continues successfully while we
+ * configure the firmware to enter d0i3.
+ */
+ if (iwl_mvm_ref_taken(mvm)) {
+ IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
+ clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ wake_up(&mvm->d0i3_exit_waitq);
+ return 1;
+ }
+
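The flag-then-check ordering here is the classic way to close the race: publish IN_D0I3 first, synchronize, then look for reference holders; any iwl_mvm_ref_sync that raced ahead will have taken its reference before we look. A schematic of that handshake using plain C11 atomics, not the driver's actual locking:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool in_d0i3;
static atomic_int refs;

/* caller side: take a reference, then bail out if d0i3 started */
static bool ref_sync(void)
{
	atomic_fetch_add(&refs, 1);
	if (atomic_load(&in_d0i3)) {
		atomic_fetch_sub(&refs, 1);
		return false;	/* d0i3 in progress, don't proceed */
	}
	return true;
}

/* d0i3 entry: publish the flag first, then check for holders */
static bool try_enter_d0i3(void)
{
	atomic_store(&in_d0i3, true);
	if (atomic_load(&refs) > 0) {
		atomic_store(&in_d0i3, false);	/* abort: a ref is held */
		return false;
	}
	return true;
}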
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_enter_d0i3_iterator,
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 12283b55ee84..1c0d4a45c1a8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -68,7 +68,7 @@
#include "mvm.h"
/* Maps the driver specific channel width definition to the fw values */
-static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -90,7 +90,7 @@ static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
* Maps the driver specific control channel position (relative to the center
* freq) definitions to the fw values
*/
-static inline u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
{
switch (chandef->chan->center_freq - chandef->center_freq1) {
case -70:
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 5b85b0cc7a2a..2620dd0c45f9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -286,6 +286,27 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
return true;
}
+static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
+{
+ int numerator;
+ int dtim_interval = dtimper * bi;
+
+ if (WARN_ON(!dtim_interval))
+ return 0;
+
+ if (dtimper == 1) {
+ if (bi > 100)
+ numerator = 408;
+ else
+ numerator = 510;
+ } else if (dtimper < 10) {
+ numerator = 612;
+ } else {
+ return 0;
+ }
+ return max(1, (numerator / dtim_interval));
+}
+
static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
{
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -308,7 +329,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
{
- int dtimper, dtimper_msec;
+ int dtimper, bi;
int keep_alive;
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
@@ -317,6 +338,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
dtimper = vif->bss_conf.dtim_period;
+ bi = vif->bss_conf.beacon_int;
/*
* Regardless of power management state the driver must set
@@ -324,10 +346,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
* immediately after association. Check that keep alive period
* is at least 3 * DTIM
*/
- dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- keep_alive = max_t(int, 3 * dtimper_msec,
- MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
- keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
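+ /* e.g. dtimper = 2, bi = 100 TU: 3 * 200 TU is ~614 ms, rounded
+ * up to 1 second and then clamped to the minimum keep alive period
+ */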
+ keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
+ USEC_PER_SEC);
+ keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
if (mvm->ps_disabled)
@@ -352,11 +373,14 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
radar_detect = iwl_mvm_power_is_radar(vif);
/* Check skip over DTIM conditions */
- if (!radar_detect && (dtimper <= 10) &&
+ if (!radar_detect && (dtimper < 10) &&
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- cmd->skip_dtim_periods = 3;
+ cmd->skip_dtim_periods =
+ iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
+ if (cmd->skip_dtim_periods)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 18a539999580..30ceb67ed7a7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -158,6 +158,12 @@ struct rs_tx_column {
allow_column_func_t checks[MAX_COLUMN_CHECKS];
};
+static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
+}
+
static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl)
{
@@ -218,6 +224,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
},
+ .checks = {
+ rs_ant_allow,
+ },
},
[RS_COLUMN_LEGACY_ANT_B] = {
.mode = RS_LEGACY,
@@ -231,6 +240,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
},
+ .checks = {
+ rs_ant_allow,
+ },
},
[RS_COLUMN_SISO_ANT_A] = {
.mode = RS_SISO,
@@ -246,6 +258,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
},
.checks = {
rs_siso_allow,
+ rs_ant_allow,
},
},
[RS_COLUMN_SISO_ANT_B] = {
@@ -262,6 +275,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
},
.checks = {
rs_siso_allow,
+ rs_ant_allow,
},
},
[RS_COLUMN_SISO_ANT_A_SGI] = {
@@ -279,6 +293,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
},
.checks = {
rs_siso_allow,
+ rs_ant_allow,
rs_sgi_allow,
},
},
@@ -297,6 +312,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
},
.checks = {
rs_siso_allow,
+ rs_ant_allow,
rs_sgi_allow,
},
},
@@ -505,10 +521,11 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
const char *prefix)
{
- IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d\n",
+ IWL_DEBUG_RATE(mvm,
+ "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
prefix, rs_pretty_lq_type(rate->type),
rate->index, rs_pretty_ant(rate->ant),
- rate->bw, rate->sgi, rate->ldpc);
+ rate->bw, rate->sgi, rate->ldpc, rate->stbc);
}
static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
@@ -741,6 +758,12 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
}
+ if (is_siso(rate) && rate->stbc) {
+ /* To enable STBC we need to set both a flag and ANT_AB */
+ ucode_rate |= RATE_MCS_ANT_AB_MSK;
+ ucode_rate |= RATE_MCS_VHT_STBC_MSK;
+ }
+
ucode_rate |= rate->bw;
if (rate->sgi)
ucode_rate |= RATE_MCS_SGI_MSK;
@@ -785,6 +808,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
rate->ldpc = true;
+ if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
+ rate->stbc = true;
rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
@@ -794,7 +819,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) {
rate->type = LQ_HT_SISO;
- WARN_ON_ONCE(num_of_ant != 1);
+ WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
} else if (nss == 2) {
rate->type = LQ_HT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
@@ -807,7 +832,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) {
rate->type = LQ_VHT_SISO;
- WARN_ON_ONCE(num_of_ant != 1);
+ WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
} else if (nss == 2) {
rate->type = LQ_VHT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
@@ -992,7 +1017,15 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
static inline bool rs_rate_match(struct rs_rate *a,
struct rs_rate *b)
{
- return (a->type == b->type) && (a->ant == b->ant) && (a->sgi == b->sgi);
+ bool ant_match;
+
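+ /* With STBC the frame is sent over both antennas (ANT_AB is set
+ * in ucode_rate_from_rs_rate()), so an exact antenna match can't
+ * be expected here; accept either single antenna instead.
+ */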
+ if (a->stbc)
+ ant_match = (b->ant == ANT_A || b->ant == ANT_B);
+ else
+ ant_match = (a->ant == b->ant);
+
+ return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi)
+ && ant_match;
}
static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
@@ -1093,10 +1126,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (time_after(jiffies,
(unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
- int tid;
+ int t;
+
IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
- for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
- ieee80211_stop_tx_ba_session(sta, tid);
+ for (t = 0; t < IWL_MAX_TID_COUNT; t++)
+ ieee80211_stop_tx_ba_session(sta, t);
iwl_mvm_rs_rate_init(mvm, sta, info->band, false);
return;
@@ -1137,16 +1171,15 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Rate did match, so reset the missed_rate_counter */
lq_sta->missed_rate_counter = 0;
- /* Figure out if rate scale algorithm is in active or search table */
- if (rs_rate_match(&rate,
- &(lq_sta->lq_info[lq_sta->active_tbl].rate))) {
+ if (!lq_sta->search_better_tbl) {
curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (rs_rate_match(&rate,
- &lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) {
+ } else {
curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- } else {
+ }
+
+ if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) {
IWL_DEBUG_RATE(mvm,
"Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1171,6 +1204,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ /* ampdu_ack_len = 0 means that no BA was received. In this case
+ * treat it as a single frame loss, as we don't want the success
+ * ratio to dip too quickly just because a BA wasn't received
+ */
+ if (info->status.ampdu_ack_len == 0)
+ info->status.ampdu_len = 1;
+
ucode_rate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
@@ -1225,7 +1265,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
/* See if there's a better rate or modulation mode to try. */
- if (sta && sta->supp_rates[info->band])
+ if (sta->supp_rates[info->band])
rs_rate_scale_perform(mvm, sta, lq_sta, tid);
}
@@ -1623,6 +1663,8 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
else
rate->type = LQ_LEGACY_G;
+ rate->bw = RATE_MCS_CHAN_WIDTH_20;
+ rate->ldpc = false;
rate_mask = lq_sta->active_legacy_rate;
} else if (column->mode == RS_SISO) {
rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
@@ -1634,8 +1676,11 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
WARN_ON_ONCE("Bad column mode");
}
- rate->bw = rs_bw_from_sta_bw(sta);
- rate->ldpc = lq_sta->ldpc;
+ if (column->mode != RS_LEGACY) {
+ rate->bw = rs_bw_from_sta_bw(sta);
+ rate->ldpc = lq_sta->ldpc;
+ }
+
search_tbl->column = col_id;
rs_set_expected_tpt_table(lq_sta, search_tbl);
@@ -1754,6 +1799,29 @@ out:
return action;
}
+static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mvmsta->vif;
+ bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
+ !vif->bss_conf.ps);
+
+ /* lq_sta->stbc is only set when our chip supports Tx STBC and the
+ * peer is an HT/VHT STA which supports STBC of at least 1*SS
+ */
+ if (!lq_sta->stbc)
+ return false;
+
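+ /* STBC is used only while power save is disabled, either
+ * globally or on this station's interface
+ */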
+ if (!mvm->ps_disabled && !sta_ps_disabled)
+ return false;
+
+ if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+ return false;
+
+ return true;
+}
+
static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
int *weaker, int *stronger)
{
@@ -2675,6 +2743,11 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (mvm->cfg->ht_params->ldpc &&
(ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
lq_sta->ldpc = true;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(mvm->fw->valid_tx_ant) > 1) &&
+ (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
+ lq_sta->stbc = true;
} else {
rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
lq_sta->is_vht = true;
@@ -2682,8 +2755,16 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (mvm->cfg->ht_params->ldpc &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
lq_sta->ldpc = true;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(mvm->fw->valid_tx_ant) > 1) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
+ lq_sta->stbc = true;
}
+ if (IWL_MVM_RS_DISABLE_MIMO)
+ lq_sta->active_mimo2_rate = 0;
+
lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate,
BITS_PER_LONG);
lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate,
@@ -2692,11 +2773,11 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
BITS_PER_LONG);
IWL_DEBUG_RATE(mvm,
- "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d\n",
+ "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC%d\n",
lq_sta->active_legacy_rate,
lq_sta->active_siso_rate,
lq_sta->active_mimo2_rate,
- lq_sta->is_vht, lq_sta->ldpc);
+ lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc);
IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
lq_sta->max_legacy_rate_idx,
lq_sta->max_siso_rate_idx,
@@ -2820,6 +2901,7 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
* rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
*/
static void rs_build_rates_table(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
const struct rs_rate *initial_rate)
{
@@ -2832,6 +2914,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
memcpy(&rate, initial_rate, sizeof(rate));
valid_tx_ant = mvm->fw->valid_tx_ant;
+ rate.stbc = rs_stbc_allow(mvm, sta, lq_sta);
if (is_siso(&rate)) {
num_rates = RS_INITIAL_SISO_NUM_RATES;
@@ -2903,7 +2986,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
if (WARN_ON_ONCE(!sta || !initial_rate))
return;
- rs_build_rates_table(mvm, lq_sta, initial_rate);
+ rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
if (num_of_ant(initial_rate->ant) == 1)
lq_cmd->single_stream_ant_msk = initial_rate->ant;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index eb34c1209acc..defd70a6d9e6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -208,6 +208,7 @@ struct rs_rate {
u32 bw;
bool sgi;
bool ldpc;
+ bool stbc;
};
@@ -331,6 +332,7 @@ struct iwl_lq_sta {
u64 last_tx;
bool is_vht;
bool ldpc; /* LDPC Rx is supported by the STA */
+ bool stbc; /* Tx STBC is supported by chip and Rx by STA */
enum ieee80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3cf40f3f58ec..94b6e7297a1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -96,27 +96,27 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
* Adds the rxb to a new skb and give it to mac80211
*/
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
- u32 ampdu_status,
- struct iwl_rx_cmd_buffer *rxb,
- struct ieee80211_rx_status *stats)
+ u32 ampdu_status, u8 crypt_len,
+ struct iwl_rx_cmd_buffer *rxb)
{
- struct sk_buff *skb;
unsigned int hdrlen, fraglen;
- /* Dont use dev_alloc_skb(), we'll have enough headroom once
- * ieee80211_hdr pulled.
- */
- skb = alloc_skb(128, GFP_ATOMIC);
- if (!skb) {
- IWL_ERR(mvm, "alloc_skb failed\n");
- return;
- }
/* If frame is small enough to fit in skb->head, pull it completely.
- * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
- * are more efficient.
+ * If not, only pull ieee80211_hdr (including crypto if present, and
+ * an additional 8 bytes for SNAP/ethertype, see below) so that
+ * splice() or TCP coalesce are more efficient.
+ *
+ * Since, in addition, ieee80211_data_to_8023() always pulls in at
+ * least 8 bytes (possibly more for mesh) we can do the same here
+ * to save the cost of doing it later. That still doesn't pull in
+ * the actual IP header since the typical case has a SNAP header.
+ * If the latter changes (there are efforts in the standards group
+ * to do so) we should revisit this and ieee80211_data_to_8023().
*/
- hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
+ hdrlen = (len <= skb_tailroom(skb)) ? len :
+ sizeof(*hdr) + crypt_len + 8;
memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
fraglen = len - hdrlen;
@@ -129,8 +129,6 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen, rxb->truesize);
}
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
ieee80211_rx(mvm->hw, skb);
}
@@ -185,7 +183,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *stats,
- u32 rx_pkt_status)
+ u32 rx_pkt_status,
+ u8 *crypt_len)
{
if (!ieee80211_has_protected(hdr->frame_control) ||
(rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
@@ -205,12 +204,14 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
stats->flag |= RX_FLAG_DECRYPTED;
IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n");
+ *crypt_len = IEEE80211_CCMP_HDR_LEN;
return 0;
case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
/* Don't drop the frame and decrypt it in SW */
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
+ *crypt_len = IEEE80211_TKIP_IV_LEN;
/* fall through if TTAK OK */
case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
@@ -218,6 +219,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return -1;
stats->flag |= RX_FLAG_DECRYPTED;
+ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_WEP_ENC)
+ *crypt_len = IEEE80211_WEP_IV_LEN;
return 0;
case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
@@ -242,15 +246,17 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct ieee80211_hdr *hdr;
- struct ieee80211_rx_status rx_status = {};
+ struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_info *phy_info;
struct iwl_rx_mpdu_res_start *rx_res;
struct ieee80211_sta *sta;
+ struct sk_buff *skb;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
u32 rx_pkt_status;
+ u8 crypt_len = 0;
phy_info = &mvm->last_phy_info;
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -259,20 +265,32 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_pkt_status = le32_to_cpup((__le32 *)
(pkt->data + sizeof(*rx_res) + len));
- memset(&rx_status, 0, sizeof(rx_status));
+ /* Don't use dev_alloc_skb(), we'll have enough headroom once
+ * ieee80211_hdr pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return 0;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
/*
* drop the packet if it has failed being decrypted by HW
*/
- if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) {
+ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
+ &crypt_len)) {
IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
rx_pkt_status);
+ kfree_skb(skb);
return 0;
}
if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
phy_info->cfg_phy_cnt);
+ kfree_skb(skb);
return 0;
}
@@ -283,31 +301,31 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
!(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
- rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* This will be used in several places later */
rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
/* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_info->timestamp);
- rx_status.device_timestamp = le32_to_cpu(phy_info->system_timestamp);
- rx_status.band =
+ rx_status->mactime = le64_to_cpu(phy_info->timestamp);
+ rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
+ rx_status->band =
(phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
+ rx_status->freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
- rx_status.band);
+ rx_status->band);
/*
* TSF as indicated by the fw is at INA time, but mac80211 expects the
* TSF at the beginning of the MPDU.
*/
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+ /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
- iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
+ iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
- IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
- (unsigned long long)rx_status.mactime);
+ IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
+ (unsigned long long)rx_status->mactime);
rcu_read_lock();
/*
@@ -326,15 +344,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
if (sta) {
struct iwl_mvm_sta *mvmsta;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- rs_update_last_rssi(mvm, &mvmsta->lq_sta,
- &rx_status);
+ rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
}
rcu_read_unlock();
/* set the preamble flag if appropriate */
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
- rx_status.flag |= RX_FLAG_SHORTPRE;
+ rx_status->flag |= RX_FLAG_SHORTPRE;
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
/*
@@ -342,8 +359,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
* together since we get a single PHY response
* from the firmware for all of them
*/
- rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
- rx_status.ampdu_reference = mvm->ampdu_ref;
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->ampdu_reference = mvm->ampdu_ref;
}
/* Set up the HT phy flags */
@@ -351,50 +368,50 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
case RATE_MCS_CHAN_WIDTH_20:
break;
case RATE_MCS_CHAN_WIDTH_40:
- rx_status.flag |= RX_FLAG_40MHZ;
+ rx_status->flag |= RX_FLAG_40MHZ;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
+ rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
+ rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
+ rx_status->flag |= RX_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status.flag |= RX_FLAG_HT_GF;
+ rx_status->flag |= RX_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status.flag |= RX_FLAG_LDPC;
+ rx_status->flag |= RX_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status.flag |= RX_FLAG_HT;
- rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
- rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status.vht_nss =
+ rx_status->vht_nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
- rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
- rx_status.flag |= RX_FLAG_VHT;
- rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->flag |= RX_FLAG_VHT;
+ rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status.vht_flag |= RX_VHT_FLAG_BF;
+ rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else {
- rx_status.rate_idx =
+ rx_status->rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- rx_status.band);
+ rx_status->band);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags,
- rx_status.flag & RX_FLAG_AMPDU_DETAILS);
+ rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif
- iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
- rxb, &rx_status);
+ iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+ crypt_len, rxb);
return 0;
}
@@ -500,29 +517,8 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
.mvm = mvm,
};
- /*
- * set temperature debug enabled - ignore FW temperature updates
- * and use the user set temperature.
- */
- if (mvm->temperature_test) {
- if (mvm->temperature < le32_to_cpu(common->temperature))
- IWL_DEBUG_TEMP(mvm,
- "Ignoring FW temperature update that is greater than the debug set temperature (debug temp = %d, fw temp = %d)\n",
- mvm->temperature,
- le32_to_cpu(common->temperature));
- /*
- * skip iwl_mvm_tt_handler since we are in
- * temperature debug mode and we are ignoring
- * the new temperature value
- */
- goto update;
- }
+ iwl_mvm_tt_temp_changed(mvm, le32_to_cpu(common->temperature));
- if (mvm->temperature != le32_to_cpu(common->temperature)) {
- mvm->temperature = le32_to_cpu(common->temperature);
- iwl_mvm_tt_handler(mvm);
- }
-update:
iwl_mvm_update_rx_statistics(mvm, stats);
ieee80211_iterate_active_interfaces(mvm->hw,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 7554f7053830..e5294d01181e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -83,15 +83,29 @@ struct iwl_mvm_scan_params {
} dwell[IEEE80211_NUM_BANDS];
};
+enum iwl_umac_scan_uid_type {
+ IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0),
+ IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1),
+ IWL_UMAC_SCAN_UID_ALL = IWL_UMAC_SCAN_UID_REG_SCAN |
+ IWL_UMAC_SCAN_UID_SCHED_SCAN,
+};
+
+static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type, bool notify);
+
+static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
+{
+ if (mvm->scan_rx_ant != ANT_NONE)
+ return mvm->scan_rx_ant;
+ return mvm->fw->valid_rx_ant;
+}
+
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
u16 rx_chain;
u8 rx_ant;
- if (mvm->scan_rx_ant != ANT_NONE)
- rx_ant = mvm->scan_rx_ant;
- else
- rx_ant = mvm->fw->valid_rx_ant;
+ rx_ant = iwl_mvm_scan_rx_ant(mvm);
rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -270,7 +284,8 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool *global_bound = data;
- if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id < MAX_PHYS)
*global_bound = true;
}
@@ -365,6 +380,10 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
!is_sched_scan)
max_probe_len -= 32;
+ /* A DS Parameter Set element is added on the 2.4 GHz band if required */
+ if (iwl_mvm_rrm_scan_needed(mvm))
+ max_probe_len -= 3;
+
return max_probe_len;
}
@@ -536,23 +555,17 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u8 client_bitmap = 0;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
+ if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
+ !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
struct iwl_sched_scan_results *notif = (void *)pkt->data;
- client_bitmap = notif->client_bitmap;
+ if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
+ return 0;
}
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
- client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
- if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
- IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
- ieee80211_sched_scan_results(mvm->hw);
- } else {
- IWL_DEBUG_SCAN(mvm, "Scan results\n");
- }
- }
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mvm->hw);
return 0;
}
@@ -662,6 +675,7 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw,
status == IWL_SCAN_OFFLOAD_ABORTED);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
mvm->last_ebs_successful = !ebs_status;
@@ -963,6 +977,20 @@ free_blacklist:
return ret;
}
+static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending scheduled scan with filtering, n_match_sets %d\n",
+ req->n_match_sets);
+ return false;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
+ return true;
+}
+
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
{
@@ -978,15 +1006,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
.schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
};
- if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
- IWL_DEBUG_SCAN(mvm,
- "Sending scheduled scan with filtering, filter len %d\n",
- req->n_match_sets);
- } else {
- IWL_DEBUG_SCAN(mvm,
- "Sending Scheduled scan without filtering\n");
+ if (iwl_mvm_scan_pass_all(mvm, req))
scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
- }
if (mvm->last_ebs_successful &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
@@ -997,6 +1018,38 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
sizeof(scan_req), &scan_req);
}
+int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ int ret;
+
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
+ } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
+ mvm->scan_status = IWL_MVM_SCAN_SCHED;
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
+ } else {
+ mvm->scan_status = IWL_MVM_SCAN_SCHED;
+ ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+ ret = iwl_mvm_sched_scan_start(mvm, req);
+ }
+
+ return ret;
+}
+
static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
{
int ret;
@@ -1041,6 +1094,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
lockdep_assert_held(&mvm->mutex);
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
+ notify);
+
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1071,8 +1128,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
/*
* Clear the scan status so the next scan requests will succeed. This
* also ensures the Rx handler doesn't do anything, as the scan was
- * stopped from above.
+ * stopped from above. Since the rx handler won't do anything now,
+ * we have to release the scan reference here.
*/
+ if (mvm->scan_status == IWL_MVM_SCAN_OS)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) {
@@ -1124,20 +1185,64 @@ iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
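+/*
+ * Copy the scan IEs into pos. When a radio measurement scan needs it,
+ * also insert a zeroed DS Parameter Set element at its proper place in
+ * the element ordering; the zero channel acts as a placeholder to be
+ * filled in per probed channel (presumably by the fw).
+ */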
+static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
+ size_t len, u8 *const pos)
+{
+ static const u8 before_ds_params[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ };
+ size_t offs;
+ u8 *newpos = pos;
+
+ if (!iwl_mvm_rrm_scan_needed(mvm)) {
+ memcpy(newpos, ies, len);
+ return newpos + len;
+ }
+
+ offs = ieee80211_ie_split(ies, len,
+ before_ds_params,
+ ARRAY_SIZE(before_ds_params),
+ 0);
+
+ memcpy(newpos, ies, offs);
+ newpos += offs;
+
+ /* Add a placeholder for DS Parameter Set element */
+ *newpos++ = WLAN_EID_DS_PARAMS;
+ *newpos++ = 1;
+ *newpos++ = 0;
+
+ memcpy(newpos, ies + offs, len - offs);
+ newpos += len - offs;
+
+ return newpos;
+}
+
static void
iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies,
- struct iwl_scan_req_unified_lmac *cmd)
+ struct iwl_scan_probe_req *preq,
+ const u8 *mac_addr, const u8 *mac_addr_mask)
{
- struct iwl_scan_probe_req *preq = (void *)(cmd->data +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels);
struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
- u8 *pos;
+ u8 *pos, *newpos;
+
+ /*
+ * Unfortunately, right now the offload scan doesn't support randomising
+ * within the firmware, so until the firmware API is ready we implement
+ * it in the driver. This means the address is only randomised when
+ * the scan is restarted, not per iteration, but at least that helps
+ * a bit.
+ */
+ if (mac_addr)
+ get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
+ else
+ memcpy(frame->sa, vif->addr, ETH_ALEN);
frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
eth_broadcast_addr(frame->da);
- memcpy(frame->sa, vif->addr, ETH_ALEN);
eth_broadcast_addr(frame->bssid);
frame->seq_ctrl = 0;
@@ -1148,11 +1253,14 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
preq->mac_header.offset = 0;
preq->mac_header.len = cpu_to_le16(24 + 2);
- memcpy(pos, ies->ies[IEEE80211_BAND_2GHZ],
- ies->len[IEEE80211_BAND_2GHZ]);
+ /* Insert the DS Parameter Set element on the 2.4 GHz band */
+ newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
+ ies->ies[IEEE80211_BAND_2GHZ],
+ ies->len[IEEE80211_BAND_2GHZ],
+ pos);
preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
- preq->band_data[0].len = cpu_to_le16(ies->len[IEEE80211_BAND_2GHZ]);
- pos += ies->len[IEEE80211_BAND_2GHZ];
+ preq->band_data[0].len = cpu_to_le16(newpos - pos);
+ pos = newpos;
memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ]);
@@ -1213,9 +1321,10 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
+ struct iwl_scan_probe_req *preq;
struct iwl_mvm_scan_params params = {};
u32 flags;
- int ssid_bitmap = 0;
+ u32 ssid_bitmap = 0;
int ret, i;
lockdep_assert_held(&mvm->mutex);
@@ -1274,7 +1383,13 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
req->req.n_channels, ssid_bitmap,
cmd);
- iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, cmd);
+ preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels);
+
+ iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
+ req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ req->req.mac_addr : NULL,
+ req->req.mac_addr_mask);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
@@ -1307,6 +1422,7 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
+ struct iwl_scan_probe_req *preq;
struct iwl_mvm_scan_params params = {};
int ret;
u32 flags = 0, ssid_bitmap = 0;
@@ -1330,15 +1446,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->n_channels = (u8)req->n_channels;
- if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
- IWL_DEBUG_SCAN(mvm,
- "Sending scheduled scan with filtering, n_match_sets %d\n",
- req->n_match_sets);
- } else {
- IWL_DEBUG_SCAN(mvm,
- "Sending Scheduled scan without filtering\n");
+ if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
- }
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
@@ -1368,7 +1477,13 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd);
- iwl_mvm_build_unified_scan_probe(mvm, vif, ies, cmd);
+ preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels);
+
+ iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
+ req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ req->mac_addr : NULL,
+ req->mac_addr_mask);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
@@ -1390,6 +1505,10 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
+ true);
+
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return 0;
@@ -1404,3 +1523,576 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
return iwl_mvm_scan_offload_stop(mvm, true);
return iwl_mvm_cancel_regular_scan(mvm);
}
+
+/* UMAC scan API */
+
+struct iwl_umac_scan_done {
+ struct iwl_mvm *mvm;
+ enum iwl_umac_scan_uid_type type;
+};
+
+static int rate_to_scan_rate_flag(unsigned int rate)
+{
+ static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
+ [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
+ [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
+ [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
+ [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
+ [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
+ [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
+ [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
+ [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
+ [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
+ [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
+ [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
+ [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
+ };
+
+ return rate_to_scan_rate[rate];
+}
+
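+/*
+ * Collect the legacy rates supported on either band into a single scan
+ * config bitmap; SCAN_CONFIG_SUPPORTED_RATE() marks the same set as
+ * supported, so basic and supported rates end up identical.
+ */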
+static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
+{
+ struct ieee80211_supported_band *band;
+ unsigned int rates = 0;
+ int i;
+
+ band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ for (i = 0; i < band->n_bitrates; i++)
+ rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+ band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ for (i = 0; i < band->n_bitrates; i++)
+ rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+
+ /* Set both basic rates and supported rates */
+ rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
+
+ return cpu_to_le32(rates);
+}
+
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_config *scan_config;
+ struct ieee80211_supported_band *band;
+ int num_channels =
+ mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ int ret, i, j = 0, cmd_size, data_size;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_CFG_CMD,
+ };
+
+ if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
+ return -ENOBUFS;
+
+ cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
+
+ scan_config = kzalloc(cmd_size, GFP_KERNEL);
+ if (!scan_config)
+ return -ENOMEM;
+
+ data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
+ scan_config->hdr.size = cpu_to_le16(data_size);
+ scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
+ SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+ SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+ SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+ SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+ SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
+ SCAN_CONFIG_N_CHANNELS(num_channels));
+ scan_config->tx_chains = cpu_to_le32(mvm->fw->valid_tx_ant);
+ scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+ scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+ scan_config->out_of_channel_time = cpu_to_le32(170);
+ scan_config->suspend_time = cpu_to_le32(30);
+ scan_config->dwell_active = 20;
+ scan_config->dwell_passive = 110;
+ scan_config->dwell_fragmented = 20;
+
+ memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+ scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
+ scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
+ IWL_CHANNEL_FLAG_ACCURATE_EBS |
+ IWL_CHANNEL_FLAG_EBS_ADD |
+ IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+ band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ for (i = 0; i < band->n_channels; i++, j++)
+ scan_config->channel_array[j] = band->channels[i].center_freq;
+ band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ for (i = 0; i < band->n_channels; i++, j++)
+ scan_config->channel_array[j] = band->channels[i].center_freq;
+
+ cmd.data[0] = scan_config;
+ cmd.len[0] = cmd_size;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+ IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ kfree(scan_config);
+ return ret;
+}
+
+static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
+{
+ int i;
+
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
+ if (mvm->scan_uid[i] == uid)
+ return i;
+
+ return i;
+}
+
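+/* A uid of 0 marks an unused entry in mvm->scan_uid */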
+static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_find_scan_uid(mvm, 0);
+}
+
+static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type)
+{
+ int i;
+
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
+ if (mvm->scan_uid[i] & type)
+ return true;
+
+ return false;
+}
+
+static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type)
+{
+ u32 uid;
+
+ /* make sure exactly one bit is on in scan type */
+ WARN_ON(hweight8(type) != 1);
+
+ /*
+ * Make sure scan uids are unique. If one scan lasts a long time
+ * while others complete frequently, the seq number will wrap around
+ * and we may end up with more than one scan with the same uid.
+ */
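+ /* e.g. a regular scan with seq number 5 gets the uid
+ * IWL_UMAC_SCAN_UID_REG_SCAN | (5 << IWL_UMAC_SCAN_UID_SEQ_OFFSET)
+ */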
+ do {
+ uid = type | (mvm->scan_seq_num <<
+ IWL_UMAC_SCAN_UID_SEQ_OFFSET);
+ mvm->scan_seq_num++;
+ } while (iwl_mvm_find_scan_uid(mvm, uid) <
+ IWL_MVM_MAX_SIMULTANEOUS_SCANS);
+
+ IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
+
+ return uid;
+}
+
+static void
+iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
+ struct iwl_scan_req_umac *cmd,
+ struct iwl_mvm_scan_params *params)
+{
+ memset(cmd, 0, ksize(cmd));
+ cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
+ sizeof(struct iwl_mvm_umac_cmd_hdr));
+ cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
+ cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
+ if (params->passive_fragmented)
+ cmd->fragmented_dwell =
+ params->dwell[IEEE80211_BAND_2GHZ].passive;
+ cmd->max_out_time = cpu_to_le32(params->max_out_time);
+ cmd->suspend_time = cpu_to_le32(params->suspend_time);
+ cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+}
+
+static void
+iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ int n_channels, u32 ssid_bitmap,
+ struct iwl_scan_req_umac *cmd)
+{
+ struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
+ channel_cfg[i].channel_num = channels[i]->hw_value;
+ channel_cfg[i].iter_count = 1;
+ channel_cfg[i].iter_interval = 0;
+ }
+}
+
+static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
+ struct cfg80211_ssid *ssids,
+ int fragmented)
+{
+ int flags = 0;
+
+ if (n_ssids == 0)
+ flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
+
+ if (n_ssids == 1 && ssids[0].ssid_len != 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
+
+ if (fragmented)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
+
+ if (iwl_mvm_rrm_scan_needed(mvm))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
+
+ return flags;
+}
+
+int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = SCAN_REQ_UMAC,
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels;
+ struct iwl_mvm_scan_params params = {};
+ u32 uid, flags;
+ u32 ssid_bitmap = 0;
+ int ret, i, uid_idx;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ uid_idx = iwl_mvm_find_free_scan_uid(mvm);
+ if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ return -EBUSY;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(mvm->scan_cmd == NULL))
+ return -ENOMEM;
+
+ if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
+ req->ies.common_ie_len +
+ req->ies.len[NL80211_BAND_2GHZ] +
+ req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
+ SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
+ mvm->fw->ucode_capa.n_scan_channels))
+ return -ENOBUFS;
+
+ iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
+ &params);
+
+ iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
+
+ uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
+ mvm->scan_uid[uid_idx] = uid;
+ cmd->uid = cpu_to_le32(uid);
+
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+
+ flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
+ req->req.ssids,
+ params.passive_fragmented);
+
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+
+ cmd->general_flags = cpu_to_le32(flags);
+ cmd->n_channels = req->req.n_channels;
+
+ for (i = 0; i < req->req.n_ssids; i++)
+ ssid_bitmap |= BIT(i);
+
+ iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
+ req->req.n_channels, ssid_bitmap, cmd);
+
+ sec_part->schedule[0].iter_count = 1;
+ sec_part->delay = 0;
+
+ iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
+ req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ req->req.mac_addr : NULL,
+ req->req.mac_addr_mask);
+
+ iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
+ req->req.n_ssids, 0);
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mvm,
+ "Scan request was sent successfully\n");
+ } else {
+ /*
+ * If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+ }
+ return ret;
+}
+
+int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = SCAN_REQ_UMAC,
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels;
+ struct iwl_mvm_scan_params params = {};
+ u32 uid, flags;
+ u32 ssid_bitmap = 0;
+ int ret, uid_idx;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ uid_idx = iwl_mvm_find_free_scan_uid(mvm);
+ if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ return -EBUSY;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(mvm->scan_cmd == NULL))
+ return -ENOMEM;
+
+ if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
+ ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
+ ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
+ SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
+ mvm->fw->ucode_capa.n_scan_channels))
+ return -ENOBUFS;
+
+ iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
+ &params);
+
+ iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
+
+ cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
+ uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
+ mvm->scan_uid[uid_idx] = uid;
+ cmd->uid = cpu_to_le32(uid);
+
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+
+ flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
+ params.passive_fragmented);
+
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+ if (iwl_mvm_scan_pass_all(mvm, req))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+ cmd->general_flags = cpu_to_le32(flags);
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+ mvm->last_ebs_successful)
+ cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ cmd->n_channels = req->n_channels;
+
+ iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
+ false);
+
+ /* This API uses bits 0-19 instead of 1-20. */
+ ssid_bitmap = ssid_bitmap >> 1;
+
+ iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
+ ssid_bitmap, cmd);
+
+ sec_part->schedule[0].interval =
+ cpu_to_le16(req->interval / MSEC_PER_SEC);
+ sec_part->schedule[0].iter_count = 0xff;
+
+ sec_part->delay = 0;
+
+ iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
+ req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ req->mac_addr : NULL,
+ req->mac_addr_mask);
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sched scan request was sent successfully\n");
+ } else {
+ /*
+ * If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
+ }
+ return ret;
+}
+
+int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+ u32 uid = __le32_to_cpu(notif->uid);
+ bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
+ int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
+
+ /*
+ * Scan uid may be set to zero in case of scan abort request from above.
+ */
+ if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ return 0;
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed, uid %u type %s, status %s, EBS status %s\n",
+ uid, sched ? "sched" : "regular",
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
+ "success" : "failed");
+
+ mvm->last_ebs_successful = !notif->ebs_status;
+ mvm->scan_uid[uid_idx] = 0;
+
+ if (!sched) {
+ ieee80211_scan_completed(mvm->hw,
+ notif->status ==
+ IWL_SCAN_OFFLOAD_ABORTED);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ } else {
+ IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
+ }
+
+ return 0;
+}
+
+static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_umac_scan_done *scan_done = data;
+ struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+ u32 uid = __le32_to_cpu(notif->uid);
+ int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
+
+ if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
+ return false;
+
+ if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ return false;
+
+ /*
+ * Clear the scan uid of scans that were aborted from above and have
+ * completed in the FW, so the RX handler does nothing.
+ */
+ scan_done->mvm->scan_uid[uid_idx] = 0;
+
+ return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
+}
+
+static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
+{
+ struct iwl_umac_scan_abort cmd = {
+ .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
+ sizeof(struct iwl_mvm_umac_cmd_hdr)),
+ .uid = cpu_to_le32(uid),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
+
+ return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+}
+
+static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
+ enum iwl_umac_scan_uid_type type, bool notify)
+{
+ struct iwl_notification_wait wait_scan_done;
+ static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
+ struct iwl_umac_scan_done scan_done = {
+ .mvm = mvm,
+ .type = type,
+ };
+ int i, ret = -EIO;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+ scan_done_notif,
+ ARRAY_SIZE(scan_done_notif),
+ iwl_scan_umac_done_check, &scan_done);
+
+ IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
+
+ for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
+ if (mvm->scan_uid[i] & type) {
+ int err;
+
+ if (iwl_mvm_is_radio_killed(mvm) &&
+ (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
+ ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ break;
+ }
+
+ err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
+ if (!err)
+ ret = 0;
+ }
+ }
+
+ if (ret) {
+ IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return ret;
+ }
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
+ if (ret)
+ return ret;
+
+ if (notify) {
+ if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
+ ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ }
+ }
+
+ return ret;
+}
+
+int iwl_mvm_scan_size(struct iwl_mvm *mvm)
+{
+ if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ return sizeof(struct iwl_scan_req_umac) +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ sizeof(struct iwl_scan_req_umac_tail);
+
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+ return sizeof(struct iwl_scan_req_unified_lmac) +
+ sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ sizeof(struct iwl_scan_probe_req);
+
+ return sizeof(struct iwl_scan_cmd) +
+ mvm->fw->ucode_capa.max_probe_length +
+ mvm->fw->ucode_capa.n_scan_channels *
+ sizeof(struct iwl_scan_channel);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 1731c205c81d..d86fe432e51f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -204,6 +204,56 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
+static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ unsigned long used_hw_queues;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ u32 ac;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
+
+ /* Find available queues, and allocate them to the ACs */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ u8 queue = find_first_zero_bit(&used_hw_queues,
+ mvm->first_agg_queue);
+
+ if (queue >= mvm->first_agg_queue) {
+ IWL_ERR(mvm, "Failed to allocate STA queue\n");
+ return -EBUSY;
+ }
+
+ __set_bit(queue, &used_hw_queues);
+ mvmsta->hw_queue[ac] = queue;
+ }
+
+ /* Found a place for all queues - enable them */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
+ iwl_mvm_ac_to_tx_fifo[ac]);
+ mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned long sta_msk;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* disable the TDLS STA-specific queues */
+ sta_msk = mvmsta->tfd_queue_msk;
+ for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
+ iwl_mvm_disable_txq(mvm, i);
+}
+
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -237,9 +287,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
atomic_set(&mvm->pending_frames[sta_id], 0);
mvm_sta->tid_disable_agg = 0;
mvm_sta->tfd_queue_msk = 0;
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
- mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
+
+ /* allocate new queues for a TDLS station */
+ if (sta->tdls) {
+ ret = iwl_mvm_tdls_sta_init(mvm, sta);
+ if (ret)
+ return ret;
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
+ mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
+ }
/* for HW restart - reset everything but the sequence number */
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
@@ -251,7 +309,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret)
- return ret;
+ goto err;
if (vif->type == NL80211_IFTYPE_STATION) {
if (!sta->tdls) {
@@ -265,6 +323,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
return 0;
+
+err:
+ iwl_mvm_tdls_sta_deinit(mvm, sta);
+ return ret;
}
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
@@ -398,6 +460,17 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
}
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
clear_bit(sta_id, mvm->sta_drained);
+
+ if (mvm->tfd_drained[sta_id]) {
+ unsigned long i, msk = mvm->tfd_drained[sta_id];
+
+ for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
+ iwl_mvm_disable_txq(mvm, i);
+
+ mvm->tfd_drained[sta_id] = 0;
+ IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
+ sta_id, msk);
+ }
}
mutex_unlock(&mvm->mutex);
@@ -431,6 +504,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
}
/*
+ * This shouldn't happen - the TDLS channel switch should be canceled
+ * before the STA is removed.
+ */
+ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ cancel_delayed_work(&mvm->tdls_cs.dwork);
+ }
+
+ /*
* Make sure that the tx response code sees the station as -EBUSY and
* calls the drain worker.
*/
@@ -443,9 +525,22 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
ERR_PTR(-EBUSY));
spin_unlock_bh(&mvm_sta->lock);
+
+ /* disable TDLS sta queues on drain complete */
+ if (sta->tdls) {
+ mvm->tfd_drained[mvm_sta->sta_id] =
+ mvm_sta->tfd_queue_msk;
+ IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
+ mvm_sta->sta_id);
+ }
+
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
} else {
spin_unlock_bh(&mvm_sta->lock);
+
+ if (sta->tdls)
+ iwl_mvm_tdls_sta_deinit(mvm, sta);
+
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
}
@@ -609,7 +704,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
+ qmask = iwl_mvm_mac_get_queues_mask(vif);
/*
* The firmware defines the TFD queue mask to only be relevant
@@ -1071,15 +1166,16 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
- u32 cmd_flags)
+ struct ieee80211_key_conf *keyconf, bool mcast,
+ u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
{
struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags;
- int ret, status;
+ int ret;
+ u32 status;
u16 keyidx;
int i;
+ u8 sta_id = mvm_sta->sta_id;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
@@ -1098,12 +1194,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
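+ /* fall through - WEP104 also needs the common WEP flag */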
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
+ memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
+ break;
default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
}
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
cmd.key_offset = keyconf->hw_key_idx;
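
A small self-contained sketch of the WEP flag composition above, illustrating why the WEP104 case intentionally falls through. The flag values here are placeholders, not the firmware's STA_KEY_FLG_* constants:

#include <stdio.h>

#define FLG_WEP         0x0001 /* placeholder, not the fw value */
#define FLG_WEP_13BYTES 0x0008 /* placeholder, not the fw value */

static unsigned int wep_flags(int keylen)
{
    unsigned int flags = 0;

    switch (keylen) {
    case 13:                        /* WEP104 */
        flags |= FLG_WEP_13BYTES;
        /* fall through */
    case 5:                         /* WEP40 */
        flags |= FLG_WEP;
        break;
    }
    return flags;
}

int main(void)
{
    printf("wep104 flags: 0x%x\n", wep_flags(13)); /* 0x9 */
    printf("wep40 flags:  0x%x\n", wep_flags(5));  /* 0x1 */
    return 0;
}
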
@@ -1195,17 +1297,88 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
return NULL;
}
+static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ bool mcast)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ int ret;
+ const u8 *addr;
+ struct ieee80211_key_seq seq;
+ u16 p1k[5];
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+ /* get phase 1 key from mac80211 */
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ seq.tkip.iv32, p1k, 0);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ 0, NULL, 0);
+ break;
+ default:
+ ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
+ 0, NULL, 0);
+ }
+
+ return ret;
+}
+
+static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
+ struct ieee80211_key_conf *keyconf,
+ bool mcast)
+{
+ struct iwl_mvm_add_sta_key_cmd cmd = {};
+ __le16 key_flags;
+ int ret;
+ u32 status;
+
+ key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+ key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+ if (mcast)
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ cmd.key_flags = key_flags;
+ cmd.key_offset = keyconf->hw_key_idx;
+ cmd.sta_id = sta_id;
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+ &cmd, &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf,
bool have_key_offset)
{
- struct iwl_mvm_sta *mvm_sta;
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ u8 sta_id;
int ret;
- u8 *addr, sta_id;
- struct ieee80211_key_seq seq;
- u16 p1k[5];
lockdep_assert_held(&mvm->mutex);
@@ -1234,8 +1407,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
}
}
- mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
- if (WARN_ON_ONCE(mvm_sta->vif != vif))
+ if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
return -EINVAL;
if (!have_key_offset) {
@@ -1249,26 +1421,26 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
return -ENOSPC;
}
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
- /* get phase 1 key from mac80211 */
- ieee80211_get_key_rx_seq(keyconf, 0, &seq);
- ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
- seq.tkip.iv32, p1k, 0);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
- 0, NULL, 0);
- break;
- default:
- ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
- sta_id, 0, NULL, 0);
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
+ if (ret) {
+ __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+ goto end;
}
- if (ret)
- __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+ /*
+ * For WEP, the same key is used for multicast and unicast. Upload it
+ * again with the same key offset, so that the second (multicast or
+ * unicast) entry points at the same key slot.
+ * If this fails, remove the original as well.
+ */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
+ if (ret) {
+ __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
+ __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+ }
+ }
end:
IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
@@ -1282,11 +1454,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf)
{
- struct iwl_mvm_sta *mvm_sta;
- struct iwl_mvm_add_sta_key_cmd cmd = {};
- __le16 key_flags;
- int ret, status;
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
+ int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1299,8 +1469,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
- ret = __test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
- if (!ret) {
+ if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
IWL_ERR(mvm, "offset %d not used in fw key table.\n",
keyconf->hw_key_idx);
return -ENOENT;
@@ -1326,35 +1495,17 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
}
}
- mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv;
- if (WARN_ON_ONCE(mvm_sta->vif != vif))
+ if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
return -EINVAL;
- key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
- STA_KEY_FLG_KEYID_MSK);
- key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
- key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
-
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
-
- cmd.key_flags = key_flags;
- cmd.key_offset = keyconf->hw_key_idx;
- cmd.sta_id = sta_id;
-
- status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
- &cmd, &status);
+ ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+ if (ret)
+ return ret;
- switch (status) {
- case ADD_STA_SUCCESS:
- IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
- break;
- }
+ /* delete the WEP key twice - the mcast and unicast entries share the offset */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
+ ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
return ret;
}
@@ -1367,6 +1518,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvm_sta;
u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
return;
@@ -1381,8 +1533,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
}
}
- mvm_sta = (void *)sta->drv_priv;
- iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
iv32, phase1key, CMD_ASYNC);
rcu_read_unlock();
}
@@ -1580,3 +1732,18 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
}
}
+
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta;
+
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+
+ if (!WARN_ON(!mvmsta))
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
+
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index d9c0d7b0e9d4..d8f48975ad08 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -264,6 +264,7 @@ enum iwl_mvm_agg_state {
* the first packet to be sent in legacy HW queue in Tx AGG stop flow.
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
+ * @tx_time: medium time consumed by this A-MPDU
*/
struct iwl_mvm_tid_data {
u16 seq_number;
@@ -274,6 +275,7 @@ struct iwl_mvm_tid_data {
enum iwl_mvm_agg_state state;
u16 txq_id;
u16 ssn;
+ u16 tx_time;
};
static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
@@ -286,6 +288,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
* @tfd_queue_msk: the tfd queues used by the station
+ * @hw_queue: per-AC mapping of the TFD queues used by this station
* @mac_id_n_color: the MAC context this station is linked to
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
@@ -309,6 +312,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
struct iwl_mvm_sta {
u32 sta_id;
u32 tfd_queue_msk;
+ u8 hw_queue[IEEE80211_NUM_ACS];
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
@@ -418,5 +422,6 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
bool disable);
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c
index 66c82df2d0a1..c0e00bae5bd0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c
@@ -61,9 +61,13 @@
*
*****************************************************************************/
+#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
+#define TU_TO_US(x) ((x) * 1024)
+#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
+
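
A time unit (TU) is 1024 microseconds, so the macros convert as in this quick standalone check with typical but hypothetical values - dtim_period = 2, beacon_int = 100 TU:

#include <stdio.h>

#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

int main(void)
{
    unsigned int dtim_period = 2, beacon_int = 100; /* hypothetical */
    unsigned int delay = TU_TO_MS(dtim_period * beacon_int);

    /* 200 TU = 204800 us -> 204 ms */
    printf("DTIM interval: %u ms\n", delay);
    return 0;
}
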
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
struct ieee80211_sta *sta;
@@ -113,17 +117,85 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return count;
}
+static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_rx_packet *pkt;
+ struct iwl_tdls_config_res *resp;
+ struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
+ struct iwl_host_cmd cmd = {
+ .id = TDLS_CONFIG_CMD,
+ .flags = CMD_WANT_SKB,
+ .data = { &tdls_cfg_cmd, },
+ .len = { sizeof(struct iwl_tdls_config_cmd), },
+ };
+ struct ieee80211_sta *sta;
+ int ret, i, cnt;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ tdls_cfg_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
+ tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
+
+ /* for now the Tx cmd is empty and unused */
+
+ /* populate TDLS peer data */
+ cnt = 0;
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue;
+
+ tdls_cfg_cmd.sta_info[cnt].sta_id = i;
+ tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
+ IWL_MVM_TDLS_FW_TID;
+ tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
+ tdls_cfg_cmd.sta_info[cnt].is_initiator =
+ cpu_to_le32(sta->tdls_initiator ? 1 : 0);
+
+ cnt++;
+ }
+
+ tdls_cfg_cmd.tdls_peer_count = cnt;
+ IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (WARN_ON_ONCE(ret))
+ return;
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
+ pkt->hdr.flags);
+ goto exit;
+ }
+
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
+ goto exit;
+
+ /* we don't really care about the response at this point */
+
+exit:
+ iwl_free_resp(&cmd);
+}
+
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool sta_added)
{
int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
- /*
- * Disable ps when the first TDLS sta is added and re-enable it
- * when the last TDLS sta is removed
- */
- if ((tdls_sta_cnt == 1 && sta_added) ||
- (tdls_sta_cnt == 0 && !sta_added))
+ /* when the first peer joins, send a power update first */
+ if (tdls_sta_cnt == 1 && sta_added)
+ iwl_mvm_power_update_mac(mvm);
+
+ /* configure the FW with TDLS peer info */
+ iwl_mvm_tdls_config(mvm, vif);
+
+ /* when the last peer leaves, send a power update last */
+ if (tdls_sta_cnt == 0 && !sta_added)
iwl_mvm_power_update_mac(mvm);
}
@@ -147,3 +219,488 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}
+
+static const char *
+iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
+{
+ switch (state) {
+ case IWL_MVM_TDLS_SW_IDLE:
+ return "IDLE";
+ case IWL_MVM_TDLS_SW_REQ_SENT:
+ return "REQ SENT";
+ case IWL_MVM_TDLS_SW_REQ_RCVD:
+ return "REQ RECEIVED";
+ case IWL_MVM_TDLS_SW_ACTIVE:
+ return "ACTIVE";
+ }
+
+ return NULL;
+}
+
+static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
+ enum iwl_mvm_tdls_cs_state state)
+{
+ if (mvm->tdls_cs.state == state)
+ return;
+
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
+ iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
+ iwl_mvm_tdls_cs_state_str(state));
+ mvm->tdls_cs.state = state;
+
+ if (state == IWL_MVM_TDLS_SW_IDLE)
+ mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
+}
+
+int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ unsigned int delay;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* the FW may report a failed channel switch - go back to idle */
+ if (!le32_to_cpu(notif->status)) {
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+ goto out;
+ }
+
+ if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
+ goto out;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* the station may not be here, but if it is, it must be a TDLS peer */
+ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+
+ /*
+ * Update state and possibly switch again after this is over (DTIM).
+ * Also convert TU to msec.
+ */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
+
+out:
+ return 0;
+}
+
+static int
+iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
+ enum iwl_tdls_channel_switch_type type,
+ const u8 *peer, bool peer_initiator)
+{
+ bool same_peer = false;
+ int ret = 0;
+
+ /* get the existing peer if it's there */
+ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!IS_ERR_OR_NULL(sta))
+ same_peer = ether_addr_equal(peer, sta->addr);
+ }
+
+ switch (mvm->tdls_cs.state) {
+ case IWL_MVM_TDLS_SW_IDLE:
+ /*
+ * might be spurious packet from the peer after the switch is
+ * already done
+ */
+ if (type == TDLS_MOVE_CH)
+ ret = -EINVAL;
+ break;
+ case IWL_MVM_TDLS_SW_REQ_SENT:
+ /*
+ * We received a ch-switch request while an outgoing one is
+ * pending. Allow it to proceed if the other peer is the same
+ * one we sent to, and we are not the link initiator.
+ */
+ if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH) {
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (!peer_initiator) /* we are the initiator */
+ ret = -EBUSY;
+ }
+ break;
+ case IWL_MVM_TDLS_SW_REQ_RCVD:
+ /* as above, allow the link initiator to proceed */
+ if (type == TDLS_SEND_CHAN_SW_REQ) {
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (peer_initiator) /* they are the initiator */
+ ret = -EBUSY;
+ } else if (type == TDLS_MOVE_CH) {
+ ret = -EINVAL;
+ }
+ break;
+ case IWL_MVM_TDLS_SW_ACTIVE:
+ /* we don't allow initiations during active channel switch */
+ if (type == TDLS_SEND_CHAN_SW_REQ)
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ IWL_DEBUG_TDLS(mvm,
+ "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
+ type, mvm->tdls_cs.state, peer, same_peer,
+ peer_initiator);
+
+ return ret;
+}
+
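
The validation above is effectively a four-state machine. A standalone model of which actions may proceed - names are local to this sketch, and the per-peer checks are collapsed into two booleans:

#include <stdbool.h>
#include <stdio.h>

enum cs_state  { SW_IDLE, SW_REQ_SENT, SW_REQ_RCVD, SW_ACTIVE };
enum cs_action { SEND_REQ, SEND_RESP_AND_MOVE, MOVE_CH };

/* returns true when the action may proceed in the given state */
static bool action_ok(enum cs_state s, enum cs_action a,
                      bool same_peer, bool peer_initiator)
{
    switch (s) {
    case SW_IDLE:
        return a != MOVE_CH;            /* no switch in progress */
    case SW_REQ_SENT:                   /* crossing requests */
        if (a == SEND_RESP_AND_MOVE)
            return same_peer && peer_initiator;
        return true;
    case SW_REQ_RCVD:
        if (a == SEND_REQ)
            return same_peer && !peer_initiator;
        return a != MOVE_CH;
    case SW_ACTIVE:
        return a != SEND_REQ;           /* no new requests mid-switch */
    }
    return false;
}

int main(void)
{
    printf("MOVE_CH while idle: %d\n",
           action_ok(SW_IDLE, MOVE_CH, true, true));           /* 0 */
    printf("crossing resp, same peer, they initiated: %d\n",
           action_ok(SW_REQ_SENT, SEND_RESP_AND_MOVE, true, true)); /* 1 */
    return 0;
}
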
+static int
+iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_tdls_channel_switch_type type,
+ const u8 *peer, bool peer_initiator,
+ u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ u32 timestamp, u16 switch_time,
+ u16 switch_timeout, struct sk_buff *skb,
+ u32 ch_sw_tm_ie)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_hdr *hdr;
+ struct iwl_tdls_channel_switch_cmd cmd = {0};
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator);
+ if (ret)
+ return ret;
+
+ if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd.switch_type = type;
+ cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
+ cmd.timing.switch_time = cpu_to_le32(switch_time);
+ cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, peer);
+ if (!sta) {
+ rcu_read_unlock();
+ ret = -ENOENT;
+ goto out;
+ }
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
+
+ if (!chandef) {
+ if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+ mvm->tdls_cs.peer.chandef.chan) {
+ /* actually moving to the channel */
+ chandef = &mvm->tdls_cs.peer.chandef;
+ } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
+ type == TDLS_MOVE_CH) {
+ /* we need to return to base channel */
+ struct ieee80211_chanctx_conf *chanctx =
+ rcu_dereference(vif->chanctx_conf);
+
+ if (WARN_ON_ONCE(!chanctx)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ chandef = &chanctx->def;
+ }
+ }
+
+ if (chandef) {
+ cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+ PHY_BAND_24 : PHY_BAND_5);
+ cmd.ci.channel = chandef->chan->hw_value;
+ cmd.ci.width = iwl_mvm_get_channel_width(chandef);
+ cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+ }
+
+ /* keep quota calculation simple for now - 50% of DTIM for TDLS */
+ cmd.timing.max_offchan_duration =
+ cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int) / 2);
+
+ /*
+ * Switch time is the first element in the switch-timing IE;
+ * the +2 skips the IE's id/len header bytes.
+ */
+ cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
+
+ info = IEEE80211_SKB_CB(skb);
+ if (info->control.hw_key)
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+
+ iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
+ mvmsta->sta_id);
+
+ hdr = (void *)skb->data;
+ iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
+ hdr->frame_control);
+ rcu_read_unlock();
+
+ memcpy(cmd.frame.data, skb->data, skb->len);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
+ sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
+ ret);
+ goto out;
+ }
+
+ /* channel switch has started, update state */
+ if (type != TDLS_MOVE_CH) {
+ mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
+ iwl_mvm_tdls_update_cs_state(mvm,
+ type == TDLS_SEND_CHAN_SW_REQ ?
+ IWL_MVM_TDLS_SW_REQ_SENT :
+ IWL_MVM_TDLS_SW_REQ_RCVD);
+ }
+
+out:
+ /* channel switch failed - we are idle */
+ if (ret)
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+ return ret;
+}
+
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
+{
+ struct iwl_mvm *mvm;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ unsigned int delay;
+ int ret;
+
+ mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
+ mutex_lock(&mvm->mutex);
+
+ /* called after an active channel switch has finished or timed out */
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+ /* station might be gone, in that case do nothing */
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
+ goto out;
+
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* the station may not be here, but if it is, it must be a TDLS peer */
+ if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+ ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+ TDLS_SEND_CHAN_SW_REQ,
+ sta->addr,
+ mvm->tdls_cs.peer.initiator,
+ mvm->tdls_cs.peer.op_class,
+ &mvm->tdls_cs.peer.chandef,
+ 0, 0, 0,
+ mvm->tdls_cs.peer.skb,
+ mvm->tdls_cs.peer.ch_sw_tm_ie);
+ if (ret)
+ IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
+
+ /* retry after a DTIM if we failed sending now */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+out:
+ mutex_unlock(&mvm->mutex);
+}
+
+int
+iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta;
+ unsigned int delay;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
+ sta->addr, chandef->chan->center_freq, chandef->width);
+
+ /* we only support a single peer for channel switching */
+ if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
+ IWL_DEBUG_TDLS(mvm,
+ "Existing peer. Can't start switch with %pM\n",
+ sta->addr);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+ TDLS_SEND_CHAN_SW_REQ,
+ sta->addr, sta->tdls_initiator,
+ oper_class, chandef, 0, 0, 0,
+ tmpl_skb, ch_sw_tm_ie);
+ if (ret)
+ goto out;
+
+ /*
+ * Mark the peer as "in tdls switch" for this vif. We only allow a
+ * single such peer per vif.
+ */
+ mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
+ if (!mvm->tdls_cs.peer.skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
+ mvm->tdls_cs.peer.chandef = *chandef;
+ mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
+ mvm->tdls_cs.peer.op_class = oper_class;
+ mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
+
+ /*
+ * Wait for 2 DTIM periods before attempting the next switch. The next
+ * switch will be made sooner if the current one completes before that.
+ */
+ delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *cur_sta;
+ bool wait_for_phy = false;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
+
+ /* we only support a single peer for channel switching */
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
+ IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
+ goto out;
+ }
+
+ cur_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* make sure it's the same peer */
+ if (cur_sta != sta)
+ goto out;
+
+ /*
+ * If we're currently in a switch because of the now canceled peer,
+ * wait a DTIM here to make sure the phy is back on the base channel.
+ * We can't otherwise force it.
+ */
+ if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
+ mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
+ wait_for_phy = true;
+
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ dev_kfree_skb(mvm->tdls_cs.peer.skb);
+ mvm->tdls_cs.peer.skb = NULL;
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ /* make sure the phy is on the base channel */
+ if (wait_for_phy)
+ msleep(TU_TO_MS(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int));
+
+ /* flush the channel switch state */
+ flush_delayed_work(&mvm->tdls_cs.dwork);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
+}
+
+void
+iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ enum iwl_tdls_channel_switch_type type;
+ unsigned int delay;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm,
+ "Received TDLS ch switch action %d from %pM status %d\n",
+ params->action_code, params->sta->addr, params->status);
+
+ /*
+ * we got a non-zero status from a peer we were switching to - move to
+ * the idle state and retry again later
+ */
+ if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
+ params->status != 0 &&
+ mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *cur_sta;
+
+ /* make sure it's the same peer */
+ cur_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (cur_sta == params->sta) {
+ iwl_mvm_tdls_update_cs_state(mvm,
+ IWL_MVM_TDLS_SW_IDLE);
+ goto retry;
+ }
+ }
+
+ type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
+ TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
+
+ iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
+ params->sta->tdls_initiator, 0,
+ params->chandef, params->timestamp,
+ params->switch_time,
+ params->switch_timeout,
+ params->tmpl_skb,
+ params->ch_sw_tm_ie);
+
+retry:
+ /* register a timeout in case we don't succeed in switching */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+ mutex_unlock(&mvm->mutex);
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 6dfad230be5e..54fafbf9a711 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -191,6 +191,35 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
return true;
}
+static void
+iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_notif *notif)
+{
+ if (!le32_to_cpu(notif->status)) {
+ IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
+ iwl_mvm_te_clear_data(mvm, te_data);
+ return;
+ }
+
+ switch (te_data->vif->type) {
+ case NL80211_IFTYPE_AP:
+ iwl_mvm_csa_noa_start(mvm);
+ break;
+ case NL80211_IFTYPE_STATION:
+ iwl_mvm_csa_client_absent(mvm, te_data->vif);
+ ieee80211_chswitch_done(te_data->vif, true);
+ break;
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ /* we don't need it anymore */
+ iwl_mvm_te_clear_data(mvm, te_data);
+}
+
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -252,14 +281,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
ieee80211_ready_on_channel(mvm->hw);
- } else if (te_data->vif->type == NL80211_IFTYPE_AP) {
- if (le32_to_cpu(notif->status))
- iwl_mvm_csa_noa_start(mvm);
- else
- IWL_DEBUG_TE(mvm, "CSA NOA failed to start\n");
-
- /* we don't need it anymore */
- iwl_mvm_te_clear_data(mvm, te_data);
+ } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
+ iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
}
} else {
IWL_WARN(mvm, "Got TE with unknown action\n");
@@ -549,18 +572,11 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
}
}
-/*
- * Explicit request to remove a time event. The removal of a time event needs to
- * be synchronized with the flow of a time event's end notification, which also
- * removes the time event from the op mode data structures.
- */
-void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif,
- struct iwl_mvm_time_event_data *te_data)
+static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ u32 *uid)
{
- struct iwl_time_event_cmd time_cmd = {};
- u32 id, uid;
- int ret;
+ u32 id;
/*
* It is possible that by the time we got to this point the time
@@ -569,7 +585,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
spin_lock_bh(&mvm->time_event_lock);
/* Save time event uid before clearing its data */
- uid = te_data->uid;
+ *uid = te_data->uid;
id = te_data->id;
/*
@@ -584,10 +600,59 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
* send a removal command.
*/
if (id == TE_MAX) {
- IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
- return;
+ IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
+ return false;
}
+ return true;
+}
+
+/*
+ * Explicit request to remove an aux ROC time event. The removal of a time
+ * event needs to be synchronized with the flow of a time event's end
+ * notification, which also removes the time event from the op mode
+ * data structures.
+ */
+static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_hs20_roc_req aux_cmd = {};
+ u32 uid;
+ int ret;
+
+ if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+ return;
+
+ aux_cmd.event_unique_id = cpu_to_le32(uid);
+ aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+ aux_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
+ le32_to_cpu(aux_cmd.event_unique_id));
+ ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
+ sizeof(aux_cmd), &aux_cmd);
+
+ if (WARN_ON(ret))
+ return;
+}
+
+/*
+ * Explicit request to remove a time event. The removal of a time event needs to
+ * be synchronized with the flow of a time event's end notification, which also
+ * removes the time event from the op mode data structures.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_time_event_cmd time_cmd = {};
+ u32 uid;
+ int ret;
+
+ if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+ return;
+
/* When we remove a TE, the UID is to be set in the id field */
time_cmd.id = cpu_to_le32(uid);
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
@@ -666,13 +731,17 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
-void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_time_event_data *te_data;
+ bool is_p2p = false;
lockdep_assert_held(&mvm->mutex);
+ mvmvif = NULL;
+ spin_lock_bh(&mvm->time_event_lock);
+
/*
* Iterate over the list of time events and find the time event that is
* associated with a P2P_DEVICE interface.
@@ -680,22 +749,41 @@ void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
* event at any given time and this time event corresponds to a ROC
* request
*/
- mvmvif = NULL;
- spin_lock_bh(&mvm->time_event_lock);
list_for_each_entry(te_data, &mvm->time_event_list, list) {
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
+ te_data->running) {
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- break;
+ is_p2p = true;
+ goto remove_te;
}
}
+
+ /*
+ * Iterate over the list of aux roc time events and find the time
+ * event that is associated with a BSS interface.
+ * This assumes that a BSS interface can have only a single time
+ * event at any given time and this time event corresponds to a ROC
+ * request
+ */
+ list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+ if (te_data->running) {
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ goto remove_te;
+ }
+ }
+
+remove_te:
spin_unlock_bh(&mvm->time_event_lock);
if (!mvmvif) {
- IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n");
+ IWL_WARN(mvm, "No remain on channel event\n");
return;
}
- iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+ if (is_p2p)
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+ else
+ iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
iwl_mvm_roc_finished(mvm);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index b350e47e19da..6f6b35db3ab8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -182,14 +182,14 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration, enum ieee80211_roc_type type);
/**
- * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionlity
+ * iwl_mvm_stop_roc - stop remain on channel functionality
* @mvm: the mvm component
*
* This function can be used to cancel an ongoing ROC session.
* The function is async, it will instruct the FW to stop serving the ROC
* session, but will not wait for the actual stopping of the session.
*/
-void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm);
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
/**
* iwl_mvm_remove_time_event - general function to clean up of time event
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index acca44a45086..2b1e61fac34a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -64,10 +64,6 @@
*****************************************************************************/
#include "mvm.h"
-#include "iwl-config.h"
-#include "iwl-io.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
@@ -99,32 +95,81 @@ static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
iwl_mvm_set_hw_ctkill_state(mvm, false);
}
-static bool iwl_mvm_temp_notif(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt, void *data)
+void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
+{
+ /* ignore the notification if we are in test mode */
+ if (mvm->temperature_test)
+ return;
+
+ if (mvm->temperature == temp)
+ return;
+
+ mvm->temperature = temp;
+ iwl_mvm_tt_handler(mvm);
+}
+
+static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
{
- struct iwl_mvm *mvm =
- container_of(notif_wait, struct iwl_mvm, notif_wait);
- int *temp = data;
struct iwl_dts_measurement_notif *notif;
int len = iwl_rx_packet_payload_len(pkt);
+ int temp;
if (WARN_ON_ONCE(len != sizeof(*notif))) {
IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
- return true;
+ return -EINVAL;
}
notif = (void *)pkt->data;
- *temp = le32_to_cpu(notif->temp);
+ temp = le32_to_cpu(notif->temp);
/* shouldn't be negative, but since it's s32, make sure it isn't */
- if (WARN_ON_ONCE(*temp < 0))
- *temp = 0;
+ if (WARN_ON_ONCE(temp < 0))
+ temp = 0;
+
+ IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);
+
+ return temp;
+}
+
+static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ int *temp = data;
+ int ret;
+
+ ret = iwl_mvm_temp_notif_parse(mvm, pkt);
+ if (ret < 0)
+ return true;
+
+ *temp = ret;
- IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", *temp);
return true;
}
+int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int temp;
+
+ /* the notification is handled synchronously in ctkill, so skip here */
+ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+ return 0;
+
+ temp = iwl_mvm_temp_notif_parse(mvm, pkt);
+ if (temp < 0)
+ return 0;
+
+ iwl_mvm_tt_temp_changed(mvm, temp);
+
+ return 0;
+}
+
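
The shape of the refactor: one parser returns either a non-negative temperature or a negative errno, and both the synchronous notification-wait path and the new async handler reuse it. A standalone model with simplified, hypothetical payload checks:

#include <stdio.h>

/* returns temperature >= 0, or a negative errno-style code */
static int parse_temp(int payload_len, int raw_temp)
{
    if (payload_len != 4)   /* wrong notification size */
        return -22;         /* stands in for -EINVAL */
    return raw_temp < 0 ? 0 : raw_temp; /* clamp, as the driver does */
}

int main(void)
{
    int temp = parse_temp(4, 57); /* hypothetical reading */

    if (temp >= 0)
        printf("temperature: %d\n", temp);
    return 0;
}
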
static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
{
struct iwl_dts_measurement_cmd cmd = {
@@ -145,7 +190,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
temp_notif, ARRAY_SIZE(temp_notif),
- iwl_mvm_temp_notif, &temp);
+ iwl_mvm_temp_notif_wait, &temp);
ret = iwl_mvm_get_temp_cmd(mvm);
if (ret) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index c6a517c771df..4f15d9decc81 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -73,9 +73,9 @@
/*
* Sets most of the Tx cmd's fields
*/
-static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info, u8 sta_id)
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, u8 sta_id)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
@@ -149,11 +149,9 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
/*
* Sets the fields in the Tx cmd that are rate related
*/
-static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- __le16 fc)
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
{
u32 rate_flags;
int rate_idx;
@@ -189,8 +187,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
- "Got an HT rate for a non data frame 0x%x\n",
- info->control.rates[0].flags);
+ "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
+ info->control.rates[0].flags,
+ info->control.rates[0].idx,
+ le16_to_cpu(fc));
rate_idx = info->control.rates[0].idx;
/* if the rate isn't a well known legacy rate, take the lowest one */
@@ -230,10 +230,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
/*
* Sets the fields in the Tx cmd that are crypto related
*/
-static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag)
+void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -424,6 +424,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+ if (sta->tdls) {
+ /* default to TID 0 for non-QoS packets */
+ u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
+
+ txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
+ }
+
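
The TDLS queue selection above indexes a per-AC queue array through the standard 802.1D TID-to-AC mapping, defaulting to TID 0 when the frame carries no TID. A standalone sketch - queue numbers invented, AC ordering mirrors mac80211's VO/VI/BE/BK:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

/* 802.1D UP -> AC, as in mac80211's tid_to_mac80211_ac */
static const int tid_to_ac[8] = {
    AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

int main(void)
{
    int hw_queue[NUM_ACS] = { 10, 11, 12, 13 }; /* hypothetical queues */
    int tid = 8;                                /* "no TID" marker */
    int eff_tid = (tid >= 8) ? 0 : tid;         /* default to TID 0 */

    printf("txq = %d\n", hw_queue[tid_to_ac[eff_tid]]); /* 12 (BE) */
    return 0;
}
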
if (is_ampdu) {
if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta;
@@ -658,6 +665,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
+ /*
+ * TODO: this is not accurate if we are freeing more than one
+ * packet.
+ */
+ info->status.tx_time =
+ le16_to_cpu(tx_resp->wireless_media_time);
BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
info->status.status_driver_data[0] =
(void *)(uintptr_t)tx_resp->reduced_tpc;
@@ -850,6 +863,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
+ mvmsta->tid_data[tid].tx_time =
+ le16_to_cpu(tx_resp->wireless_media_time);
}
rcu_read_unlock();
@@ -878,6 +893,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
+ /* TODO: not accounted if the whole A-MPDU failed */
+ info->status.tx_time = tid_data->tx_time;
info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 8021f6eec27f..e56e77ef5d2e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -734,3 +734,40 @@ bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
return idle;
}
+
+struct iwl_bss_iter_data {
+ struct ieee80211_vif *vif;
+ bool error;
+};
+
+static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bss_iter_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return;
+
+ if (data->vif) {
+ data->error = true;
+ return;
+ }
+
+ data->vif = vif;
+}
+
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
+{
+ struct iwl_bss_iter_data bss_iter_data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bss_iface_iterator, &bss_iter_data);
+
+ if (bss_iter_data.error) {
+ IWL_ERR(mvm, "More than one managed interface active!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return bss_iter_data.vif;
+}
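
Callers of iwl_mvm_get_bss_vif() have to handle three outcomes: a valid vif, NULL (no managed interface active), or an encoded error (more than one). A standalone model of that contract using a simplified version of the kernel's ERR_PTR convention:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095
#define IS_ERR(p) ((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

int main(void)
{
    void *vif = (void *)(intptr_t)-22; /* models ERR_PTR(-EINVAL) */

    if (!vif)
        printf("no managed (BSS) interface active\n");
    else if (IS_ERR(vif))
        printf("error: %ld\n", (long)(intptr_t)vif); /* -22 */
    else
        printf("got the BSS vif\n");
    return 0;
}
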
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 6ced8549eb3a..3ee8e3848876 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -499,6 +499,7 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int ret;
@@ -507,6 +508,25 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
+#if IS_ENABLED(CONFIG_IWLMVM)
+ /*
+ * special-case 7265D, it has the same PCI IDs.
+ *
+ * Note that because we already pass the cfg to the transport above,
+ * all the parameters that the transport uses must, until that is
+ * changed, be identical to the ones in the 7265D configuration.
+ */
+ if (cfg == &iwl7265_2ac_cfg)
+ cfg_7265d = &iwl7265d_2ac_cfg;
+ else if (cfg == &iwl7265_2n_cfg)
+ cfg_7265d = &iwl7265d_2n_cfg;
+ else if (cfg == &iwl7265_n_cfg)
+ cfg_7265d = &iwl7265d_n_cfg;
+ if (cfg_7265d &&
+ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+ cfg = cfg_7265d;
+#endif
+
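
The override logic in a nutshell: the 7265 and 7265D share PCI IDs, so the config is only swapped after checking the hardware revision register. The mask and type values below are invented placeholders, not the real CSR constants:

#include <stdio.h>

#define HW_REV_TYPE_MSK   0xfff0 /* placeholder mask */
#define HW_REV_TYPE_7265D 0x0210 /* placeholder type */

int main(void)
{
    unsigned int hw_rev = 0x0215; /* made-up revision register value */
    const char *cfg = "iwl7265_2ac_cfg";

    if ((hw_rev & HW_REV_TYPE_MSK) == HW_REV_TYPE_7265D)
        cfg = "iwl7265d_2ac_cfg";
    printf("using %s\n", cfg);
    return 0;
}
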
pci_set_drvdata(pdev, iwl_trans);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dd2f3f8baa9d..5d79a1f44b8e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -78,6 +78,11 @@
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
+#include "iwl-fh.h"
+
+/* extended range in FW SRAM */
+#define IWL_FW_MEM_EXTENDED_START 0x40000
+#define IWL_FW_MEM_EXTENDED_END 0x57FFF
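
These bounds gate the LMPM_CHICK toggling in the section loader below: only chunks whose destination falls inside the window need the extended address space enabled. A quick standalone check of the window test:

#include <stdbool.h>
#include <stdio.h>

#define EXT_START 0x40000u
#define EXT_END   0x57fffu

static bool needs_extended(unsigned int dst_addr)
{
    return dst_addr >= EXT_START && dst_addr <= EXT_END;
}

int main(void)
{
    printf("0x3ffff: %d\n", needs_extended(0x3ffff)); /* 0 */
    printf("0x40000: %d\n", needs_extended(0x40000)); /* 1 */
    printf("0x58000: %d\n", needs_extended(0x58000)); /* 0 */
    return 0;
}
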
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
@@ -133,7 +138,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
break;
}
- if (!page)
+ if (WARN_ON_ONCE(!page))
return;
trans_pcie->fw_mon_page = page;
@@ -512,6 +517,9 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
HW_READY_TIMEOUT);
+ if (ret >= 0)
+ iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
+
IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
return ret;
}
@@ -624,14 +632,28 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
}
for (offset = 0; offset < section->len; offset += chunk_sz) {
- u32 copy_size;
+ u32 copy_size, dst_addr;
+ bool extended_addr = false;
copy_size = min_t(u32, chunk_sz, section->len - offset);
+ dst_addr = section->offset + offset;
+
+ if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
+ dst_addr <= IWL_FW_MEM_EXTENDED_END)
+ extended_addr = true;
+
+ if (extended_addr)
+ iwl_set_bits_prph(trans, LMPM_CHICK,
+ LMPM_CHICK_EXTENDED_ADDR_SPACE);
memcpy(v_addr, (u8 *)section->data + offset, copy_size);
- ret = iwl_pcie_load_firmware_chunk(trans,
- section->offset + offset,
- p_addr, copy_size);
+ ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
+ copy_size);
+
+ if (extended_addr)
+ iwl_clear_bits_prph(trans, LMPM_CHICK,
+ LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
if (ret) {
IWL_ERR(trans,
"Could not load the [%d] uCode section\n",
@@ -644,14 +666,14 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
- const struct fw_img *image,
- int cpu,
- int *first_ucode_section)
+static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
int shift_param;
- int i, ret = 0;
- u32 last_read_idx = 0;
+ int i, ret = 0, sec_num = 0x1;
+ u32 val, last_read_idx = 0;
if (cpu == 1) {
shift_param = 0;
@@ -672,21 +694,16 @@ static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
break;
}
- if (i == (*first_ucode_section) + 1)
- /* set CPU to started */
- iwl_set_bits_prph(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- LMPM_CPU_HDRS_LOADING_COMPLETED
- << shift_param);
-
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
+
+ /* Notify the ucode of the loaded section number and status */
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ sec_num = (sec_num << 1) | 0x1;
}
- /* image loading complete */
- iwl_set_bits_prph(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
*first_ucode_section = last_read_idx;
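
The sec_num arithmetic builds a cumulative ones-mask in FH_UCODE_LOAD_STATUS: after each loaded section the register holds 0x1, then 0x3, then 0x7, and so on (shifted per CPU). A standalone trace of three sections on CPU1 (shift 0):

#include <stdio.h>

int main(void)
{
    unsigned int val = 0, sec_num = 1, shift = 0; /* shift 0 = CPU1 */
    int i;

    for (i = 0; i < 3; i++) {
        val |= sec_num << shift;
        printf("after section %d: status = 0x%x\n", i, val);
        sec_num = (sec_num << 1) | 1;
    }
    return 0;
}
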
@@ -739,49 +756,78 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
-static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
- const struct fw_img *image)
+static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret = 0;
- int first_ucode_section;
+ const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+ int i;
- IWL_DEBUG_FW(trans,
- "working with %s image\n",
- image->is_secure ? "Secured" : "Non Secured");
- IWL_DEBUG_FW(trans,
- "working with %s CPU\n",
- image->is_dual_cpus ? "Dual" : "Single");
+ if (dest->version)
+ IWL_ERR(trans,
+ "DBG DEST version is %d - expect issues\n",
+ dest->version);
- /* configure the ucode to be ready to get the secured image */
- if (image->is_secure) {
- /* set secure boot inspector addresses */
- iwl_write_prph(trans,
- LMPM_SECURE_INSPECTOR_CODE_ADDR,
- LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+ IWL_INFO(trans, "Applying debug destination %s\n",
+ get_fw_dbg_mode_string(dest->monitor_mode));
- iwl_write_prph(trans,
- LMPM_SECURE_INSPECTOR_DATA_ADDR,
- LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+ if (dest->monitor_mode == EXTERNAL_MODE)
+ iwl_pcie_alloc_fw_monitor(trans);
+ else
+ IWL_WARN(trans, "PCI should have external buffer debug\n");
- /* set CPU1 header address */
- iwl_write_prph(trans,
- LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
- LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+ for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+ u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
+ u32 val = le32_to_cpu(dest->reg_ops[i].val);
- /* load to FW the binary Secured sections of CPU1 */
- ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
- &first_ucode_section);
- if (ret)
- return ret;
+ switch (dest->reg_ops[i].op) {
+ case CSR_ASSIGN:
+ iwl_write32(trans, addr, val);
+ break;
+ case CSR_SETBIT:
+ iwl_set_bit(trans, addr, BIT(val));
+ break;
+ case CSR_CLEARBIT:
+ iwl_clear_bit(trans, addr, BIT(val));
+ break;
+ case PRPH_ASSIGN:
+ iwl_write_prph(trans, addr, val);
+ break;
+ case PRPH_SETBIT:
+ iwl_set_bits_prph(trans, addr, BIT(val));
+ break;
+ case PRPH_CLEARBIT:
+ iwl_clear_bits_prph(trans, addr, BIT(val));
+ break;
+ default:
+ IWL_ERR(trans, "FW debug - unknown OP %d\n",
+ dest->reg_ops[i].op);
+ break;
+ }
+ }
- } else {
- /* load to FW the binary Non secured sections of CPU1 */
- ret = iwl_pcie_load_cpu_sections(trans, image, 1,
- &first_ucode_section);
- if (ret)
- return ret;
+ if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+ iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+ trans_pcie->fw_mon_phys >> dest->base_shift);
+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+ (trans_pcie->fw_mon_phys +
+ trans_pcie->fw_mon_size) >> dest->end_shift);
}
+}
+
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = 0;
+ int first_ucode_section;
+
+ IWL_DEBUG_FW(trans, "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* load to FW the binary non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
+ if (ret)
+ return ret;
if (image->is_dual_cpus) {
/* set CPU2 header address */
@@ -790,13 +836,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
LMPM_SECURE_CPU2_HDR_MEM_SPACE);
/* load to FW the binary sections of CPU2 */
- if (image->is_secure)
- ret = iwl_pcie_load_cpu_secured_sections(
- trans, image, 2,
- &first_ucode_section);
- else
- ret = iwl_pcie_load_cpu_sections(trans, image, 2,
- &first_ucode_section);
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
if (ret)
return ret;
}
@@ -813,6 +854,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
(trans_pcie->fw_mon_phys +
trans_pcie->fw_mon_size) >> 4);
}
+ } else if (trans->dbg_dest_tlv) {
+ iwl_pcie_apply_destination(trans);
}
/* release CPU reset */
@@ -821,18 +864,50 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
else
iwl_write32(trans, CSR_RESET, 0);
- if (image->is_secure) {
- /* wait for image verification to complete */
- ret = iwl_poll_prph_bit(trans,
- LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
- LMPM_SECURE_BOOT_STATUS_SUCCESS,
- LMPM_SECURE_BOOT_STATUS_SUCCESS,
- LMPM_SECURE_TIME_OUT);
+ return 0;
+}
- if (ret < 0) {
- IWL_ERR(trans, "Time out on secure boot process\n");
- return ret;
- }
+static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int first_ucode_section;
+ u32 reg;
+
+ IWL_DEBUG_FW(trans, "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* configure the ucode to be ready to get the secured image */
+ /* release CPU reset */
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 1,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ /* load to FW the binary sections of CPU2 */
+ ret = iwl_pcie_load_cpu_sections_8000b(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ /* Notify FW loading is done */
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+
+ /* wait for image verification to complete */
+ ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_TIME_OUT);
+ if (ret < 0) {
+ reg = iwl_read_prph(trans,
+ LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0);
+
+ IWL_ERR(trans, "Timeout on secure boot process, reg = %x\n",
+ reg);
+ return ret;
}
return 0;
@@ -884,7 +959,11 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- return iwl_pcie_load_given_ucode(trans, fw);
+ if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
+ (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP))
+ return iwl_pcie_load_given_ucode_8000b(trans, fw);
+ else
+ return iwl_pcie_load_given_ucode(trans, fw);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@@ -941,7 +1020,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ udelay(20);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
@@ -974,6 +1054,9 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
clear_bit(STATUS_RFKILL, &trans->status);
if (hw_rfkill != was_hw_rfkill)
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ /* re-take ownership to prevent other users from stealing the device */
+ iwl_pcie_prepare_card_hw(trans);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
@@ -1023,14 +1106,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
- iwl_pcie_set_pwr(trans, false);
-
- val = iwl_read32(trans, CSR_RESET);
- if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
- *status = IWL_D3_STATUS_RESET;
- return 0;
- }
-
/*
* Also enables interrupts - none will happen as the device doesn't
* know we're waking it up, only when the opmode actually tells it
@@ -1041,6 +1116,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
@@ -1050,6 +1128,8 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
+ iwl_pcie_set_pwr(trans, false);
+
iwl_trans_pcie_tx_reset(trans);
ret = iwl_pcie_rx_init(trans);
@@ -1058,7 +1138,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
- *status = IWL_D3_STATUS_ALIVE;
+ val = iwl_read32(trans, CSR_RESET);
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
+ *status = IWL_D3_STATUS_RESET;
+ else
+ *status = IWL_D3_STATUS_ALIVE;
+
return 0;
}
@@ -1236,6 +1321,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
/*
* These bits say the device is running, and should keep running for
@@ -1767,6 +1854,13 @@ err:
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
return -ENOMEM;
}
+#else
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{
+ return 0;
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
@@ -1937,6 +2031,31 @@ static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
return csr_len;
}
+static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data)
+{
+ u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
+ unsigned long flags;
+ __le32 *val;
+ int i;
+
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ return 0;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
+ (*data)->len = cpu_to_le32(fh_regs_len);
+ val = (void *)(*data)->data;
+
+ for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
+ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+
+ iwl_trans_release_nic_access(trans, &flags);
+
+ *data = iwl_fw_error_next_data(*data);
+
+ return sizeof(**data) + fh_regs_len;
+}
+
static
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
@@ -1946,6 +2065,7 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len;
+ u32 monitor_len;
int i, ptr;
/* transport dump header */
@@ -1968,10 +2088,34 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
num_bytes_in_chunk;
}
+ /* FH registers */
+ len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
/* FW monitor */
- if (trans_pcie->fw_mon_page)
+ if (trans_pcie->fw_mon_page) {
+ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+ trans_pcie->fw_mon_size;
+ monitor_len = trans_pcie->fw_mon_size;
+ } else if (trans->dbg_dest_tlv) {
+ u32 base, end;
+
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ end = iwl_read_prph(trans, end) <<
+ trans->dbg_dest_tlv->end_shift;
+
+ /* Make "end" point to the actual end */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ end += (1 << trans->dbg_dest_tlv->end_shift);
+ monitor_len = end - base;
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans_pcie->fw_mon_size;
+ monitor_len;
+ } else {
+ monitor_len = 0;
+ }
dump_data = vzalloc(len);
if (!dump_data)
@@ -2008,49 +2152,77 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
len += iwl_trans_pcie_dump_prph(trans, &data);
len += iwl_trans_pcie_dump_csr(trans, &data);
+ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
/* data is already pointing to the next section */
- if (trans_pcie->fw_mon_page) {
+ if ((trans_pcie->fw_mon_page &&
+ trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+ trans->dbg_dest_tlv) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+ u32 base, write_ptr, wrap_cnt;
+
+ /* If there was a dest TLV - use the values from there */
+ if (trans->dbg_dest_tlv) {
+ write_ptr =
+ le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ } else {
+ base = MON_BUFF_BASE_ADDR;
+ write_ptr = MON_BUFF_WRPTR;
+ wrap_cnt = MON_BUFF_CYCLE_CNT;
+ }
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
- data->len = cpu_to_le32(trans_pcie->fw_mon_size +
- sizeof(*fw_mon_data));
fw_mon_data = (void *)data->data;
fw_mon_data->fw_mon_wr_ptr =
- cpu_to_le32(iwl_read_prph(trans, MON_BUFF_WRPTR));
+ cpu_to_le32(iwl_read_prph(trans, write_ptr));
fw_mon_data->fw_mon_cycle_cnt =
- cpu_to_le32(iwl_read_prph(trans, MON_BUFF_CYCLE_CNT));
+ cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
- cpu_to_le32(iwl_read_prph(trans, MON_BUFF_BASE_ADDR));
-
- /*
- * The firmware is now asserted, it won't write anything to
- * the buffer. CPU can take ownership to fetch the data.
- * The buffer will be handed back to the device before the
- * firmware will be restarted.
- */
- dma_sync_single_for_cpu(trans->dev, trans_pcie->fw_mon_phys,
- trans_pcie->fw_mon_size,
- DMA_FROM_DEVICE);
- memcpy(fw_mon_data->data, page_address(trans_pcie->fw_mon_page),
- trans_pcie->fw_mon_size);
-
- len += sizeof(*data) + sizeof(*fw_mon_data) +
- trans_pcie->fw_mon_size;
+ cpu_to_le32(iwl_read_prph(trans, base));
+
+ len += sizeof(*data) + sizeof(*fw_mon_data);
+ if (trans_pcie->fw_mon_page) {
+ data->len = cpu_to_le32(trans_pcie->fw_mon_size +
+ sizeof(*fw_mon_data));
+
+ /*
+ * The firmware is now asserted, it won't write anything
+ * to the buffer. CPU can take ownership to fetch the
+ * data. The buffer will be handed back to the device
+ * before the firmware will be restarted.
+ */
+ dma_sync_single_for_cpu(trans->dev,
+ trans_pcie->fw_mon_phys,
+ trans_pcie->fw_mon_size,
+ DMA_FROM_DEVICE);
+ memcpy(fw_mon_data->data,
+ page_address(trans_pcie->fw_mon_page),
+ trans_pcie->fw_mon_size);
+
+ len += trans_pcie->fw_mon_size;
+ } else {
+ /* If we are here then the buffer is internal */
+
+ /*
+ * Update pointers to reflect actual values after
+ * shifting
+ */
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ iwl_trans_read_mem(trans, base, fw_mon_data->data,
+ monitor_len / sizeof(u32));
+ data->len = cpu_to_le32(sizeof(*fw_mon_data) +
+ monitor_len);
+ len += monitor_len;
+ }
}
dump_data->len = len;
return dump_data;
}
-#else
-static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
- struct dentry *dir)
-{
- return 0;
-}
-#endif /*CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
.start_hw = iwl_trans_pcie_start_hw,
@@ -2087,9 +2259,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.release_nic_access = iwl_trans_pcie_release_nic_access,
.set_bits_mask = iwl_trans_pcie_set_bits_mask,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
.dump_data = iwl_trans_pcie_dump_data,
-#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index eb8e2984c5e9..8a6c7a084aa1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -989,6 +989,65 @@ out:
spin_unlock_bh(&txq->lock);
}
+static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ if (trans_pcie->cmd_in_flight)
+ return 0;
+
+ trans_pcie->cmd_in_flight = true;
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+ * command - we will let the NIC sleep once all the host commands
+ * returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set.
+ */
+ if (trans->cfg->base_params->apmg_wake_up_wa) {
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+ 15000);
+ if (ret < 0) {
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ trans_pcie->cmd_in_flight = false;
+ IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ if (WARN_ON(!trans_pcie->cmd_in_flight))
+ return 0;
+
+ trans_pcie->cmd_in_flight = false;
+
+ if (trans->cfg->base_params->apmg_wake_up_wa)
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ return 0;
+}
+
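The two helpers above factor out the apmg wake-up workaround that the hunks further down remove from iwl_pcie_enqueue_hcmd() and iwl_pcie_cmdq_reclaim(). A hedged sketch of the intended pairing (illustrative only, not part of the patch); both helpers must be called with reg_lock held:

static int example_hcmd_wake_cycle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans);	/* wake the NIC if needed */
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	if (ret)
		return ret;

	/* ... enqueue the host command and wait for it to complete ... */

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	iwl_pcie_clear_cmd_in_flight(trans);	/* let the NIC sleep again */
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	return 0;
}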
/*
* iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -1024,14 +1083,9 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
}
}
- if (trans->cfg->base_params->apmg_wake_up_wa &&
- q->read_ptr == q->write_ptr) {
+ if (q->read_ptr == q->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- WARN_ON(!trans_pcie->cmd_in_flight);
- trans_pcie->cmd_in_flight = false;
- __iwl_trans_pcie_clear_bit(trans,
- CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
@@ -1419,32 +1473,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
-
- /*
- * wake up the NIC to make sure that the firmware will see the host
- * command - we will let the NIC sleep once all the host commands
- * returned. This needs to be done only on NICs that have
- * apmg_wake_up_wa set.
- */
- if (trans->cfg->base_params->apmg_wake_up_wa &&
- !trans_pcie->cmd_in_flight) {
- trans_pcie->cmd_in_flight = true;
- __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
- 15000);
- if (ret < 0) {
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
- trans_pcie->cmd_in_flight = false;
- IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
- idx = -EIO;
- goto out;
- }
+ ret = iwl_pcie_set_cmd_in_flight(trans);
+ if (ret < 0) {
+ idx = ret;
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ goto out;
}
/* Increment and update queue's write index */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c9ad4cf1adfb..a71b9d5e353d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -412,6 +412,11 @@ struct mac80211_hwsim_data {
struct mac_address addresses[2];
int channels, idx;
bool use_chanctx;
+ bool destroy_on_close;
+ struct work_struct destroy_work;
+ u32 portid;
+ char alpha2[2];
+ const struct ieee80211_regdomain *regd;
struct ieee80211_channel *tmp_chan;
struct delayed_work roc_done;
@@ -419,6 +424,7 @@ struct mac80211_hwsim_data {
struct cfg80211_scan_request *hw_scan_request;
struct ieee80211_vif *hw_scan_vif;
int scan_chan_idx;
+ u8 scan_addr[ETH_ALEN];
struct ieee80211_channel *channel;
u64 beacon_int /* beacon interval in us */;
@@ -436,7 +442,7 @@ struct mac80211_hwsim_data {
/*
* Only radios in the same group can communicate together (the
* channel has to match too). Each bit represents a group. A
- * radio can be in more then one group.
+ * radio can be in more than one group.
*/
u64 group;
@@ -447,6 +453,14 @@ struct mac80211_hwsim_data {
s64 bcn_delta;
/* absolute beacon transmission time. Used to cover up "tx" delay. */
u64 abs_bcn_ts;
+
+ /* Stats */
+ u64 tx_pkts;
+ u64 rx_pkts;
+ u64 tx_bytes;
+ u64 rx_bytes;
+ u64 tx_dropped;
+ u64 tx_failed;
};
@@ -476,6 +490,14 @@ static struct genl_family hwsim_genl_family = {
.maxattr = HWSIM_ATTR_MAX,
};
+enum hwsim_multicast_groups {
+ HWSIM_MCGRP_CONFIG,
+};
+
+static const struct genl_multicast_group hwsim_mcgrps[] = {
+ [HWSIM_MCGRP_CONFIG] = { .name = "config", },
+};
+
/* MAC80211_HWSIM netlink policy */
static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
@@ -496,6 +518,10 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
[HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING },
+ [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_FREQ] = { .type = NLA_U32 },
};
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -807,6 +833,9 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
.ret = false,
};
+ if (data->scanning && memcmp(addr, data->scan_addr, ETH_ALEN) == 0)
+ return true;
+
memcpy(md.addr, addr, ETH_ALEN);
ieee80211_iterate_active_interfaces_atomic(data->hw,
@@ -861,8 +890,10 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
/* If the queue contains MAX_QUEUE skb's drop some */
if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
/* Dropping until WARN_QUEUE level */
- while (skb_queue_len(&data->pending) >= WARN_QUEUE)
- skb_dequeue(&data->pending);
+ while (skb_queue_len(&data->pending) >= WARN_QUEUE) {
+ ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+ data->tx_dropped++;
+ }
}
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -896,6 +927,9 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
goto nla_put_failure;
+ if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
+ goto nla_put_failure;
+
/* We get the tx control (rate and retries) info */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -917,10 +951,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
/* Enqueue the packet */
skb_queue_tail(&data->pending, my_skb);
+ data->tx_pkts++;
+ data->tx_bytes += my_skb->len;
return;
nla_put_failure:
printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+ ieee80211_free_txskb(hw, my_skb);
+ data->tx_failed++;
}
static bool hwsim_chans_compat(struct ieee80211_channel *c1,
@@ -952,6 +990,53 @@ static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
data->receive = true;
}
+static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
+{
+ /*
+ * To enable this code, #define the HWSIM_RADIOTAP_OUI,
+ * e.g. like this:
+ * #define HWSIM_RADIOTAP_OUI "\x02\x00\x00"
+ * (but you should use a valid OUI, not that)
+ *
+ * If anyone wants to 'donate' a radiotap OUI/subns code
+ * please send a patch removing this #ifdef and changing
+ * the values accordingly.
+ */
+#ifdef HWSIM_RADIOTAP_OUI
+ struct ieee80211_vendor_radiotap *rtap;
+
+ /*
+ * Note that this code requires the headroom in the SKB
+ * that was allocated earlier.
+ */
+ rtap = (void *)skb_push(skb, sizeof(*rtap) + 8 + 4);
+ rtap->oui[0] = HWSIM_RADIOTAP_OUI[0];
+ rtap->oui[1] = HWSIM_RADIOTAP_OUI[1];
+ rtap->oui[2] = HWSIM_RADIOTAP_OUI[2];
+ rtap->subns = 127;
+
+ /*
+ * Radiotap vendor namespaces can (and should) also be
+ * split into fields by using the standard radiotap
+ * presence bitmap mechanism. Use just BIT(0) here for
+ * the presence bitmap.
+ */
+ rtap->present = BIT(0);
+ /* We have 8 bytes of (dummy) data */
+ rtap->len = 8;
+ /* For testing, also require it to be aligned */
+ rtap->align = 8;
+ /* And also test that padding works, 4 bytes */
+ rtap->pad = 4;
+ /* push the data */
+ memcpy(rtap->data, "ABCDEFGH", 8);
+ /* make sure to clear padding, mac80211 doesn't */
+ memset(rtap->data + 8, 0, 4);
+
+ IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
+#endif
+}
+
static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan)
@@ -1066,6 +1151,11 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.mactime = now + data2->tsf_offset;
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
+
+ mac80211_hwsim_add_vendor_rtap(nskb);
+
+ data2->rx_pkts++;
+ data2->rx_bytes += nskb->len;
ieee80211_rx_irqsafe(data2->hw, nskb);
}
spin_unlock(&hwsim_radio_lock);
@@ -1133,6 +1223,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
/* NO wmediumd detected, perfect medium simulation */
+ data->tx_pkts++;
+ data->tx_bytes += skb->len;
ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
if (ack && skb->len >= 16) {
@@ -1716,7 +1808,7 @@ static void hw_scan_work(struct work_struct *work)
struct sk_buff *probe;
probe = ieee80211_probereq_get(hwsim->hw,
- hwsim->hw_scan_vif,
+ hwsim->scan_addr,
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
@@ -1754,6 +1846,12 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
hwsim->hw_scan_request = req;
hwsim->hw_scan_vif = vif;
hwsim->scan_chan_idx = 0;
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ get_random_mask_addr(hwsim->scan_addr,
+ hw_req->req.mac_addr,
+ hw_req->req.mac_addr_mask);
+ else
+ memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN);
mutex_unlock(&hwsim->mutex);
wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
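get_random_mask_addr() fills scan_addr from the template address and mask supplied by userspace; a sketch of its semantics (assuming the usual masked-randomization rule: bits set in mac_addr_mask are taken from mac_addr, cleared bits are randomized):

static void example_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
{
	int i;

	get_random_bytes(buf, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		buf[i] = (buf[i] & ~mask[i]) | (addr[i] & mask[i]);
}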
@@ -1780,7 +1878,9 @@ static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&hwsim->mutex);
}
-static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
+static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -1792,13 +1892,16 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
}
printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
+
+ memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN);
hwsim->scanning = true;
out:
mutex_unlock(&hwsim->mutex);
}
-static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
+static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -1806,6 +1909,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
printk(KERN_DEBUG "hwsim sw_scan_complete\n");
hwsim->scanning = false;
+ memset(hwsim->scan_addr, 0, ETH_ALEN);
mutex_unlock(&hwsim->mutex);
}
@@ -1916,6 +2020,57 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
hwsim_check_chanctx_magic(ctx);
}
+static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ "d_tx_dropped",
+ "d_tx_failed",
+ "d_ps_mode",
+ "d_group",
+ "d_tx_power",
+};
+
+#define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats)
+
+static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *mac80211_hwsim_gstrings_stats,
+ sizeof(mac80211_hwsim_gstrings_stats));
+}
+
+static int mac80211_hwsim_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return MAC80211_HWSIM_SSTATS_LEN;
+ return 0;
+}
+
+static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mac80211_hwsim_data *ar = hw->priv;
+ int i = 0;
+
+ data[i++] = ar->tx_pkts;
+ data[i++] = ar->tx_bytes;
+ data[i++] = ar->rx_pkts;
+ data[i++] = ar->rx_bytes;
+ data[i++] = ar->tx_dropped;
+ data[i++] = ar->tx_failed;
+ data[i++] = ar->ps;
+ data[i++] = ar->group;
+ data[i++] = ar->power_level;
+
+ WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN);
+}
+
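Once the three ops are wired into mac80211_hwsim_ops below, the counters become visible through the standard ethtool statistics interface, e.g. "ethtool -S wlan0" (interface name assumed) prints the tx_pkts_nic/rx_pkts_nic family of strings defined above.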
static const struct ieee80211_ops mac80211_hwsim_ops = {
.tx = mac80211_hwsim_tx,
.start = mac80211_hwsim_start,
@@ -1939,14 +2094,131 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
.flush = mac80211_hwsim_flush,
.get_tsf = mac80211_hwsim_get_tsf,
.set_tsf = mac80211_hwsim_set_tsf,
+ .get_et_sset_count = mac80211_hwsim_get_et_sset_count,
+ .get_et_stats = mac80211_hwsim_get_et_stats,
+ .get_et_strings = mac80211_hwsim_get_et_strings,
};
static struct ieee80211_ops mac80211_hwsim_mchan_ops;
-static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
- const struct ieee80211_regdomain *regd,
- bool reg_strict, bool p2p_device,
- bool use_chanctx)
+struct hwsim_new_radio_params {
+ unsigned int channels;
+ const char *reg_alpha2;
+ const struct ieee80211_regdomain *regd;
+ bool reg_strict;
+ bool p2p_device;
+ bool use_chanctx;
+ bool destroy_on_close;
+ const char *hwname;
+ bool no_vif;
+};
+
+static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
+ struct genl_info *info)
+{
+ if (info)
+ genl_notify(&hwsim_genl_family, mcast_skb,
+ genl_info_net(info), info->snd_portid,
+ HWSIM_MCGRP_CONFIG, info->nlhdr, GFP_KERNEL);
+ else
+ genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0,
+ HWSIM_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int append_radio_msg(struct sk_buff *skb, int id,
+ struct hwsim_new_radio_params *param)
+{
+ int ret;
+
+ ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id);
+ if (ret < 0)
+ return ret;
+
+ if (param->channels) {
+ ret = nla_put_u32(skb, HWSIM_ATTR_CHANNELS, param->channels);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->reg_alpha2) {
+ ret = nla_put(skb, HWSIM_ATTR_REG_HINT_ALPHA2, 2,
+ param->reg_alpha2);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->regd) {
+ int i;
+
+ for (i = 0; hwsim_world_regdom_custom[i] != param->regd &&
+ i < ARRAY_SIZE(hwsim_world_regdom_custom); i++)
+ ;
+
+ if (i < ARRAY_SIZE(hwsim_world_regdom_custom)) {
+ ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (param->reg_strict) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_REG_STRICT_REG);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->p2p_device) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_P2P_DEVICE);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->use_chanctx) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_USE_CHANCTX);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (param->hwname) {
+ ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME,
+ strlen(param->hwname), param->hwname);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
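append_radio_msg() is deliberately shared: the same attribute set is used both for the HWSIM_CMD_NEW_RADIO multicast notification here and for HWSIM_CMD_GET_RADIO replies in mac80211_hwsim_get_radio() further down, so notifications and queries describe radios identically.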
+static void hwsim_mcast_new_radio(int id, struct genl_info *info,
+ struct hwsim_new_radio_params *param)
+{
+ struct sk_buff *mcast_skb;
+ void *data;
+
+ mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!mcast_skb)
+ return;
+
+ data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0,
+ HWSIM_CMD_NEW_RADIO);
+ if (!data)
+ goto out_err;
+
+ if (append_radio_msg(mcast_skb, id, param) < 0)
+ goto out_err;
+
+ genlmsg_end(mcast_skb, data);
+
+ hwsim_mcast_config_msg(mcast_skb, info);
+ return;
+
+out_err:
+ genlmsg_cancel(mcast_skb, data);
+ nlmsg_free(mcast_skb);
+}
+
+static int mac80211_hwsim_new_radio(struct genl_info *info,
+ struct hwsim_new_radio_params *param)
{
int err;
u8 addr[ETH_ALEN];
@@ -1956,16 +2228,16 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
int idx;
- if (WARN_ON(channels > 1 && !use_chanctx))
+ if (WARN_ON(param->channels > 1 && !param->use_chanctx))
return -EINVAL;
spin_lock_bh(&hwsim_radio_lock);
idx = hwsim_radio_idx++;
spin_unlock_bh(&hwsim_radio_lock);
- if (use_chanctx)
+ if (param->use_chanctx)
ops = &mac80211_hwsim_mchan_ops;
- hw = ieee80211_alloc_hw(sizeof(*data), ops);
+ hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname);
if (!hw) {
printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n");
err = -ENOMEM;
@@ -2003,9 +2275,12 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
hw->wiphy->n_addresses = 2;
hw->wiphy->addresses = data->addresses;
- data->channels = channels;
- data->use_chanctx = use_chanctx;
+ data->channels = param->channels;
+ data->use_chanctx = param->use_chanctx;
data->idx = idx;
+ data->destroy_on_close = param->destroy_on_close;
+ if (info)
+ data->portid = info->snd_portid;
if (data->use_chanctx) {
hw->wiphy->max_scan_ssids = 255;
@@ -2014,12 +2289,12 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
/* For channels > 1 DFS is not allowed */
hw->wiphy->n_iface_combinations = 1;
hw->wiphy->iface_combinations = &data->if_combination;
- if (p2p_device)
+ if (param->p2p_device)
data->if_combination = hwsim_if_comb_p2p_dev[0];
else
data->if_combination = hwsim_if_comb[0];
data->if_combination.num_different_channels = data->channels;
- } else if (p2p_device) {
+ } else if (param->p2p_device) {
hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(hwsim_if_comb_p2p_dev);
@@ -2040,7 +2315,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
- if (p2p_device)
+ if (param->p2p_device)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
@@ -2060,7 +2335,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_STATIC_SMPS |
- NL80211_FEATURE_DYNAMIC_SMPS;
+ NL80211_FEATURE_DYNAMIC_SMPS |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -2095,6 +2371,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
sband->ht_cap.ht_supported = true;
sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40;
sband->ht_cap.ampdu_factor = 0x3;
@@ -2111,7 +2388,6 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
sband->vht_cap.cap =
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
@@ -2142,15 +2418,19 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
hw->max_rates = 4;
hw->max_rate_tries = 11;
- if (reg_strict)
+ if (param->reg_strict)
hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
- if (regd) {
+ if (param->regd) {
+ data->regd = param->regd;
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
- wiphy_apply_custom_regulatory(hw->wiphy, regd);
+ wiphy_apply_custom_regulatory(hw->wiphy, param->regd);
/* give the regulatory workqueue a chance to run */
schedule_timeout_interruptible(1);
}
+ if (param->no_vif)
+ hw->flags |= IEEE80211_HW_NO_AUTO_VIF;
+
err = ieee80211_register_hw(hw);
if (err < 0) {
printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
@@ -2160,8 +2440,11 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr);
- if (reg_alpha2)
- regulatory_hint(hw->wiphy, reg_alpha2);
+ if (param->reg_alpha2) {
+ data->alpha2[0] = param->reg_alpha2[0];
+ data->alpha2[1] = param->reg_alpha2[1];
+ regulatory_hint(hw->wiphy, param->reg_alpha2);
+ }
data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir);
debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
@@ -2180,6 +2463,9 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
list_add_tail(&data->list, &hwsim_radios);
spin_unlock_bh(&hwsim_radio_lock);
+ if (idx > 0)
+ hwsim_mcast_new_radio(idx, info, param);
+
return idx;
failed_hw:
@@ -2192,8 +2478,46 @@ failed:
return err;
}
-static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data)
+static void hwsim_mcast_del_radio(int id, const char *hwname,
+ struct genl_info *info)
{
+ struct sk_buff *skb;
+ void *data;
+ int ret;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
+ HWSIM_CMD_DEL_RADIO);
+ if (!data)
+ goto error;
+
+ ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id);
+ if (ret < 0)
+ goto error;
+
+ ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname),
+ hwname);
+ if (ret < 0)
+ goto error;
+
+ genlmsg_end(skb, data);
+
+ hwsim_mcast_config_msg(skb, info);
+
+ return;
+
+error:
+ nlmsg_free(skb);
+}
+
+static void mac80211_hwsim_del_radio(struct mac80211_hwsim_data *data,
+ const char *hwname,
+ struct genl_info *info)
+{
+ hwsim_mcast_del_radio(data->idx, hwname, info);
debugfs_remove_recursive(data->debugfs);
ieee80211_unregister_hw(data->hw);
device_release_driver(data->dev);
@@ -2201,6 +2525,46 @@ static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data)
ieee80211_free_hw(data->hw);
}
+static int mac80211_hwsim_get_radio(struct sk_buff *skb,
+ struct mac80211_hwsim_data *data,
+ u32 portid, u32 seq,
+ struct netlink_callback *cb, int flags)
+{
+ void *hdr;
+ struct hwsim_new_radio_params param = { };
+ int res = -EMSGSIZE;
+
+ hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags,
+ HWSIM_CMD_GET_RADIO);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (cb)
+ genl_dump_check_consistent(cb, hdr, &hwsim_genl_family);
+
+ if (data->alpha2[0] && data->alpha2[1])
+ param.reg_alpha2 = data->alpha2;
+
+ param.reg_strict = !!(data->hw->wiphy->regulatory_flags &
+ REGULATORY_STRICT_REG);
+ param.p2p_device = !!(data->hw->wiphy->interface_modes &
+ BIT(NL80211_IFTYPE_P2P_DEVICE));
+ param.use_chanctx = data->use_chanctx;
+ param.regd = data->regd;
+ param.channels = data->channels;
+ param.hwname = wiphy_name(data->hw->wiphy);
+
+ res = append_radio_msg(skb, data->idx, &param);
+ if (res < 0)
+ goto out_err;
+
+ return genlmsg_end(skb, hdr);
+
+out_err:
+ genlmsg_cancel(skb, hdr);
+ return res;
+}
+
static void mac80211_hwsim_free(void)
{
struct mac80211_hwsim_data *data;
@@ -2211,7 +2575,8 @@ static void mac80211_hwsim_free(void)
list))) {
list_del(&data->list);
spin_unlock_bh(&hwsim_radio_lock);
- mac80211_hwsim_destroy_radio(data);
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ NULL);
spin_lock_bh(&hwsim_radio_lock);
}
spin_unlock_bh(&hwsim_radio_lock);
@@ -2339,7 +2704,6 @@ out:
static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
struct genl_info *info)
{
-
struct mac80211_hwsim_data *data2;
struct ieee80211_rx_status rx_status;
const u8 *dst;
@@ -2382,18 +2746,22 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
/* A frame is received from user space */
memset(&rx_status, 0, sizeof(rx_status));
+ /* TODO: Check ATTR_FREQ if it exists, and maybe throw away off-channel
+ * packets?
+ */
rx_status.freq = data2->channel->center_freq;
rx_status.band = data2->channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+ data2->rx_pkts++;
+ data2->rx_bytes += skb->len;
ieee80211_rx_irqsafe(data2->hw, skb);
return 0;
err:
printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
- goto out;
out:
dev_kfree_skb(skb);
return -EINVAL;
@@ -2429,54 +2797,72 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
return 0;
}
-static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
+static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
- unsigned int chans = channels;
- const char *alpha2 = NULL;
- const struct ieee80211_regdomain *regd = NULL;
- bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
- bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
- bool use_chanctx;
+ struct hwsim_new_radio_params param = { 0 };
+
+ param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+ param.channels = channels;
+ param.destroy_on_close =
+ info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE];
if (info->attrs[HWSIM_ATTR_CHANNELS])
- chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+ param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+
+ if (info->attrs[HWSIM_ATTR_NO_VIF])
+ param.no_vif = true;
+
+ if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+ param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
- use_chanctx = true;
+ param.use_chanctx = true;
else
- use_chanctx = (chans > 1);
+ param.use_chanctx = (param.channels > 1);
if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
- alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
+ param.reg_alpha2 =
+ nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
return -EINVAL;
- regd = hwsim_world_regdom_custom[idx];
+ param.regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict,
- p2p_device, use_chanctx);
+ return mac80211_hwsim_new_radio(info, &param);
}
-static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
+static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct mac80211_hwsim_data *data;
- int idx;
+ s64 idx = -1;
+ const char *hwname = NULL;
- if (!info->attrs[HWSIM_ATTR_RADIO_ID])
+ if (info->attrs[HWSIM_ATTR_RADIO_ID])
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+ else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+ hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+ else
return -EINVAL;
- idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data, &hwsim_radios, list) {
- if (data->idx != idx)
- continue;
+ if (idx >= 0) {
+ if (data->idx != idx)
+ continue;
+ } else {
+ if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ continue;
+ }
+
list_del(&data->list);
spin_unlock_bh(&hwsim_radio_lock);
- mac80211_hwsim_destroy_radio(data);
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ info);
return 0;
}
spin_unlock_bh(&hwsim_radio_lock);
@@ -2484,6 +2870,77 @@ static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
return -ENODEV;
}
+static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct mac80211_hwsim_data *data;
+ struct sk_buff *skb;
+ int idx, res = -ENODEV;
+
+ if (!info->attrs[HWSIM_ATTR_RADIO_ID])
+ return -EINVAL;
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry(data, &hwsim_radios, list) {
+ if (data->idx != idx)
+ continue;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb) {
+ res = -ENOMEM;
+ goto out_err;
+ }
+
+ res = mac80211_hwsim_get_radio(skb, data, info->snd_portid,
+ info->snd_seq, NULL, 0);
+ if (res < 0) {
+ nlmsg_free(skb);
+ goto out_err;
+ }
+
+ genlmsg_reply(skb, info);
+ break;
+ }
+
+out_err:
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ return res;
+}
+
+static int hwsim_dump_radio_nl(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int idx = cb->args[0];
+ struct mac80211_hwsim_data *data = NULL;
+ int res;
+
+ spin_lock_bh(&hwsim_radio_lock);
+
+ if (idx == hwsim_radio_idx)
+ goto done;
+
+ list_for_each_entry(data, &hwsim_radios, list) {
+ if (data->idx < idx)
+ continue;
+
+ res = mac80211_hwsim_get_radio(skb, data,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb,
+ NLM_F_MULTI);
+ if (res < 0)
+ break;
+
+ idx = data->idx + 1;
+ }
+
+ cb->args[0] = idx;
+
+done:
+ spin_unlock_bh(&hwsim_radio_lock);
+ return skb->len;
+}
+
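A userspace sketch of driving this dumpit path with libnl-genl (error handling elided; the generic netlink family name "MAC80211_HWSIM" is assumed — only HWSIM_CMD_GET_RADIO comes from this patch):

#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int example_dump_radios(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	if (!sk || !msg || genl_connect(sk))
		return -1;

	family = genl_ctrl_resolve(sk, "MAC80211_HWSIM");
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, HWSIM_CMD_GET_RADIO, 0);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);	/* one NLM_F_MULTI reply per radio */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}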
/* Generic Netlink operations array */
static const struct genl_ops hwsim_ops[] = {
{
@@ -2503,19 +2960,48 @@ static const struct genl_ops hwsim_ops[] = {
.doit = hwsim_tx_info_frame_received_nl,
},
{
- .cmd = HWSIM_CMD_CREATE_RADIO,
+ .cmd = HWSIM_CMD_NEW_RADIO,
.policy = hwsim_genl_policy,
- .doit = hwsim_create_radio_nl,
+ .doit = hwsim_new_radio_nl,
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = HWSIM_CMD_DESTROY_RADIO,
+ .cmd = HWSIM_CMD_DEL_RADIO,
.policy = hwsim_genl_policy,
- .doit = hwsim_destroy_radio_nl,
+ .doit = hwsim_del_radio_nl,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = HWSIM_CMD_GET_RADIO,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_get_radio_nl,
+ .dumpit = hwsim_dump_radio_nl,
+ },
};
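Continuing the sketch above (same sk and family, error handling still elided), a radio that the kernel tears down automatically when the creating socket closes exercises the new HWSIM_ATTR_RADIO_NAME and HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE attributes:

	struct nl_msg *msg = nlmsg_alloc();

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HWSIM_CMD_NEW_RADIO, 0);
	nla_put_string(msg, HWSIM_ATTR_RADIO_NAME, "hwsim-test");
	nla_put_u32(msg, HWSIM_ATTR_CHANNELS, 2);
	nla_put_flag(msg, HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE);
	nl_send_auto(sk, msg);	/* radio lives until this socket is released */
	nlmsg_free(msg);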
+static void destroy_radio(struct work_struct *work)
+{
+ struct mac80211_hwsim_data *data =
+ container_of(work, struct mac80211_hwsim_data, destroy_work);
+
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
+}
+
+static void remove_user_radios(u32 portid)
+{
+ struct mac80211_hwsim_data *entry, *tmp;
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
+ if (entry->destroy_on_close && entry->portid == portid) {
+ list_del(&entry->list);
+ INIT_WORK(&entry->destroy_work, destroy_radio);
+ schedule_work(&entry->destroy_work);
+ }
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+}
+
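The detour through a work item is necessary because mac80211_hwsim_del_radio() ends up in ieee80211_unregister_hw(), which can sleep, while remove_user_radios() runs from the netlink notifier in atomic context with hwsim_radio_lock held; schedule_work() defers the actual teardown to process context.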
static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
unsigned long state,
void *_notify)
@@ -2525,6 +3011,8 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
if (state != NETLINK_URELEASE)
return NOTIFY_DONE;
+ remove_user_radios(notify->portid);
+
if (notify->portid == wmediumd_portid) {
printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
" socket, switching to perfect channel medium\n");
@@ -2544,7 +3032,9 @@ static int hwsim_init_netlink(void)
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops);
+ rc = genl_register_family_with_ops_groups(&hwsim_genl_family,
+ hwsim_ops,
+ hwsim_mcgrps);
if (rc)
goto failure;
@@ -2605,69 +3095,73 @@ static int __init init_mac80211_hwsim(void)
goto out_unregister_driver;
}
+ err = hwsim_init_netlink();
+ if (err < 0)
+ goto out_unregister_driver;
+
for (i = 0; i < radios; i++) {
- const char *reg_alpha2 = NULL;
- const struct ieee80211_regdomain *regd = NULL;
- bool reg_strict = false;
+ struct hwsim_new_radio_params param = { 0 };
+
+ param.channels = channels;
switch (regtest) {
case HWSIM_REGTEST_DIFF_COUNTRY:
if (i < ARRAY_SIZE(hwsim_alpha2s))
- reg_alpha2 = hwsim_alpha2s[i];
+ param.reg_alpha2 = hwsim_alpha2s[i];
break;
case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
if (!i)
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_alpha2 = hwsim_alpha2s[0];
break;
case HWSIM_REGTEST_STRICT_ALL:
- reg_strict = true;
+ param.reg_strict = true;
case HWSIM_REGTEST_DRIVER_REG_ALL:
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_alpha2 = hwsim_alpha2s[0];
break;
case HWSIM_REGTEST_WORLD_ROAM:
if (i == 0)
- regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_01;
break;
case HWSIM_REGTEST_CUSTOM_WORLD:
- regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_01;
break;
case HWSIM_REGTEST_CUSTOM_WORLD_2:
if (i == 0)
- regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_01;
else if (i == 1)
- regd = &hwsim_world_regdom_custom_02;
+ param.regd = &hwsim_world_regdom_custom_02;
break;
case HWSIM_REGTEST_STRICT_FOLLOW:
if (i == 0) {
- reg_strict = true;
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_strict = true;
+ param.reg_alpha2 = hwsim_alpha2s[0];
}
break;
case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
if (i == 0) {
- reg_strict = true;
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_strict = true;
+ param.reg_alpha2 = hwsim_alpha2s[0];
} else if (i == 1) {
- reg_alpha2 = hwsim_alpha2s[1];
+ param.reg_alpha2 = hwsim_alpha2s[1];
}
break;
case HWSIM_REGTEST_ALL:
switch (i) {
case 0:
- regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_01;
break;
case 1:
- regd = &hwsim_world_regdom_custom_02;
+ param.regd = &hwsim_world_regdom_custom_02;
break;
case 2:
- reg_alpha2 = hwsim_alpha2s[0];
+ param.reg_alpha2 = hwsim_alpha2s[0];
break;
case 3:
- reg_alpha2 = hwsim_alpha2s[1];
+ param.reg_alpha2 = hwsim_alpha2s[1];
break;
case 4:
- reg_strict = true;
- reg_alpha2 = hwsim_alpha2s[2];
+ param.reg_strict = true;
+ param.reg_alpha2 = hwsim_alpha2s[2];
break;
}
break;
@@ -2675,10 +3169,10 @@ static int __init init_mac80211_hwsim(void)
break;
}
- err = mac80211_hwsim_create_radio(channels, reg_alpha2,
- regd, reg_strict,
- support_p2p_device,
- channels > 1);
+ param.p2p_device = support_p2p_device;
+ param.use_chanctx = channels > 1;
+
+ err = mac80211_hwsim_new_radio(NULL, &param);
if (err < 0)
goto out_free_radios;
}
@@ -2704,10 +3198,6 @@ static int __init init_mac80211_hwsim(void)
}
rtnl_unlock();
- err = hwsim_init_netlink();
- if (err < 0)
- goto out_free_mon;
-
return 0;
out_free_mon:
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index c9d0315575ba..66e1c73bd507 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -60,14 +60,17 @@ enum hwsim_tx_control_flags {
* space, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER,
* %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE,
- * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional)
* @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
* kernel, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
* %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
- * @HWSIM_CMD_CREATE_RADIO: create a new radio with the given parameters,
- * returns the radio ID (>= 0) or negative on errors
- * @HWSIM_CMD_DESTROY_RADIO: destroy a radio
+ * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters,
+ * returns the radio ID (>= 0) or negative on errors; on success
+ * the result is also multicast
+ * @HWSIM_CMD_DEL_RADIO: destroy a radio; the reply is multicast
+ * @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses:
+ * %HWSIM_ATTR_RADIO_ID
* @__HWSIM_CMD_MAX: enum limit
*/
enum {
@@ -75,12 +78,16 @@ enum {
HWSIM_CMD_REGISTER,
HWSIM_CMD_FRAME,
HWSIM_CMD_TX_INFO_FRAME,
- HWSIM_CMD_CREATE_RADIO,
- HWSIM_CMD_DESTROY_RADIO,
+ HWSIM_CMD_NEW_RADIO,
+ HWSIM_CMD_DEL_RADIO,
+ HWSIM_CMD_GET_RADIO,
__HWSIM_CMD_MAX,
};
#define HWSIM_CMD_MAX (__HWSIM_CMD_MAX - 1)
+#define HWSIM_CMD_CREATE_RADIO HWSIM_CMD_NEW_RADIO
+#define HWSIM_CMD_DESTROY_RADIO HWSIM_CMD_DEL_RADIO
+
/**
* enum hwsim_attrs - hwsim netlink attributes
*
@@ -111,6 +118,11 @@ enum {
* @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO
* command to force use of channel contexts even when only a
* single channel is supported
+ * @HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE: used with the %HWSIM_CMD_CREATE_RADIO
+ * command to force radio removal when the process that created the
+ * radio dies
+ * @HWSIM_ATTR_RADIO_NAME: Name of the radio, e.g. phy666
+ * @HWSIM_ATTR_NO_VIF: Do not create a vif (wlanX) when creating the radio.
+ * @HWSIM_ATTR_FREQ: Frequency at which the packet is transmitted or received.
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -132,6 +144,10 @@ enum {
HWSIM_ATTR_REG_STRICT_REG,
HWSIM_ATTR_SUPPORT_P2P_DEVICE,
HWSIM_ATTR_USE_CHANCTX,
+ HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE,
+ HWSIM_ATTR_RADIO_NAME,
+ HWSIM_ATTR_NO_VIF,
+ HWSIM_ATTR_FREQ,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 62f5dbe602d3..9d4786e7ddff 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -544,6 +544,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
u32 tx_win_size = priv->add_ba_param.tx_win_size;
static u8 dialog_tok;
int ret;
+ unsigned long flags;
u16 block_ack_param_set;
dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
@@ -554,15 +555,18 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
struct mwifiex_sta_node *sta_ptr;
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
if (!sta_ptr) {
dev_warn(priv->adapter->dev,
"BA setup with unknown TDLS peer %pM!\n",
peer_mac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
return -1;
}
if (sta_ptr->is_11ac_enabled)
tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 2ee268b632be..f275675cdbd3 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -84,6 +84,8 @@ mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
{
struct mwifiex_tx_ba_stream_tbl *tx_tbl;
+ if (is_broadcast_ether_addr(ptr->ra))
+ return false;
tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
if (tx_tbl)
return tx_tbl->amsdu;
@@ -96,6 +98,8 @@ static inline u8
mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ptr, int tid)
{
+ if (is_broadcast_ether_addr(ptr->ra))
+ return false;
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
} else {
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 5ef5a0eeba50..d73fda312c87 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -351,6 +351,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->init_win = seq_num;
new_node->flags = 0;
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
if (mwifiex_queuing_ra_based(priv)) {
dev_dbg(priv->adapter->dev,
"info: AP/ADHOC:last_seq=%d start_win=%d\n",
@@ -367,6 +368,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
else
last_seq = priv->rx_seq[tid];
}
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
last_seq >= new_node->start_win) {
@@ -455,22 +457,26 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
u32 rx_win_size = priv->add_ba_param.rx_win_size;
u8 tid;
int win_size;
+ unsigned long flags;
uint16_t block_ack_param_set;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->is_hw_11ac_capable &&
memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv,
cmd_addba_req->peer_mac_addr);
if (!sta_ptr) {
dev_warn(priv->adapter->dev,
"BA setup with unknown TDLS peer %pM!\n",
cmd_addba_req->peer_mac_addr);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
return -1;
}
if (sta_ptr->is_11ac_enabled)
rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index e70d0df9b0da..aa01c9bc77f9 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -31,7 +31,7 @@ config MWIFIEX_PCIE
mwifiex_pcie.
config MWIFIEX_USB
- tristate "Marvell WiFi-Ex Driver for USB8797/8897"
+ tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
depends on MWIFIEX && USB
select FW_LOADER
---help---
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 0dd672954ad1..4a66a6555366 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -194,10 +194,17 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
tx_info->pkt_len = pkt_len;
mwifiex_form_mgmt_frame(skb, buf, len);
- mwifiex_queue_tx_pkt(priv, skb);
-
*cookie = prandom_u32() | 1;
- cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
+
+ if (ieee80211_is_action(mgmt->frame_control))
+ skb = mwifiex_clone_skb_for_tx_status(priv,
+ skb,
+ MWIFIEX_BUF_FLAG_ACTION_TX_STATUS, cookie);
+ else
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+ GFP_ATOMIC);
+
+ mwifiex_queue_tx_pkt(priv, skb);
wiphy_dbg(wiphy, "info: management frame transmitted\n");
return 0;
@@ -992,6 +999,52 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
return mwifiex_dump_station_info(priv, sinfo);
}
+static int
+mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
+ int idx, struct survey_info *survey)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
+ enum ieee80211_band band;
+
+ dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx);
+
+ memset(survey, 0, sizeof(struct survey_info));
+
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ priv->media_connected && idx == 0) {
+ u8 curr_bss_band = priv->curr_bss_params.band;
+ u32 chan = priv->curr_bss_params.bss_descriptor.channel;
+
+ band = mwifiex_band_to_radio_type(curr_bss_band);
+ survey->channel = ieee80211_get_channel(wiphy,
+ ieee80211_channel_to_frequency(chan, band));
+
+ if (priv->bcn_nf_last) {
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = priv->bcn_nf_last;
+ }
+ return 0;
+ }
+
+ if (idx >= priv->adapter->num_in_chan_stats)
+ return -ENOENT;
+
+ if (!pchan_stats[idx].cca_scan_dur)
+ return 0;
+
+ band = pchan_stats[idx].bandcfg;
+ survey->channel = ieee80211_get_channel(wiphy,
+ ieee80211_channel_to_frequency(pchan_stats[idx].chan_num, band));
+ survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_CHANNEL_TIME | SURVEY_INFO_CHANNEL_TIME_BUSY;
+ survey->noise = pchan_stats[idx].noise;
+ survey->channel_time = pchan_stats[idx].cca_scan_dur;
+ survey->channel_time_busy = pchan_stats[idx].cca_busy_dur;
+
+ return 0;
+}
+
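With .dump_survey wired into mwifiex_cfg80211_ops below, the per-channel results can be read with "iw dev wlan0 survey dump" (interface name assumed); cfg80211 keeps calling the handler with increasing idx until it returns -ENOENT.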
/* Supported rates to be advertised to the cfg80211 */
static struct ieee80211_rate mwifiex_rates[] = {
{.bitrate = 10, .hw_value = 2, },
@@ -1239,36 +1292,34 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac)
+ struct station_del_parameters *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_sta_node *sta_node;
+ u8 deauth_mac[ETH_ALEN];
unsigned long flags;
if (list_empty(&priv->sta_list) || !priv->bss_started)
return 0;
- if (!mac || is_broadcast_ether_addr(mac)) {
- wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
- list_for_each_entry(sta_node, &priv->sta_list, list) {
- if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
- HostCmd_ACT_GEN_SET, 0,
- sta_node->mac_addr, true))
- return -1;
- mwifiex_uap_del_sta_data(priv, sta_node);
- }
- } else {
- wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, mac);
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
- sta_node = mwifiex_get_sta_entry(priv, mac);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- if (sta_node) {
- if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
- HostCmd_ACT_GEN_SET, 0,
- sta_node->mac_addr, true))
- return -1;
- mwifiex_uap_del_sta_data(priv, sta_node);
- }
+ if (!params->mac || is_broadcast_ether_addr(params->mac))
+ return 0;
+
+ wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
+
+ memset(deauth_mac, 0, ETH_ALEN);
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ sta_node = mwifiex_get_sta_entry(priv, params->mac);
+ if (sta_node)
+ ether_addr_copy(deauth_mac, params->mac);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+ if (is_valid_ether_addr(deauth_mac)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
+ HostCmd_ACT_GEN_SET, 0,
+ deauth_mac, true))
+ return -1;
}
return 0;
@@ -1759,6 +1810,10 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
dev_dbg(priv->adapter->dev,
"info: associated to bssid %pM successfully\n",
priv->cfg_bssid);
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->auto_tdls &&
+ priv->bss_type == MWIFIEX_BSS_TYPE_STA)
+ mwifiex_setup_auto_tdls_timer(priv);
} else {
dev_dbg(priv->adapter->dev,
"info: association to bssid %pM failed\n",
@@ -2630,11 +2685,13 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
dev_dbg(priv->adapter->dev,
"Send TDLS Setup Request to %pM status_code=%d\n", peer,
status_code);
+ mwifiex_add_auto_tdls_peer(priv, peer);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_SETUP_RESPONSE:
+ mwifiex_add_auto_tdls_peer(priv, peer);
dev_dbg(priv->adapter->dev,
"Send TDLS Setup Response to %pM status_code=%d\n",
peer, status_code);
@@ -2779,6 +2836,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.disconnect = mwifiex_cfg80211_disconnect,
.get_station = mwifiex_cfg80211_get_station,
.dump_station = mwifiex_cfg80211_dump_station,
+ .dump_survey = mwifiex_cfg80211_dump_survey,
.set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
.join_ibss = mwifiex_cfg80211_join_ibss,
.leave_ibss = mwifiex_cfg80211_leave_ibss,
@@ -2840,6 +2898,25 @@ static const struct wiphy_coalesce_support mwifiex_coalesce_support = {
.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
};
+int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
+{
+ u32 n_channels_bg, n_channels_a = 0;
+
+ n_channels_bg = mwifiex_band_2ghz.n_channels;
+
+ if (adapter->config_bands & BAND_A)
+ n_channels_a = mwifiex_band_5ghz.n_channels;
+
+ adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
+ adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
+ adapter->num_in_chan_stats);
+
+ if (!adapter->chan_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
/*
* This function registers the device with CFG802.11 subsystem.
*
@@ -2915,6 +2992,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (adapter->fw_api_ver == MWIFIEX_FW_V15)
+ wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e0d00a7f0ec3..2269acf41ad8 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -76,6 +76,8 @@
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
+#define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS BIT(3)
+#define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS BIT(4)
#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
@@ -85,6 +87,11 @@
#define MWIFIEX_TDLS_CREATE_LINK 0x02
#define MWIFIEX_TDLS_CONFIG_LINK 0x03
+#define MWIFIEX_TDLS_RSSI_HIGH 50
+#define MWIFIEX_TDLS_RSSI_LOW 55
+#define MWIFIEX_TDLS_MAX_FAIL_COUNT 4
+#define MWIFIEX_AUTO_TDLS_IDLE_TIME 10
+
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -154,6 +161,8 @@ struct mwifiex_txinfo {
u8 bss_num;
u8 bss_type;
u32 pkt_len;
+ u8 ack_frame_id;
+ u64 cookie;
};
enum mwifiex_wmm_ac_e {
@@ -185,4 +194,14 @@ struct mwifiex_arp_eth_header {
u8 ar_tha[ETH_ALEN];
u8 ar_tip[4];
} __packed;
+
+struct mwifiex_chan_stats {
+ u8 chan_num;
+ u8 bandcfg;
+ u8 flags;
+ s8 noise;
+ u16 total_bss;
+ u16 cca_scan_dur;
+ u16 cca_busy_dur;
+} __packed;
#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 1eb61739071f..fb5936eb82e3 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -172,6 +172,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
#define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197)
#define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
+#define TLV_TYPE_CHANNEL_STATS (PROPRIETARY_TLV_BASE_ID + 198)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -493,6 +494,7 @@ enum P2P_MODES {
#define EVENT_TDLS_GENERIC_EVENT 0x00000052
#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
+#define EVENT_TX_STATUS_REPORT 0x00000074
#define EVENT_ID_MASK 0xffff
#define BSS_NUM_MASK 0xf
@@ -541,6 +543,7 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
#define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01
+#define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS 0x20
struct txpd {
u8 bss_type;
@@ -552,7 +555,9 @@ struct txpd {
u8 priority;
u8 flags;
u8 pkt_delay_2ms;
- u8 reserved1;
+ u8 reserved1[2];
+ u8 tx_token_id;
+ u8 reserved[2];
} __packed;
struct rxpd {
@@ -583,6 +588,7 @@ struct rxpd {
* [Bit 7] Reserved
*/
u8 ht_info;
+ u8 reserved[3];
u8 flags;
} __packed;
@@ -596,8 +602,9 @@ struct uap_txpd {
u8 priority;
u8 flags;
u8 pkt_delay_2ms;
- u8 reserved1;
- __le32 reserved2;
+ u8 reserved1[2];
+ u8 tx_token_id;
+ u8 reserved[2];
};
struct uap_rxpd {
@@ -611,6 +618,16 @@ struct uap_rxpd {
u8 reserved1;
};
+struct mwifiex_fw_chan_stats {
+ u8 chan_num;
+ u8 bandcfg;
+ u8 flags;
+ s8 noise;
+ __le16 total_bss;
+ __le16 cca_scan_dur;
+ __le16 cca_busy_dur;
+} __packed;
+
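This firmware layout mirrors struct mwifiex_chan_stats from decl.h, with the 16-bit counters little-endian on the wire. A hypothetical conversion helper (not part of this patch) only needs to swap those fields:

static void example_chan_stats_from_fw(struct mwifiex_chan_stats *dst,
				       const struct mwifiex_fw_chan_stats *src)
{
	dst->chan_num = src->chan_num;
	dst->bandcfg = src->bandcfg;
	dst->flags = src->flags;
	dst->noise = src->noise;
	dst->total_bss = le16_to_cpu(src->total_bss);
	dst->cca_scan_dur = le16_to_cpu(src->cca_scan_dur);
	dst->cca_busy_dur = le16_to_cpu(src->cca_busy_dur);
}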
enum mwifiex_chan_scan_mode_bitmasks {
MWIFIEX_PASSIVE_SCAN = BIT(0),
MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -660,6 +677,11 @@ struct mwifiex_ie_types_scan_chan_gap {
__le16 chan_gap;
} __packed;
+struct mwifiex_ietypes_chanstats {
+ struct mwifiex_ie_types_header header;
+ struct mwifiex_fw_chan_stats chanstats[0];
+} __packed;
+
struct mwifiex_ie_types_wildcard_ssid_params {
struct mwifiex_ie_types_header header;
u8 max_ssid_length;
@@ -1207,6 +1229,12 @@ struct mwifiex_event_scan_result {
u8 num_of_set;
} __packed;
+struct tx_status_event {
+ u8 packet_type;
+ u8 tx_token_id;
+ u8 status;
+} __packed;
+
#define MWIFIEX_USER_SCAN_CHAN_MAX 50
#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 580aa45ec4bc..520ad4a3018b 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->csa_expire_time = 0;
priv->del_list_idx = 0;
priv->hs2_enabled = false;
+ priv->check_tdls_tx = false;
memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
return mwifiex_add_bss_prio_tbl(priv);
@@ -366,6 +367,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
list_del(&priv->tx_ba_stream_tbl_ptr);
list_del(&priv->rx_reorder_tbl_ptr);
list_del(&priv->sta_list);
+ list_del(&priv->auto_tdls_list);
}
}
}
@@ -434,6 +436,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&priv->wmm.ra_list_spinlock);
spin_lock_init(&priv->curr_bcn_buf_lock);
spin_lock_init(&priv->sta_list_spinlock);
+ spin_lock_init(&priv->auto_tdls_lock);
}
}
@@ -449,7 +452,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->scan_pending_q_lock);
spin_lock_init(&adapter->rx_proc_lock);
- skb_queue_head_init(&adapter->usb_rx_data_q);
skb_queue_head_init(&adapter->rx_data_q);
for (i = 0; i < adapter->priv_num; ++i) {
@@ -466,10 +468,14 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
INIT_LIST_HEAD(&priv->sta_list);
+ INIT_LIST_HEAD(&priv->auto_tdls_list);
skb_queue_head_init(&priv->tdls_txq);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
+
+ spin_lock_init(&priv->ack_status_lock);
+ idr_init(&priv->ack_status_frames);
}
return 0;
@@ -646,6 +652,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
if (adapter->priv[i]) {
priv = adapter->priv[i];
+ mwifiex_clean_auto_tdls(priv);
mwifiex_clean_txrx(priv);
mwifiex_delete_bss_prio_tbl(priv);
}
@@ -668,19 +675,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
spin_lock(&adapter->mwifiex_lock);
- if (adapter->if_ops.data_complete) {
- while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) {
- struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
-
- priv = adapter->priv[rx_info->bss_num];
- if (priv)
- priv->stats.rx_dropped++;
-
- dev_kfree_skb_any(skb);
- adapter->if_ops.data_complete(adapter);
- }
- }
-
mwifiex_adapter_cleanup(adapter);
spin_unlock(&adapter->mwifiex_lock);
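
init.c now sets up a per-interface IDR (ack_status_frames) that maps small tx_token_id values back to cloned skbs; the matching free-all path lands in wmm.c further down. A throwaway demo module, hypothetical and separate from the driver, exercising the same allocate/find/free-everything cycle with the kernel IDR API:

/* demo_idr.c: hypothetical standalone module, not driver code */
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDR(demo_idr);

static int __init demo_init(void)
{
	char *payload = kstrdup("frame", GFP_KERNEL);
	int id;

	if (!payload)
		return -ENOMEM;

	/* Allocate a token in [1, 0xfe]; token 0 means "no status wanted". */
	id = idr_alloc(&demo_idr, payload, 1, 0xff, GFP_KERNEL);
	if (id < 0) {
		kfree(payload);
		return id;
	}
	pr_info("demo: token %d -> %s\n", id, (char *)idr_find(&demo_idr, id));
	return 0;
}

static int demo_free(int id, void *p, void *data)
{
	kfree(p);	/* free whatever is still mapped, as clean_txrx does */
	return 0;
}

static void __exit demo_exit(void)
{
	idr_for_each(&demo_idr, demo_free, NULL);
	idr_destroy(&demo_idr);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
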
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 8d6c25908b6d..411a6c2f4aca 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -880,9 +880,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
/* Set Capability info */
bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_IBSS;
- tmp_cap = le16_to_cpu(adhoc_start->cap_info_bitmap);
- tmp_cap &= ~WLAN_CAPABILITY_ESS;
- tmp_cap |= WLAN_CAPABILITY_IBSS;
+ tmp_cap = WLAN_CAPABILITY_IBSS;
/* Set up privacy in bss_desc */
if (priv->sec_info.encryption_mode) {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index d5070c444fe1..d4d2223d1f31 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -28,6 +28,11 @@ const char driver_version[] = "mwifiex " VERSION " (%s) ";
static char *cal_data_cfg;
module_param(cal_data_cfg, charp, 0);
+static unsigned short driver_mode;
+module_param(driver_mode, ushort, 0);
+MODULE_PARM_DESC(driver_mode,
+ "station=0x1(default), ap-sta=0x3, station-p2p=0x5, ap-sta-p2p=0x7");
+
/*
* This function registers the device and performs all the necessary
* initializations.
@@ -122,6 +127,7 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
}
}
+ vfree(adapter->chan_stats);
kfree(adapter);
return 0;
}
@@ -143,8 +149,11 @@ static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
/* Check for Rx data */
while ((skb = skb_dequeue(&adapter->rx_data_q))) {
atomic_dec(&adapter->rx_pending);
- if (adapter->delay_main_work &&
+ if ((adapter->delay_main_work ||
+ adapter->iface_type == MWIFIEX_USB) &&
(atomic_read(&adapter->rx_pending) < LOW_RX_PENDING)) {
+ if (adapter->if_ops.submit_rem_rx_urbs)
+ adapter->if_ops.submit_rem_rx_urbs(adapter);
adapter->delay_main_work = false;
queue_work(adapter->workqueue, &adapter->main_work);
}
@@ -177,7 +186,6 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
{
int ret = 0;
unsigned long flags;
- struct sk_buff *skb;
spin_lock_irqsave(&adapter->main_proc_lock, flags);
@@ -195,12 +203,15 @@ process_start:
(adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
break;
- /* If we process interrupts first, it would increase RX pending
- * even further. Avoid this by checking if rx_pending has
- * crossed high threshold and schedule rx work queue
- * and then process interrupts
+ /* For non-USB interfaces, if we process interrupts first, it
+ * would increase RX pending even further. Avoid this by
+ * checking whether rx_pending has crossed the high threshold,
+ * scheduling the rx work queue, and only then processing
+ * interrupts. The USB interface has no interrupts; usb.c
+ * already applies its own HIGH_RX_PENDING check.
*/
- if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING) {
+ if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING &&
+ adapter->iface_type != MWIFIEX_USB) {
adapter->delay_main_work = true;
if (!adapter->rx_processing)
queue_work(adapter->rx_workqueue,
@@ -252,11 +263,6 @@ process_start:
}
}
- /* Check Rx data for USB */
- if (adapter->iface_type == MWIFIEX_USB)
- while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
- mwifiex_handle_rx_packet(adapter, skb);
-
/* Check for event */
if (adapter->event_received) {
adapter->event_received = false;
@@ -447,6 +453,16 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto err_init_fw;
}
+ if (mwifiex_init_channel_scan_gap(adapter)) {
+ dev_err(adapter->dev, "could not init channel stats table\n");
+ goto err_init_fw;
+ }
+
+ if (driver_mode) {
+ driver_mode &= MWIFIEX_DRIVER_MODE_BITMASK;
+ driver_mode |= MWIFIEX_DRIVER_MODE_STA;
+ }
+
rtnl_lock();
/* Create station interface by default */
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
@@ -456,6 +472,28 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
rtnl_unlock();
goto err_add_intf;
}
+
+ if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) {
+ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
+ NL80211_IFTYPE_AP, NULL, NULL);
+ if (IS_ERR(wdev)) {
+ dev_err(adapter->dev, "cannot create AP interface\n");
+ rtnl_unlock();
+ goto err_add_intf;
+ }
+ }
+
+ if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) {
+ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
+ NL80211_IFTYPE_P2P_CLIENT, NULL,
+ NULL);
+ if (IS_ERR(wdev)) {
+ dev_err(adapter->dev,
+ "cannot create p2p client interface\n");
+ rtnl_unlock();
+ goto err_add_intf;
+ }
+ }
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -570,6 +608,48 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
return 0;
}
+struct sk_buff *
+mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
+ struct sk_buff *skb, u8 flag, u64 *cookie)
+{
+ struct sk_buff *orig_skb = skb;
+ struct mwifiex_txinfo *tx_info, *orig_tx_info;
+
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (skb) {
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&priv->ack_status_lock, flags);
+ id = idr_alloc(&priv->ack_status_frames, orig_skb,
+ 1, 0xff, GFP_ATOMIC);
+ spin_unlock_irqrestore(&priv->ack_status_lock, flags);
+
+ if (id >= 0) {
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->ack_frame_id = id;
+ tx_info->flags |= flag;
+ orig_tx_info = MWIFIEX_SKB_TXCB(orig_skb);
+ orig_tx_info->ack_frame_id = id;
+ orig_tx_info->flags |= flag;
+
+ if (flag == MWIFIEX_BUF_FLAG_ACTION_TX_STATUS && cookie)
+ orig_tx_info->cookie = *cookie;
+
+ } else if (skb_shared(skb)) {
+ kfree_skb(orig_skb);
+ } else {
+ kfree_skb(skb);
+ skb = orig_skb;
+ }
+ } else {
+ /* couldn't clone -- lose tx status ... */
+ skb = orig_skb;
+ }
+
+ return skb;
+}
+
/*
* CFG802.11 network device handler for data transmission.
*/
@@ -579,6 +659,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct sk_buff *new_skb;
struct mwifiex_txinfo *tx_info;
+ bool multicast;
dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
jiffies, priv->bss_type, priv->bss_num);
@@ -619,6 +700,15 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->bss_type = priv->bss_type;
tx_info->pkt_len = skb->len;
+ multicast = is_multicast_ether_addr(skb->data);
+
+ if (unlikely(!multicast && skb->sk &&
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS &&
+ priv->adapter->fw_api_ver == MWIFIEX_FW_V15))
+ skb = mwifiex_clone_skb_for_tx_status(priv,
+ skb,
+ MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS, NULL);
+
/* Record the current time the packet was queued; used to
* determine the amount of time the packet was queued in
* the driver before it was sent to the firmware.
@@ -628,6 +718,13 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
__net_timestamp(skb);
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->bss_type == MWIFIEX_BSS_TYPE_STA &&
+ !ether_addr_equal_unaligned(priv->cfg_bssid, skb->data)) {
+ if (priv->adapter->auto_tdls && priv->check_tdls_tx)
+ mwifiex_tdls_check_tx(priv, skb);
+ }
+
mwifiex_queue_tx_pkt(priv, skb);
return 0;
@@ -858,7 +955,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
- if (num_possible_cpus() > 1) {
+ if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) {
adapter->rx_work_enabled = true;
pr_notice("rx work enabled, cpus %d\n", num_possible_cpus());
}
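
The main-process rework above defers interrupt handling while the RX backlog sits above a high watermark and resumes once the RX worker drains it below a low one (USB skips the interrupt half entirely). A userspace model of just that hysteresis; the 50/20 watermark values are assumptions, the real constants live in main.h:

#include <stdbool.h>
#include <stdio.h>

#define HIGH_RX_PENDING 50	/* assumed value */
#define LOW_RX_PENDING  20	/* assumed value */

static bool delay_main_work;

/* Called from the main work loop before touching interrupts. */
static bool should_defer_to_rx_worker(int rx_pending, bool is_usb)
{
	/* USB has no interrupt path; its URB completion already throttles. */
	if (is_usb)
		return false;
	if (rx_pending >= HIGH_RX_PENDING) {
		delay_main_work = true;
		return true;
	}
	return false;
}

/* Called from the RX worker as packets drain. */
static bool should_resume_main_work(int rx_pending, bool is_usb)
{
	return (delay_main_work || is_usb) && rx_pending < LOW_RX_PENDING;
}

int main(void)
{
	printf("defer at 60: %d\n", should_defer_to_rx_worker(60, false));
	printf("resume at 10: %d\n", should_resume_main_work(10, false));
	return 0;
}
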
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index f55658d15c60..e66993cb5daf 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -34,6 +34,7 @@
#include <linux/firmware.h>
#include <linux/ctype.h>
#include <linux/of.h>
+#include <linux/idr.h>
#include "decl.h"
#include "ioctl.h"
@@ -48,6 +49,11 @@ enum {
MWIFIEX_SYNC_CMD
};
+#define MWIFIEX_DRIVER_MODE_STA BIT(0)
+#define MWIFIEX_DRIVER_MODE_UAP BIT(1)
+#define MWIFIEX_DRIVER_MODE_P2P BIT(2)
+#define MWIFIEX_DRIVER_MODE_BITMASK (BIT(0) | BIT(1) | BIT(2))
+
#define MWIFIEX_MAX_AP 64
#define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ)
@@ -106,10 +112,7 @@ enum {
*/
#define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \
adapter->event_received || \
- ((adapter->iface_type != MWIFIEX_USB) && \
- adapter->data_received) || \
- ((adapter->iface_type == MWIFIEX_USB) && \
- !skb_queue_empty(&adapter->usb_rx_data_q)))
+ adapter->data_received)
#define MWIFIEX_TYPE_CMD 1
#define MWIFIEX_TYPE_DATA 0
@@ -504,8 +507,11 @@ struct mwifiex_private {
struct mwifiex_wmm_desc wmm;
atomic_t wmm_tx_pending[IEEE80211_NUM_ACS];
struct list_head sta_list;
- /* spin lock for associated station list */
+ /* spin lock for associated station/TDLS peers list */
spinlock_t sta_list_spinlock;
+ struct list_head auto_tdls_list;
+ /* spin lock for auto TDLS peer list */
+ spinlock_t auto_tdls_lock;
struct list_head tx_ba_stream_tbl_ptr;
/* spin lock for tx_ba_stream_tbl_ptr queue */
spinlock_t tx_ba_stream_tbl_lock;
@@ -570,6 +576,12 @@ struct mwifiex_private {
bool hs2_enabled;
struct station_parameters *sta_params;
struct sk_buff_head tdls_txq;
+ u8 check_tdls_tx;
+ struct timer_list auto_tdls_timer;
+ bool auto_tdls_timer_active;
+ struct idr ack_status_frames;
+ /* spin lock for ack status */
+ spinlock_t ack_status_lock;
};
enum mwifiex_ba_status {
@@ -670,6 +682,17 @@ struct mwifiex_sta_node {
struct mwifiex_tdls_capab tdls_cap;
};
+struct mwifiex_auto_tdls_peer {
+ struct list_head list;
+ u8 mac_addr[ETH_ALEN];
+ u8 tdls_status;
+ int rssi;
+ long rssi_jiffies;
+ u8 failure_count;
+ u8 do_discover;
+ u8 do_setup;
+};
+
struct mwifiex_if_ops {
int (*init_if) (struct mwifiex_adapter *);
void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,13 +713,13 @@ struct mwifiex_if_ops {
void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
- int (*data_complete) (struct mwifiex_adapter *);
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
void (*fw_dump)(struct mwifiex_adapter *);
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
void (*iface_work)(struct work_struct *work);
+ void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
};
struct mwifiex_adapter {
@@ -767,7 +790,6 @@ struct mwifiex_adapter {
spinlock_t scan_pending_q_lock;
/* spin lock for RX processing routine */
spinlock_t rx_proc_lock;
- struct sk_buff_head usb_rx_data_q;
u32 scan_processing;
u16 region_code;
struct mwifiex_802_11d_domain_reg domain_reg;
@@ -845,6 +867,10 @@ struct mwifiex_adapter {
u8 curr_mem_idx;
bool scan_chan_gap_enabled;
struct sk_buff_head rx_data_q;
+ struct mwifiex_chan_stats *chan_stats;
+ u32 num_in_chan_stats;
+ int survey_idx;
+ bool auto_tdls;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -949,6 +975,8 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
int mwifiex_process_sta_event(struct mwifiex_private *);
int mwifiex_process_uap_event(struct mwifiex_private *);
void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
+void mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv,
+ const u8 *ra_addr);
void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
@@ -1031,7 +1059,8 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
void *data_buf);
-int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv);
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp);
int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
void *buf);
@@ -1301,6 +1330,24 @@ u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
u32 pri_chan, u8 chan_bw);
int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter);
+int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb);
+void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv);
+void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
+ const u8 *mac, u8 link_status);
+void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
+ u8 *mac, s8 snr, s8 nflr);
+void mwifiex_check_auto_tdls(unsigned long context);
+void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
+void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
+void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+
+void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
+ void *event_body);
+
+struct sk_buff *
+mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
+ struct sk_buff *skb, u8 flag, u64 *cookie);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
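
The driver_mode bits above are normalized in mwifiex_fw_dpc(): unknown bits are masked off and the station bit is forced, so the default mlan%d interface always exists. A small self-checking sketch of that normalization (names are local to the demo):

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define MODE_STA     BIT(0)
#define MODE_UAP     BIT(1)
#define MODE_P2P     BIT(2)
#define MODE_BITMASK (BIT(0) | BIT(1) | BIT(2))

/* Mirror of the normalization in mwifiex_fw_dpc(): strip unknown bits,
 * force the station bit. Zero keeps the historical station-only default. */
static unsigned short normalize_driver_mode(unsigned short mode)
{
	if (!mode)
		return 0;
	mode &= MODE_BITMASK;
	mode |= MODE_STA;
	return mode;
}

int main(void)
{
	assert(normalize_driver_mode(0x2) == 0x3);  /* ap-sta */
	assert(normalize_driver_mode(0xff) == 0x7); /* unknown bits dropped */
	printf("ok\n");
	return 0;
}
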
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index ca64d4c94112..984a7a4fa93b 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1623,7 +1623,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
if (*bytes_left >= sizeof(beacon_size)) {
/* Extract & convert beacon size from command buffer */
- memcpy(&beacon_size, *bss_info, sizeof(beacon_size));
+ beacon_size = le16_to_cpu(*(__le16 *)(*bss_info));
*bytes_left -= sizeof(beacon_size);
*bss_info += sizeof(beacon_size);
}
@@ -1755,6 +1755,7 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
+ adapter->survey_idx = 0;
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
if (!priv->scan_request) {
@@ -1976,10 +1977,53 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
return 0;
}
+static void
+mwifiex_update_chan_statistics(struct mwifiex_private *priv,
+ struct mwifiex_ietypes_chanstats *tlv_stat)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 i, num_chan;
+ struct mwifiex_fw_chan_stats *fw_chan_stats;
+ struct mwifiex_chan_stats chan_stats;
+
+ fw_chan_stats = (void *)((u8 *)tlv_stat +
+ sizeof(struct mwifiex_ie_types_header));
+ num_chan = le16_to_cpu(tlv_stat->header.len) /
+ sizeof(struct mwifiex_chan_stats);
+
+ for (i = 0 ; i < num_chan; i++) {
+ chan_stats.chan_num = fw_chan_stats->chan_num;
+ chan_stats.bandcfg = fw_chan_stats->bandcfg;
+ chan_stats.flags = fw_chan_stats->flags;
+ chan_stats.noise = fw_chan_stats->noise;
+ chan_stats.total_bss = le16_to_cpu(fw_chan_stats->total_bss);
+ chan_stats.cca_scan_dur =
+ le16_to_cpu(fw_chan_stats->cca_scan_dur);
+ chan_stats.cca_busy_dur =
+ le16_to_cpu(fw_chan_stats->cca_busy_dur);
+ dev_dbg(adapter->dev,
+ "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
+ chan_stats.chan_num,
+ chan_stats.noise,
+ chan_stats.total_bss,
+ chan_stats.cca_scan_dur,
+ chan_stats.cca_busy_dur);
+ memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats,
+ sizeof(struct mwifiex_chan_stats));
+ fw_chan_stats++;
+ }
+}
+
/* This function handles the command response of extended scan */
-int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
{
struct mwifiex_adapter *adapter = priv->adapter;
+ struct host_cmd_ds_802_11_scan_ext *ext_scan_resp;
+ struct mwifiex_ie_types_header *tlv;
+ struct mwifiex_ietypes_chanstats *tlv_stat;
+ u16 buf_left, type, len;
+
struct host_cmd_ds_command *cmd_ptr;
struct cmd_ctrl_node *cmd_node;
unsigned long cmd_flags, scan_flags;
@@ -1987,6 +2031,36 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+ ext_scan_resp = &resp->params.ext_scan;
+
+ tlv = (void *)ext_scan_resp->tlv_buffer;
+ buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN
+ - 1);
+
+ while (buf_left >= sizeof(struct mwifiex_ie_types_header)) {
+ type = le16_to_cpu(tlv->type);
+ len = le16_to_cpu(tlv->len);
+
+ if (buf_left < (sizeof(struct mwifiex_ie_types_header) + len)) {
+ dev_err(adapter->dev,
+ "error processing scan response TLVs");
+ break;
+ }
+
+ switch (type) {
+ case TLV_TYPE_CHANNEL_STATS:
+ tlv_stat = (void *)tlv;
+ mwifiex_update_chan_statistics(priv, tlv_stat);
+ break;
+ default:
+ break;
+ }
+
+ buf_left -= len + sizeof(struct mwifiex_ie_types_header);
+ tlv = (void *)((u8 *)tlv + len +
+ sizeof(struct mwifiex_ie_types_header));
+ }
+
spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_flags);
spin_lock_irqsave(&adapter->scan_pending_q_lock, scan_flags);
if (list_empty(&adapter->scan_pending_q)) {
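
The extended-scan response handler above walks a TLV buffer, stopping when fewer than a header's worth of bytes remain and rejecting any TLV whose claimed length would overrun the buffer. A self-contained userspace sketch of the same accounting over a made-up TLV:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed wire layout of a TLV header: little-endian type and length. */
struct tlv_hdr {
	uint16_t type;	/* stored little-endian on the wire */
	uint16_t len;	/* payload length, excluding this header */
} __attribute__((packed));

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Same invariant as the scan-response parser: header must fit, and
 * header + claimed payload must fit, before the TLV is consumed. */
static void walk_tlvs(const uint8_t *buf, size_t buf_left)
{
	while (buf_left >= sizeof(struct tlv_hdr)) {
		uint16_t type = get_le16(buf);
		uint16_t len = get_le16(buf + 2);

		if (buf_left < sizeof(struct tlv_hdr) + len) {
			fprintf(stderr, "truncated TLV\n");
			break;
		}
		printf("tlv type=%#x len=%u\n", type, len);
		buf += sizeof(struct tlv_hdr) + len;
		buf_left -= sizeof(struct tlv_hdr) + len;
	}
}

int main(void)
{
	/* type 0x012a, len 2, payload {0xaa, 0xbb}: a made-up TLV */
	uint8_t buf[] = { 0x2a, 0x01, 0x02, 0x00, 0xaa, 0xbb };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}
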
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index b25766b43b9f..933dae137850 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -106,6 +106,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
card->supports_fw_dump = data->supports_fw_dump;
+ card->auto_tdls = data->auto_tdls;
}
sdio_claim_host(func);
@@ -1880,6 +1881,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
return -1;
}
+ adapter->auto_tdls = card->auto_tdls;
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 20cd9adc98d3..54c07156dd78 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -246,6 +246,7 @@ struct sdio_mmc_card {
u8 curr_wr_port;
u8 *mp_regs;
+ u8 auto_tdls;
struct mwifiex_sdio_mpa_tx mpa_tx;
struct mwifiex_sdio_mpa_rx mpa_rx;
@@ -262,6 +263,7 @@ struct mwifiex_sdio_device {
u16 tx_buf_size;
u32 mp_tx_agg_buf_size;
u32 mp_rx_agg_buf_size;
+ u8 auto_tdls;
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -387,6 +389,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
+ .auto_tdls = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -400,6 +403,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
+ .auto_tdls = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -413,6 +417,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
.supports_fw_dump = false,
+ .auto_tdls = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -426,6 +431,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.supports_fw_dump = true,
+ .auto_tdls = false,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
@@ -439,6 +445,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
.supports_fw_dump = false,
+ .auto_tdls = true,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 4aad44685f8d..b65e1014b0fc 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -983,7 +983,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
adapter->curr_cmd->wait_q_enabled = false;
break;
case HostCmd_CMD_802_11_SCAN_EXT:
- ret = mwifiex_ret_802_11_scan_ext(priv);
+ ret = mwifiex_ret_802_11_scan_ext(priv, resp);
adapter->curr_cmd->wait_q_enabled = false;
break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f1c240eca0cd..b8c171df6223 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -55,9 +55,13 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->scan_block = false;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
- ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) {
mwifiex_disable_all_tdls_links(priv);
+ if (priv->adapter->auto_tdls)
+ mwifiex_clean_auto_tdls(priv);
+ }
+
/* Free Tx and Rx packets, report disconnect to upper layer */
mwifiex_clean_txrx(priv);
@@ -163,9 +167,6 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
NL80211_TDLS_TEARDOWN,
le16_to_cpu(tdls_evt->u.reason_code),
GFP_KERNEL);
- ret = mwifiex_tdls_oper(priv, tdls_evt->peer_mac,
- MWIFIEX_TDLS_DISABLE_LINK);
- queue_work(adapter->workqueue, &adapter->main_work);
break;
default:
break;
@@ -503,6 +504,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
break;
+ case EVENT_TX_STATUS_REPORT:
+ dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+ mwifiex_parse_tx_status_event(priv, adapter->event_body);
+ break;
+
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 92f3eb839866..1626868a4b5c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -1026,12 +1026,12 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
int max_len)
{
union {
- u32 l;
+ __le32 l;
u8 c[4];
} ver;
char fw_ver[32];
- ver.l = adapter->fw_release_number;
+ ver.l = cpu_to_le32(adapter->fw_release_number);
sprintf(fw_ver, "%u.%u.%u.p%u", ver.c[2], ver.c[1], ver.c[0], ver.c[3]);
snprintf(version, max_len, driver_version, fw_ver);
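
The sta_ioctl.c hunk fixes the version-string decode for big-endian hosts: converting the release number to little-endian first gives the union's byte array a fixed meaning. A userspace sketch with an explicit little-endian store and a made-up release number:

#include <stdint.h>
#include <stdio.h>

static void put_le32(uint8_t c[4], uint32_t v)
{
	c[0] = v & 0xff;
	c[1] = (v >> 8) & 0xff;
	c[2] = (v >> 16) & 0xff;
	c[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t c[4];

	put_le32(c, 0x03041502);	/* made-up fw_release_number */
	/* same byte ordering as the driver's "%u.%u.%u.p%u" format */
	printf("%u.%u.%u.p%u\n", c[2], c[1], c[0], c[3]);	/* 4.21.2.p3 */
	return 0;
}
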
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 9ceb1dbe34c5..c2ad3b63ae70 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -232,6 +232,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
if (sta_ptr)
sta_ptr->rx_seq[local_rx_pd->priority] =
le16_to_cpu(local_rx_pd->seq_num);
+ mwifiex_auto_tdls_update_peer_signal(priv, ta,
+ local_rx_pd->snr,
+ local_rx_pd->nf);
}
} else {
if (rx_pkt_type != PKT_TYPE_BAR)
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index dab7b33c54be..b896d7375b52 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -77,6 +77,12 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
local_tx_pd->pkt_delay_2ms =
mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
+ tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
+ local_tx_pd->tx_token_id = tx_info->ack_frame_id;
+ local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
+ }
+
if (local_tx_pd->priority <
ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
/*
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index e2949077f5b5..22884b429be7 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -24,6 +24,7 @@
#define TDLS_REQ_FIX_LEN 6
#define TDLS_RESP_FIX_LEN 8
#define TDLS_CONFIRM_FIX_LEN 6
+#define MWIFIEX_TDLS_WMM_INFO_SIZE 7
static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
const u8 *mac, u8 status)
@@ -367,6 +368,55 @@ static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
*pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
}
+static void
+mwifiex_tdls_add_wmm_param_ie(struct mwifiex_private *priv, struct sk_buff *skb)
+{
+ struct ieee80211_wmm_param_ie *wmm;
+ u8 ac_vi[] = {0x42, 0x43, 0x5e, 0x00};
+ u8 ac_vo[] = {0x62, 0x32, 0x2f, 0x00};
+ u8 ac_be[] = {0x03, 0xa4, 0x00, 0x00};
+ u8 ac_bk[] = {0x27, 0xa4, 0x00, 0x00};
+
+ wmm = (void *)skb_put(skb, sizeof(*wmm));
+ memset(wmm, 0, sizeof(*wmm));
+
+ wmm->element_id = WLAN_EID_VENDOR_SPECIFIC;
+ wmm->len = sizeof(*wmm) - 2;
+ wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */
+ wmm->oui[1] = 0x50;
+ wmm->oui[2] = 0xf2;
+ wmm->oui_type = 2; /* WME */
+ wmm->oui_subtype = 1; /* WME param */
+ wmm->version = 1; /* WME ver */
+ wmm->qos_info = 0; /* U-APSD not in use */
+
+ /* use default WMM AC parameters for TDLS link */
+ memcpy(&wmm->ac[0], ac_be, sizeof(ac_be));
+ memcpy(&wmm->ac[1], ac_bk, sizeof(ac_bk));
+ memcpy(&wmm->ac[2], ac_vi, sizeof(ac_vi));
+ memcpy(&wmm->ac[3], ac_vo, sizeof(ac_vo));
+}
+
+static void
+mwifiex_add_wmm_info_ie(struct mwifiex_private *priv, struct sk_buff *skb,
+ u8 qosinfo)
+{
+ u8 *buf;
+
+ buf = (void *)skb_put(skb, MWIFIEX_TDLS_WMM_INFO_SIZE +
+ sizeof(struct ieee_types_header));
+
+ *buf++ = WLAN_EID_VENDOR_SPECIFIC;
+ *buf++ = 7; /* len */
+ *buf++ = 0x00; /* Microsoft OUI 00:50:F2 */
+ *buf++ = 0x50;
+ *buf++ = 0xf2;
+ *buf++ = 2; /* WME */
+ *buf++ = 0; /* WME info */
+ *buf++ = 1; /* WME ver */
+ *buf++ = qosinfo; /* U-APSD not in use */
+}
+
static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
const u8 *peer, u8 action_code,
u8 dialog_token,
@@ -421,6 +471,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
mwifiex_tdls_add_ext_capab(priv, skb);
mwifiex_tdls_add_qos_capab(skb);
+ mwifiex_add_wmm_info_ie(priv, skb, 0);
break;
case WLAN_TDLS_SETUP_RESPONSE:
@@ -458,6 +509,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
mwifiex_tdls_add_ext_capab(priv, skb);
mwifiex_tdls_add_qos_capab(skb);
+ mwifiex_add_wmm_info_ie(priv, skb, 0);
break;
case WLAN_TDLS_SETUP_CONFIRM:
@@ -466,6 +518,8 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
skb_put(skb, sizeof(tf->u.setup_cfm));
tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
tf->u.setup_cfm.dialog_token = dialog_token;
+
+ mwifiex_tdls_add_wmm_param_ie(priv, skb);
if (priv->adapter->is_hw_11ac_capable) {
ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
if (ret) {
@@ -544,6 +598,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
sizeof(struct ieee_types_bss_co_2040) +
sizeof(struct ieee80211_ht_operation) +
sizeof(struct ieee80211_tdls_lnkie) +
+ sizeof(struct ieee80211_wmm_param_ie) +
extra_ies_len;
if (priv->adapter->is_hw_11ac_capable)
@@ -973,6 +1028,7 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
}
mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+ mwifiex_auto_tdls_update_peer_status(priv, peer, TDLS_NOT_SETUP);
memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
@@ -1017,6 +1073,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
+ mwifiex_auto_tdls_update_peer_status(priv, peer,
+ TDLS_SETUP_COMPLETE);
} else {
dev_dbg(priv->adapter->dev,
"tdls: enable link %pM failed\n", peer);
@@ -1030,6 +1088,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
mwifiex_del_sta_entry(priv, peer);
}
mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+ mwifiex_auto_tdls_update_peer_status(priv, peer,
+ TDLS_NOT_SETUP);
return -1;
}
@@ -1097,3 +1157,231 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
mwifiex_del_all_sta_list(priv);
}
+
+int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
+{
+ struct mwifiex_auto_tdls_peer *peer;
+ unsigned long flags;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, skb->data);
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(peer, &priv->auto_tdls_list, list) {
+ if (!memcmp(mac, peer->mac_addr, ETH_ALEN)) {
+ if (peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH &&
+ peer->tdls_status == TDLS_NOT_SETUP &&
+ (peer->failure_count <
+ MWIFIEX_TDLS_MAX_FAIL_COUNT)) {
+ peer->tdls_status = TDLS_SETUP_INPROGRESS;
+ dev_dbg(priv->adapter->dev,
+ "setup TDLS link, peer=%pM rssi=%d\n",
+ peer->mac_addr, peer->rssi);
+
+ cfg80211_tdls_oper_request(priv->netdev,
+ peer->mac_addr,
+ NL80211_TDLS_SETUP,
+ 0, GFP_ATOMIC);
+ peer->do_setup = false;
+ priv->check_tdls_tx = false;
+ } else if (peer->failure_count <
+ MWIFIEX_TDLS_MAX_FAIL_COUNT &&
+ peer->do_discover) {
+ mwifiex_send_tdls_data_frame(priv,
+ peer->mac_addr,
+ WLAN_TDLS_DISCOVERY_REQUEST,
+ 1, 0, NULL, 0);
+ peer->do_discover = false;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+
+ return 0;
+}
+
+void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv)
+{
+ struct mwifiex_auto_tdls_peer *peer, *tmp_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry_safe(peer, tmp_node, &priv->auto_tdls_list, list) {
+ list_del(&peer->list);
+ kfree(peer);
+ }
+
+ INIT_LIST_HEAD(&priv->auto_tdls_list);
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ priv->check_tdls_tx = false;
+}
+
+void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac)
+{
+ struct mwifiex_auto_tdls_peer *tdls_peer;
+ unsigned long flags;
+
+ if (!priv->adapter->auto_tdls)
+ return;
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
+ if (!memcmp(tdls_peer->mac_addr, mac, ETH_ALEN)) {
+ tdls_peer->tdls_status = TDLS_SETUP_INPROGRESS;
+ tdls_peer->rssi_jiffies = jiffies;
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ return;
+ }
+ }
+
+ /* create new TDLS peer */
+ tdls_peer = kzalloc(sizeof(*tdls_peer), GFP_ATOMIC);
+ if (tdls_peer) {
+ ether_addr_copy(tdls_peer->mac_addr, mac);
+ tdls_peer->tdls_status = TDLS_SETUP_INPROGRESS;
+ tdls_peer->rssi_jiffies = jiffies;
+ INIT_LIST_HEAD(&tdls_peer->list);
+ list_add_tail(&tdls_peer->list, &priv->auto_tdls_list);
+ dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n",
+ mac);
+ }
+
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+}
+
+void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
+ const u8 *mac, u8 link_status)
+{
+ struct mwifiex_auto_tdls_peer *peer;
+ unsigned long flags;
+
+ if (!priv->adapter->auto_tdls)
+ return;
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(peer, &priv->auto_tdls_list, list) {
+ if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
+ if ((link_status == TDLS_NOT_SETUP) &&
+ (peer->tdls_status == TDLS_SETUP_INPROGRESS))
+ peer->failure_count++;
+ else if (link_status == TDLS_SETUP_COMPLETE)
+ peer->failure_count = 0;
+
+ peer->tdls_status = link_status;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+}
+
+void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
+ u8 *mac, s8 snr, s8 nflr)
+{
+ struct mwifiex_auto_tdls_peer *peer;
+ unsigned long flags;
+
+ if (!priv->adapter->auto_tdls)
+ return;
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(peer, &priv->auto_tdls_list, list) {
+ if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
+ peer->rssi = nflr - snr;
+ peer->rssi_jiffies = jiffies;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+}
+
+void mwifiex_check_auto_tdls(unsigned long context)
+{
+ struct mwifiex_private *priv = (struct mwifiex_private *)context;
+ struct mwifiex_auto_tdls_peer *tdls_peer;
+ unsigned long flags;
+ u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+ if (WARN_ON_ONCE(!priv || !priv->adapter)) {
+ pr_err("mwifiex: %s: adapter or private structure is NULL\n",
+ __func__);
+ return;
+ }
+
+ if (unlikely(!priv->adapter->auto_tdls))
+ return;
+
+ if (!priv->auto_tdls_timer_active) {
+ dev_dbg(priv->adapter->dev,
+ "auto TDLS timer inactive; return");
+ return;
+ }
+
+ priv->check_tdls_tx = false;
+
+ if (list_empty(&priv->auto_tdls_list)) {
+ mod_timer(&priv->auto_tdls_timer,
+ jiffies +
+ msecs_to_jiffies(MWIFIEX_TIMER_10S));
+ return;
+ }
+
+ spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
+ if ((jiffies - tdls_peer->rssi_jiffies) >
+ (MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) {
+ tdls_peer->rssi = 0;
+ tdls_peer->do_discover = true;
+ priv->check_tdls_tx = true;
+ }
+
+ if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) ||
+ !tdls_peer->rssi) &&
+ tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
+ tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
+ dev_dbg(priv->adapter->dev,
+ "teardown TDLS link,peer=%pM rssi=%d\n",
+ tdls_peer->mac_addr, -tdls_peer->rssi);
+ tdls_peer->do_discover = true;
+ priv->check_tdls_tx = true;
+ cfg80211_tdls_oper_request(priv->netdev,
+ tdls_peer->mac_addr,
+ NL80211_TDLS_TEARDOWN,
+ reason, GFP_ATOMIC);
+ } else if (tdls_peer->rssi &&
+ tdls_peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH &&
+ tdls_peer->tdls_status == TDLS_NOT_SETUP &&
+ tdls_peer->failure_count <
+ MWIFIEX_TDLS_MAX_FAIL_COUNT) {
+ priv->check_tdls_tx = true;
+ tdls_peer->do_setup = true;
+ dev_dbg(priv->adapter->dev,
+ "check TDLS with peer=%pM rssi=%d\n",
+ tdls_peer->mac_addr, -tdls_peer->rssi);
+ }
+ }
+ spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+
+ mod_timer(&priv->auto_tdls_timer,
+ jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+}
+
+void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv)
+{
+ init_timer(&priv->auto_tdls_timer);
+ priv->auto_tdls_timer.function = mwifiex_check_auto_tdls;
+ priv->auto_tdls_timer.data = (unsigned long)priv;
+ priv->auto_tdls_timer_active = true;
+ mod_timer(&priv->auto_tdls_timer,
+ jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+}
+
+void mwifiex_clean_auto_tdls(struct mwifiex_private *priv)
+{
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->auto_tdls &&
+ priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
+ priv->auto_tdls_timer_active = false;
+ del_timer(&priv->auto_tdls_timer);
+ mwifiex_flush_auto_tdls_list(priv);
+ }
+}
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 96a2126cc44b..6ae133333363 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -64,10 +64,6 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
else
ret = mwifiex_process_sta_rx_packet(priv, skb);
- /* Decrement RX pending counter for each packet */
- if (adapter->if_ops.data_complete)
- adapter->if_ops.data_complete(adapter);
-
return ret;
}
EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -207,3 +203,34 @@ done:
}
EXPORT_SYMBOL_GPL(mwifiex_write_data_complete);
+void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
+ void *event_body)
+{
+ struct tx_status_event *tx_status = (void *)priv->adapter->event_body;
+ struct sk_buff *ack_skb;
+ unsigned long flags;
+ struct mwifiex_txinfo *tx_info;
+
+ if (!tx_status->tx_token_id)
+ return;
+
+ spin_lock_irqsave(&priv->ack_status_lock, flags);
+ ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id);
+ if (ack_skb)
+ idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
+ spin_unlock_irqrestore(&priv->ack_status_lock, flags);
+
+ if (ack_skb) {
+ tx_info = MWIFIEX_SKB_TXCB(ack_skb);
+
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS) {
+ /* consumes ack_skb */
+ skb_complete_wifi_ack(ack_skb, !tx_status->status);
+ } else {
+ cfg80211_mgmt_tx_status(priv->wdev, tx_info->cookie,
+ ack_skb->data, ack_skb->len,
+ !tx_status->status, GFP_ATOMIC);
+ dev_kfree_skb_any(ack_skb);
+ }
+ }
+}
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 300bab438011..0f347fdefa0a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,7 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
- memcpy(&bss_cfg->ht_cap, ht_ie,
+ memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
memset(&bss_cfg->ht_cap.mcs, 0,
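
For context on the uap_cmd.c hunk: cfg80211_find_ie() returns a pointer to the element-ID byte, and an 802.11 IE is ID (1 byte), length (1 byte), then the body, so the HT capability fields start two bytes in. A tiny demo of that layout (IE body truncated, values made up):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	/* WLAN_EID_HT_CAPABILITY is 45, HT caps body is 26 bytes */
	const uint8_t ie[] = { 45, 26, 0x6f, 0x01, 0x17 /* ... */ };
	uint8_t cap_info[2];

	memcpy(cap_info, ie + 2, sizeof(cap_info)); /* skip ID and length */
	printf("cap_info = %#x%02x\n", cap_info[1], cap_info[0]);
	return 0;
}
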
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 7c2b97660a03..c54a537e31fb 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -110,6 +110,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
}
+ mwifiex_wmm_del_peer_ra_list(priv, deauth_mac);
mwifiex_del_sta_entry(priv, deauth_mac);
break;
case EVENT_UAP_BSS_IDLE:
@@ -172,6 +173,10 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
return mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
break;
+ case EVENT_TX_STATUS_REPORT:
+ dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+ mwifiex_parse_tx_status_event(priv, adapter->event_body);
+ break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index ec7309d096ab..be3a203a529b 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -266,6 +266,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
struct rx_packet_hdr *rx_pkt_hdr;
u16 rx_pkt_type;
u8 ta[ETH_ALEN], pkt_type;
+ unsigned long flags;
struct mwifiex_sta_node *node;
uap_rx_pd = (struct uap_rxpd *)(skb->data);
@@ -294,10 +295,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
node = mwifiex_get_sta_entry(priv, ta);
if (node)
node->rx_seq[uap_rx_pd->priority] =
le16_to_cpu(uap_rx_pd->seq_num);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
if (!priv->ap_11n_enabled ||
@@ -370,10 +373,16 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
txpd->bss_num = priv->bss_num;
txpd->bss_type = priv->bss_type;
txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len));
-
txpd->priority = (u8)skb->priority;
+
txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
+ tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
+ txpd->tx_token_id = tx_info->ack_frame_id;
+ txpd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
+ }
+
if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
/*
* Set the priority specific tx_control field, setting of 0 will
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 4371e12b36f3..1b56495ec872 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -27,6 +27,11 @@ static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
static struct usb_device_id mwifiex_usb_table[] = {
+ /* 8766 */
+ {USB_DEVICE(USB8XXX_VID, USB8766_PID_1)},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8766_PID_2,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0xff)},
/* 8797 */
{USB_DEVICE(USB8XXX_VID, USB8797_PID_1)},
{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2,
@@ -125,8 +130,10 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
dev_err(dev, "DATA: skb->len too large\n");
return -1;
}
- skb_queue_tail(&adapter->usb_rx_data_q, skb);
+
+ skb_queue_tail(&adapter->rx_data_q, skb);
adapter->data_received = true;
+ atomic_inc(&adapter->rx_pending);
break;
default:
dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
@@ -176,7 +183,6 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
else
skb_put(skb, recv_length - skb->len);
- atomic_inc(&adapter->rx_pending);
status = mwifiex_usb_recv(adapter, skb, context->ep);
dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
@@ -191,7 +197,6 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
if (card->rx_cmd_ep == context->ep)
return;
} else {
- atomic_dec(&adapter->rx_pending);
if (status == -1)
dev_err(adapter->dev,
"received data processing failed!\n");
@@ -222,7 +227,13 @@ setup_for_next:
else
size = MWIFIEX_RX_DATA_BUF_SIZE;
- mwifiex_usb_submit_rx_urb(context, size);
+ if (card->rx_cmd_ep == context->ep) {
+ mwifiex_usb_submit_rx_urb(context, size);
+ } else {
+ context->skb = NULL;
+ if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING)
+ mwifiex_usb_submit_rx_urb(context, size);
+ }
return;
}
@@ -348,10 +359,12 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
/* PID_1 is used for firmware downloading only */
switch (id_product) {
+ case USB8766_PID_1:
case USB8797_PID_1:
case USB8897_PID_1:
card->usb_boot_state = USB8XXX_FW_DNLD;
break;
+ case USB8766_PID_2:
case USB8797_PID_2:
case USB8897_PID_2:
card->usb_boot_state = USB8XXX_FW_READY;
@@ -780,6 +793,11 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
break;
+ case USB8766_PID_1:
+ case USB8766_PID_2:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
+ strcpy(adapter->fw_name, USB8766_DEFAULT_FW_NAME);
+ break;
case USB8797_PID_1:
case USB8797_PID_2:
default:
@@ -962,19 +980,11 @@ static void mwifiex_submit_rx_urb(struct mwifiex_adapter *adapter, u8 ep)
static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
- atomic_dec(&adapter->rx_pending);
mwifiex_submit_rx_urb(adapter, MWIFIEX_USB_EP_CMD_EVENT);
return 0;
}
-static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter)
-{
- atomic_dec(&adapter->rx_pending);
-
- return 0;
-}
-
/* This function wakes up the card. */
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
@@ -986,6 +996,20 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
return 0;
}
+static void mwifiex_usb_submit_rem_rx_urbs(struct mwifiex_adapter *adapter)
+{
+ struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ int i;
+ struct urb_context *ctx;
+
+ for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
+ if (card->rx_data_list[i].skb)
+ continue;
+ ctx = &card->rx_data_list[i];
+ mwifiex_usb_submit_rx_urb(ctx, MWIFIEX_RX_DATA_BUF_SIZE);
+ }
+}
+
static struct mwifiex_if_ops usb_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
@@ -996,8 +1020,8 @@ static struct mwifiex_if_ops usb_ops = {
.dnld_fw = mwifiex_usb_dnld_fw,
.cmdrsp_complete = mwifiex_usb_cmd_event_complete,
.event_complete = mwifiex_usb_cmd_event_complete,
- .data_complete = mwifiex_usb_data_complete,
.host_to_card = mwifiex_usb_host_to_card,
+ .submit_rem_rx_urbs = mwifiex_usb_submit_rem_rx_urbs,
};
/* This function initializes the USB driver module.
@@ -1048,5 +1072,6 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
MODULE_VERSION(USB_VERSION);
MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
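
The USB rework parks a data URB (context->skb = NULL) instead of resubmitting it once rx_pending passes the high watermark; mwifiex_usb_submit_rem_rx_urbs() later revives exactly the parked slots once the main loop has drained the backlog. A userspace model of that slot scan, where an in_flight flag stands in for a URB with an skb attached:

#include <stdbool.h>
#include <stdio.h>

#define NUM_RX_DATA_URB 6	/* mirrors MWIFIEX_RX_DATA_URB */

struct urb_slot { bool in_flight; };

static int submit_remaining(struct urb_slot *s, int n)
{
	int submitted = 0;

	for (int i = 0; i < n; i++) {
		if (s[i].in_flight)	/* skb still attached: URB is live */
			continue;
		s[i].in_flight = true;	/* stands in for usb_submit_urb() */
		submitted++;
	}
	return submitted;
}

int main(void)
{
	struct urb_slot slots[NUM_RX_DATA_URB] =
		{ {true}, {false}, {true}, {false}, {false}, {true} };

	printf("resubmitted %d urbs\n",
	       submit_remaining(slots, NUM_RX_DATA_URB));	/* 3 */
	return 0;
}
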
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index 4c41c2a193c5..a7cbba1355af 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -24,6 +24,8 @@
#define USB8XXX_VID 0x1286
+#define USB8766_PID_1 0x2041
+#define USB8766_PID_2 0x2042
#define USB8797_PID_1 0x2043
#define USB8797_PID_2 0x2044
#define USB8897_PID_1 0x2045
@@ -37,6 +39,7 @@
#define MWIFIEX_RX_DATA_URB 6
#define MWIFIEX_USB_TIMEOUT 100
+#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin"
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index ec79c49de097..b1768fbf98f2 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -141,6 +141,38 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
+ struct rxpd *rx_pd)
+{
+ u16 stype;
+ u8 category, action_code;
+ struct ieee80211_hdr *ieee_hdr = (void *)payload;
+
+ stype = (le16_to_cpu(ieee_hdr->frame_control) & IEEE80211_FCTL_STYPE);
+
+ switch (stype) {
+ case IEEE80211_STYPE_ACTION:
+ category = *(payload + sizeof(struct ieee80211_hdr));
+ action_code = *(payload + sizeof(struct ieee80211_hdr) + 1);
+ if (category == WLAN_CATEGORY_PUBLIC &&
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
+ dev_dbg(priv->adapter->dev,
+ "TDLS discovery response %pM nf=%d, snr=%d\n",
+ ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
+ mwifiex_auto_tdls_update_peer_signal(priv,
+ ieee_hdr->addr2,
+ rx_pd->snr,
+ rx_pd->nf);
+ }
+ break;
+ default:
+ dev_dbg(priv->adapter->dev,
+ "unknown mgmt frame subytpe %#x\n", stype);
+ }
+
+ return 0;
+}
/*
* This function processes the received management packet and send it
* to the kernel.
@@ -151,6 +183,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
{
struct rxpd *rx_pd;
u16 pkt_len;
+ struct ieee80211_hdr *ieee_hdr;
if (!skb)
return -1;
@@ -162,6 +195,11 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
+ ieee_hdr = (void *)skb->data;
+ if (ieee80211_is_mgmt(ieee_hdr->frame_control)) {
+ mwifiex_parse_mgmt_packet(priv, (u8 *)ieee_hdr,
+ pkt_len, rx_pd);
+ }
/* Remove address4 */
memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
skb->data + sizeof(struct ieee80211_hdr),
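
mwifiex_parse_mgmt_packet() above picks the TDLS discovery response out of incoming action frames by subtype, category, and action code. A userspace sketch of the same test; it assumes a 3-address header for brevity, whereas the driver's frames carry a fourth address and index past sizeof(struct ieee80211_hdr):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define FCTL_STYPE    0x00f0
#define STYPE_ACTION  0x00d0
#define HDR3_LEN      24	/* fc + duration + 3 addrs + seq */
#define CAT_PUBLIC    4		/* WLAN_CATEGORY_PUBLIC */
#define TDLS_DISC_RSP 14	/* WLAN_PUB_ACTION_TDLS_DISCOVER_RES */

static int is_tdls_discovery_resp(const uint8_t *f, size_t len)
{
	uint16_t fc;

	if (len < HDR3_LEN + 2)
		return 0;
	fc = f[0] | (f[1] << 8);	/* frame_control, little-endian */
	return (fc & FCTL_STYPE) == STYPE_ACTION &&
	       f[HDR3_LEN] == CAT_PUBLIC &&
	       f[HDR3_LEN + 1] == TDLS_DISC_RSP;
}

int main(void)
{
	uint8_t frame[HDR3_LEN + 2] = { 0xd0, 0x00 };	/* action frame */

	frame[HDR3_LEN] = CAT_PUBLIC;
	frame[HDR3_LEN + 1] = TDLS_DISC_RSP;
	printf("tdls disc resp: %d\n",
	       is_tdls_discovery_resp(frame, sizeof(frame)));
	return 0;
}
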
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 94c98a86ebbe..ffffd2c5a76e 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -147,9 +147,6 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
struct mwifiex_sta_node *node;
unsigned long flags;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
- node = mwifiex_get_sta_entry(priv, ra);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -170,10 +167,13 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
}
} else {
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ node = mwifiex_get_sta_entry(priv, ra);
ra_list->is_11n_enabled =
mwifiex_is_sta_11n_enabled(priv, node);
if (ra_list->is_11n_enabled)
ra_list->max_amsdu = node->max_amsdu;
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
@@ -523,6 +523,13 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
}
}
+static int mwifiex_free_ack_frame(int id, void *p, void *data)
+{
+ pr_warn("Have pending ack frames!\n");
+ kfree_skb(p);
+ return 0;
+}
+
/*
* This function cleans up the Tx and Rx queues.
*
@@ -558,6 +565,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
+ idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
+ idr_destroy(&priv->ack_status_frames);
}
/*
@@ -601,6 +611,32 @@ mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
}
/*
+ * This function deletes the RA list nodes for a given MAC address across
+ * all TIDs and decrements the TX pending count accordingly.
+ */
+void
+mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; ++i) {
+ ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
+
+ if (!ra_list)
+ continue;
+ mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
+ atomic_sub(ra_list->total_pkt_count, &priv->wmm.tx_pkts_queued);
+ list_del(&ra_list->list);
+ kfree(ra_list);
+ }
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+/*
* This function checks if a particular RA list node exists in a given TID
* table index.
*/
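
mwifiex_wmm_del_peer_ra_list() above must subtract each list's queued-packet count from the adapter-wide total before freeing it, or tx_pkts_queued would drift. A toy model of that bookkeeping, with an array standing in for the per-TID lists:

#include <stdio.h>

#define MAX_NUM_TID 8

struct ra_list { int total_pkt_count; int in_use; };

static void del_peer_ra_lists(struct ra_list lists[MAX_NUM_TID],
			      int *tx_pkts_queued)
{
	for (int i = 0; i < MAX_NUM_TID; i++) {
		if (!lists[i].in_use)
			continue;
		*tx_pkts_queued -= lists[i].total_pkt_count;
		lists[i] = (struct ra_list){0};	/* stands in for list_del+kfree */
	}
}

int main(void)
{
	struct ra_list lists[MAX_NUM_TID] = { [0] = {3, 1}, [5] = {2, 1} };
	int queued = 10;

	del_peer_ra_lists(lists, &queued);
	printf("tx_pkts_queued now %d\n", queued);	/* 5 */
	return 0;
}
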
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index ef1104476bd8..b8d1e04aa9b9 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5548,7 +5548,9 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return rc;
}
-static void mwl8k_sw_scan_start(struct ieee80211_hw *hw)
+static void mwl8k_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct mwl8k_priv *priv = hw->priv;
u8 tmp;
@@ -5565,7 +5567,8 @@ static void mwl8k_sw_scan_start(struct ieee80211_hw *hw)
priv->sw_scan_start = true;
}
-static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw)
+static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
u8 tmp;
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
deleted file mode 100644
index aedfaf24f386..000000000000
--- a/drivers/net/wireless/p54/net2280.h
+++ /dev/null
@@ -1,451 +0,0 @@
-#ifndef NET2280_H
-#define NET2280_H
-/*
- * NetChip 2280 high/full speed USB device controller.
- * Unlike many such controllers, this one talks PCI.
- */
-
-/*
- * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
- * Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*-------------------------------------------------------------------------*/
-
-/* NET2280 MEMORY MAPPED REGISTERS
- *
- * The register layout came from the chip documentation, and the bit
- * number definitions were extracted from chip specification.
- *
- * Use the shift operator ('<<') to build bit masks, with readl/writel
- * to access the registers through PCI.
- */
-
-/* main registers, BAR0 + 0x0000 */
-struct net2280_regs {
- /* offset 0x0000 */
- __le32 devinit;
-#define LOCAL_CLOCK_FREQUENCY 8
-#define FORCE_PCI_RESET 7
-#define PCI_ID 6
-#define PCI_ENABLE 5
-#define FIFO_SOFT_RESET 4
-#define CFG_SOFT_RESET 3
-#define PCI_SOFT_RESET 2
-#define USB_SOFT_RESET 1
-#define M8051_RESET 0
- __le32 eectl;
-#define EEPROM_ADDRESS_WIDTH 23
-#define EEPROM_CHIP_SELECT_ACTIVE 22
-#define EEPROM_PRESENT 21
-#define EEPROM_VALID 20
-#define EEPROM_BUSY 19
-#define EEPROM_CHIP_SELECT_ENABLE 18
-#define EEPROM_BYTE_READ_START 17
-#define EEPROM_BYTE_WRITE_START 16
-#define EEPROM_READ_DATA 8
-#define EEPROM_WRITE_DATA 0
- __le32 eeclkfreq;
- u32 _unused0;
- /* offset 0x0010 */
-
- __le32 pciirqenb0; /* interrupt PCI master ... */
-#define SETUP_PACKET_INTERRUPT_ENABLE 7
-#define ENDPOINT_F_INTERRUPT_ENABLE 6
-#define ENDPOINT_E_INTERRUPT_ENABLE 5
-#define ENDPOINT_D_INTERRUPT_ENABLE 4
-#define ENDPOINT_C_INTERRUPT_ENABLE 3
-#define ENDPOINT_B_INTERRUPT_ENABLE 2
-#define ENDPOINT_A_INTERRUPT_ENABLE 1
-#define ENDPOINT_0_INTERRUPT_ENABLE 0
- __le32 pciirqenb1;
-#define PCI_INTERRUPT_ENABLE 31
-#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
-#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
-#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
-#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
-#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
-#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18
-#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
-#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
-#define GPIO_INTERRUPT_ENABLE 13
-#define DMA_D_INTERRUPT_ENABLE 12
-#define DMA_C_INTERRUPT_ENABLE 11
-#define DMA_B_INTERRUPT_ENABLE 10
-#define DMA_A_INTERRUPT_ENABLE 9
-#define EEPROM_DONE_INTERRUPT_ENABLE 8
-#define VBUS_INTERRUPT_ENABLE 7
-#define CONTROL_STATUS_INTERRUPT_ENABLE 6
-#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
-#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
-#define RESUME_INTERRUPT_ENABLE 1
-#define SOF_INTERRUPT_ENABLE 0
- __le32 cpu_irqenb0; /* ... or onboard 8051 */
-#define SETUP_PACKET_INTERRUPT_ENABLE 7
-#define ENDPOINT_F_INTERRUPT_ENABLE 6
-#define ENDPOINT_E_INTERRUPT_ENABLE 5
-#define ENDPOINT_D_INTERRUPT_ENABLE 4
-#define ENDPOINT_C_INTERRUPT_ENABLE 3
-#define ENDPOINT_B_INTERRUPT_ENABLE 2
-#define ENDPOINT_A_INTERRUPT_ENABLE 1
-#define ENDPOINT_0_INTERRUPT_ENABLE 0
- __le32 cpu_irqenb1;
-#define CPU_INTERRUPT_ENABLE 31
-#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
-#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
-#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
-#define PCI_INTA_INTERRUPT_ENABLE 24
-#define PCI_PME_INTERRUPT_ENABLE 23
-#define PCI_SERR_INTERRUPT_ENABLE 22
-#define PCI_PERR_INTERRUPT_ENABLE 21
-#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
-#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
-#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
-#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
-#define GPIO_INTERRUPT_ENABLE 13
-#define DMA_D_INTERRUPT_ENABLE 12
-#define DMA_C_INTERRUPT_ENABLE 11
-#define DMA_B_INTERRUPT_ENABLE 10
-#define DMA_A_INTERRUPT_ENABLE 9
-#define EEPROM_DONE_INTERRUPT_ENABLE 8
-#define VBUS_INTERRUPT_ENABLE 7
-#define CONTROL_STATUS_INTERRUPT_ENABLE 6
-#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
-#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
-#define RESUME_INTERRUPT_ENABLE 1
-#define SOF_INTERRUPT_ENABLE 0
-
- /* offset 0x0020 */
- u32 _unused1;
- __le32 usbirqenb1;
-#define USB_INTERRUPT_ENABLE 31
-#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
-#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
-#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
-#define PCI_INTA_INTERRUPT_ENABLE 24
-#define PCI_PME_INTERRUPT_ENABLE 23
-#define PCI_SERR_INTERRUPT_ENABLE 22
-#define PCI_PERR_INTERRUPT_ENABLE 21
-#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
-#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
-#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
-#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
-#define GPIO_INTERRUPT_ENABLE 13
-#define DMA_D_INTERRUPT_ENABLE 12
-#define DMA_C_INTERRUPT_ENABLE 11
-#define DMA_B_INTERRUPT_ENABLE 10
-#define DMA_A_INTERRUPT_ENABLE 9
-#define EEPROM_DONE_INTERRUPT_ENABLE 8
-#define VBUS_INTERRUPT_ENABLE 7
-#define CONTROL_STATUS_INTERRUPT_ENABLE 6
-#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
-#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
-#define RESUME_INTERRUPT_ENABLE 1
-#define SOF_INTERRUPT_ENABLE 0
- __le32 irqstat0;
-#define INTA_ASSERTED 12
-#define SETUP_PACKET_INTERRUPT 7
-#define ENDPOINT_F_INTERRUPT 6
-#define ENDPOINT_E_INTERRUPT 5
-#define ENDPOINT_D_INTERRUPT 4
-#define ENDPOINT_C_INTERRUPT 3
-#define ENDPOINT_B_INTERRUPT 2
-#define ENDPOINT_A_INTERRUPT 1
-#define ENDPOINT_0_INTERRUPT 0
- __le32 irqstat1;
-#define POWER_STATE_CHANGE_INTERRUPT 27
-#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
-#define PCI_PARITY_ERROR_INTERRUPT 25
-#define PCI_INTA_INTERRUPT 24
-#define PCI_PME_INTERRUPT 23
-#define PCI_SERR_INTERRUPT 22
-#define PCI_PERR_INTERRUPT 21
-#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20
-#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19
-#define PCI_RETRY_ABORT_INTERRUPT 17
-#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16
-#define GPIO_INTERRUPT 13
-#define DMA_D_INTERRUPT 12
-#define DMA_C_INTERRUPT 11
-#define DMA_B_INTERRUPT 10
-#define DMA_A_INTERRUPT 9
-#define EEPROM_DONE_INTERRUPT 8
-#define VBUS_INTERRUPT 7
-#define CONTROL_STATUS_INTERRUPT 6
-#define ROOT_PORT_RESET_INTERRUPT 4
-#define SUSPEND_REQUEST_INTERRUPT 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2
-#define RESUME_INTERRUPT 1
-#define SOF_INTERRUPT 0
- /* offset 0x0030 */
- __le32 idxaddr;
- __le32 idxdata;
- __le32 fifoctl;
-#define PCI_BASE2_RANGE 16
-#define IGNORE_FIFO_AVAILABILITY 3
-#define PCI_BASE2_SELECT 2
-#define FIFO_CONFIGURATION_SELECT 0
- u32 _unused2;
- /* offset 0x0040 */
- __le32 memaddr;
-#define START 28
-#define DIRECTION 27
-#define FIFO_DIAGNOSTIC_SELECT 24
-#define MEMORY_ADDRESS 0
- __le32 memdata0;
- __le32 memdata1;
- u32 _unused3;
- /* offset 0x0050 */
- __le32 gpioctl;
-#define GPIO3_LED_SELECT 12
-#define GPIO3_INTERRUPT_ENABLE 11
-#define GPIO2_INTERRUPT_ENABLE 10
-#define GPIO1_INTERRUPT_ENABLE 9
-#define GPIO0_INTERRUPT_ENABLE 8
-#define GPIO3_OUTPUT_ENABLE 7
-#define GPIO2_OUTPUT_ENABLE 6
-#define GPIO1_OUTPUT_ENABLE 5
-#define GPIO0_OUTPUT_ENABLE 4
-#define GPIO3_DATA 3
-#define GPIO2_DATA 2
-#define GPIO1_DATA 1
-#define GPIO0_DATA 0
- __le32 gpiostat;
-#define GPIO3_INTERRUPT 3
-#define GPIO2_INTERRUPT 2
-#define GPIO1_INTERRUPT 1
-#define GPIO0_INTERRUPT 0
-} __packed;
-
-/* usb control, BAR0 + 0x0080 */
-struct net2280_usb_regs {
- /* offset 0x0080 */
- __le32 stdrsp;
-#define STALL_UNSUPPORTED_REQUESTS 31
-#define SET_TEST_MODE 16
-#define GET_OTHER_SPEED_CONFIGURATION 15
-#define GET_DEVICE_QUALIFIER 14
-#define SET_ADDRESS 13
-#define ENDPOINT_SET_CLEAR_HALT 12
-#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11
-#define GET_STRING_DESCRIPTOR_2 10
-#define GET_STRING_DESCRIPTOR_1 9
-#define GET_STRING_DESCRIPTOR_0 8
-#define GET_SET_INTERFACE 6
-#define GET_SET_CONFIGURATION 5
-#define GET_CONFIGURATION_DESCRIPTOR 4
-#define GET_DEVICE_DESCRIPTOR 3
-#define GET_ENDPOINT_STATUS 2
-#define GET_INTERFACE_STATUS 1
-#define GET_DEVICE_STATUS 0
- __le32 prodvendid;
-#define PRODUCT_ID 16
-#define VENDOR_ID 0
- __le32 relnum;
- __le32 usbctl;
-#define SERIAL_NUMBER_INDEX 16
-#define PRODUCT_ID_STRING_ENABLE 13
-#define VENDOR_ID_STRING_ENABLE 12
-#define USB_ROOT_PORT_WAKEUP_ENABLE 11
-#define VBUS_PIN 10
-#define TIMED_DISCONNECT 9
-#define SUSPEND_IMMEDIATELY 7
-#define SELF_POWERED_USB_DEVICE 6
-#define REMOTE_WAKEUP_SUPPORT 5
-#define PME_POLARITY 4
-#define USB_DETECT_ENABLE 3
-#define PME_WAKEUP_ENABLE 2
-#define DEVICE_REMOTE_WAKEUP_ENABLE 1
-#define SELF_POWERED_STATUS 0
- /* offset 0x0090 */
- __le32 usbstat;
-#define HIGH_SPEED 7
-#define FULL_SPEED 6
-#define GENERATE_RESUME 5
-#define GENERATE_DEVICE_REMOTE_WAKEUP 4
- __le32 xcvrdiag;
-#define FORCE_HIGH_SPEED_MODE 31
-#define FORCE_FULL_SPEED_MODE 30
-#define USB_TEST_MODE 24
-#define LINE_STATE 16
-#define TRANSCEIVER_OPERATION_MODE 2
-#define TRANSCEIVER_SELECT 1
-#define TERMINATION_SELECT 0
- __le32 setup0123;
- __le32 setup4567;
- /* offset 0x00a0 */
- u32 _unused0;
- __le32 ouraddr;
-#define FORCE_IMMEDIATE 7
-#define OUR_USB_ADDRESS 0
- __le32 ourconfig;
-} __packed;
-
-/* pci control, BAR0 + 0x0100 */
-struct net2280_pci_regs {
- /* offset 0x0100 */
- __le32 pcimstctl;
-#define PCI_ARBITER_PARK_SELECT 13
-#define PCI_MULTI LEVEL_ARBITER 12
-#define PCI_RETRY_ABORT_ENABLE 11
-#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10
-#define DMA_READ_MULTIPLE_ENABLE 9
-#define DMA_READ_LINE_ENABLE 8
-#define PCI_MASTER_COMMAND_SELECT 6
-#define MEM_READ_OR_WRITE 0
-#define IO_READ_OR_WRITE 1
-#define CFG_READ_OR_WRITE 2
-#define PCI_MASTER_START 5
-#define PCI_MASTER_READ_WRITE 4
-#define PCI_MASTER_WRITE 0
-#define PCI_MASTER_READ 1
-#define PCI_MASTER_BYTE_WRITE_ENABLES 0
- __le32 pcimstaddr;
- __le32 pcimstdata;
- __le32 pcimststat;
-#define PCI_ARBITER_CLEAR 2
-#define PCI_EXTERNAL_ARBITER 1
-#define PCI_HOST_MODE 0
-} __packed;
-
-/* dma control, BAR0 + 0x0180 ... array of four structs like this,
- * for channels 0..3. see also struct net2280_dma: descriptor
- * that can be loaded into some of these registers.
- */
-struct net2280_dma_regs { /* [11.7] */
- /* offset 0x0180, 0x01a0, 0x01c0, 0x01e0, */
- __le32 dmactl;
-#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25
-#define DMA_CLEAR_COUNT_ENABLE 21
-#define DESCRIPTOR_POLLING_RATE 19
-#define POLL_CONTINUOUS 0
-#define POLL_1_USEC 1
-#define POLL_100_USEC 2
-#define POLL_1_MSEC 3
-#define DMA_VALID_BIT_POLLING_ENABLE 18
-#define DMA_VALID_BIT_ENABLE 17
-#define DMA_SCATTER_GATHER_ENABLE 16
-#define DMA_OUT_AUTO_START_ENABLE 4
-#define DMA_PREEMPT_ENABLE 3
-#define DMA_FIFO_VALIDATE 2
-#define DMA_ENABLE 1
-#define DMA_ADDRESS_HOLD 0
- __le32 dmastat;
-#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25
-#define DMA_TRANSACTION_DONE_INTERRUPT 24
-#define DMA_ABORT 1
-#define DMA_START 0
- u32 _unused0[2];
- /* offset 0x0190, 0x01b0, 0x01d0, 0x01f0, */
- __le32 dmacount;
-#define VALID_BIT 31
-#define DMA_DIRECTION 30
-#define DMA_DONE_INTERRUPT_ENABLE 29
-#define END_OF_CHAIN 28
-#define DMA_BYTE_COUNT_MASK ((1<<24)-1)
-#define DMA_BYTE_COUNT 0
- __le32 dmaaddr;
- __le32 dmadesc;
- u32 _unused1;
-} __packed;
-
-/* dedicated endpoint registers, BAR0 + 0x0200 */
-
-struct net2280_dep_regs { /* [11.8] */
- /* offset 0x0200, 0x0210, 0x220, 0x230, 0x240 */
- __le32 dep_cfg;
- /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */
- __le32 dep_rsp;
- u32 _unused[2];
-} __packed;
-
-/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
- * like this, for ep0 then the configurable endpoints A..F
- * ep0 reserved for control; E and F have only 64 bytes of fifo
- */
-struct net2280_ep_regs { /* [11.9] */
- /* offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 */
- __le32 ep_cfg;
-#define ENDPOINT_BYTE_COUNT 16
-#define ENDPOINT_ENABLE 10
-#define ENDPOINT_TYPE 8
-#define ENDPOINT_DIRECTION 7
-#define ENDPOINT_NUMBER 0
- __le32 ep_rsp;
-#define SET_NAK_OUT_PACKETS 15
-#define SET_EP_HIDE_STATUS_PHASE 14
-#define SET_EP_FORCE_CRC_ERROR 13
-#define SET_INTERRUPT_MODE 12
-#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11
-#define SET_NAK_OUT_PACKETS_MODE 10
-#define SET_ENDPOINT_TOGGLE 9
-#define SET_ENDPOINT_HALT 8
-#define CLEAR_NAK_OUT_PACKETS 7
-#define CLEAR_EP_HIDE_STATUS_PHASE 6
-#define CLEAR_EP_FORCE_CRC_ERROR 5
-#define CLEAR_INTERRUPT_MODE 4
-#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3
-#define CLEAR_NAK_OUT_PACKETS_MODE 2
-#define CLEAR_ENDPOINT_TOGGLE 1
-#define CLEAR_ENDPOINT_HALT 0
- __le32 ep_irqenb;
-#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6
-#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5
-#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
-#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
-#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1
-#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
- __le32 ep_stat;
-#define FIFO_VALID_COUNT 24
-#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22
-#define TIMEOUT 21
-#define USB_STALL_SENT 20
-#define USB_IN_NAK_SENT 19
-#define USB_IN_ACK_RCVD 18
-#define USB_OUT_PING_NAK_SENT 17
-#define USB_OUT_ACK_SENT 16
-#define FIFO_OVERFLOW 13
-#define FIFO_UNDERFLOW 12
-#define FIFO_FULL 11
-#define FIFO_EMPTY 10
-#define FIFO_FLUSH 9
-#define SHORT_PACKET_OUT_DONE_INTERRUPT 6
-#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5
-#define NAK_OUT_PACKETS 4
-#define DATA_PACKET_RECEIVED_INTERRUPT 3
-#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
-#define DATA_OUT_PING_TOKEN_INTERRUPT 1
-#define DATA_IN_TOKEN_INTERRUPT 0
- /* offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 */
- __le32 ep_avail;
- __le32 ep_data;
- u32 _unused0[2];
-} __packed;
-
-struct net2280_reg_write {
- __le16 port;
- __le32 addr;
- __le32 val;
-} __packed;
-
-struct net2280_reg_read {
- __le16 port;
- __le32 addr;
-} __packed;
-#endif /* NET2280_H */
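The register layout above pairs every __le32 register with #defines that give bit positions rather than ready-made masks, so drivers build masks with BIT() and let readl()/writel() handle the little-endian conversion. A minimal sketch, assuming a mapped register block (the "regs" pointer name is illustrative):

	/* Enable a few sources in usbirqenb1; the #defines above are
	 * bit positions, so masks are built with BIT().
	 */
	u32 enb = BIT(USB_INTERRUPT_ENABLE) |
		  BIT(DMA_A_INTERRUPT_ENABLE) |
		  BIT(VBUS_INTERRUPT_ENABLE);

	writel(enb, &regs->usbirqenb1);	/* writel() performs the LE swap */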
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index d273be7272b9..a5f5f0fea3bd 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -16,7 +16,7 @@
/* for isl3886 register definitions used on ver 1 devices */
#include "p54pci.h"
-#include "net2280.h"
+#include <linux/usb/net2280.h>
/* pci */
#define NET2280_BASE 0x10000000
@@ -93,6 +93,17 @@ enum net2280_op_type {
NET2280_DEV_CFG_U16 = 0x0883
};
+struct net2280_reg_write {
+ __le16 port;
+ __le32 addr;
+ __le32 val;
+} __packed;
+
+struct net2280_reg_read {
+ __le16 port;
+ __le32 addr;
+} __packed;
+
#define P54U_FW_BLOCK 2048
#define X2_SIGNATURE "x2 "
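Since p54usb reaches the NET2280 over USB rather than through a memory-mapped BAR, the two structs moved above are wire formats and every field must be converted to little-endian explicitly. A hedged sketch of filling a write command; the op type is taken from the enum above, while the offset and value are made up for illustration:

	struct net2280_reg_write cmd = {
		.port = cpu_to_le16(NET2280_DEV_CFG_U16),	/* op type */
		.addr = cpu_to_le32(NET2280_BASE + 0x10),	/* illustrative */
		.val  = cpu_to_le32(0x1),			/* illustrative */
	};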
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index d849d590de25..05c64597838d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -47,7 +47,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
* BBP and RF registers require indirect register access,
* and use the CSR registers BBPCSR and RFCSR to achieve this.
* These indirect registers work with busy bits,
- * and we will try maximal REGISTER_BUSY_COUNT times to access
+ * and we will try at most REGISTER_USB_BUSY_COUNT times to access
* the register while taking a REGISTER_BUSY_DELAY us delay
* between each attempt. When the busy bit is still set at that time,
* the access attempt is considered to have failed,
@@ -62,7 +62,7 @@ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
__le16 reg;
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
- &reg, sizeof(reg), REGISTER_TIMEOUT);
+ &reg, sizeof(reg));
*value = le16_to_cpu(reg);
}
@@ -83,8 +83,7 @@ static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev,
{
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
- value, length,
- REGISTER_TIMEOUT16(length));
+ value, length);
}
static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
@@ -94,7 +93,7 @@ static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
__le16 reg = cpu_to_le16(value);
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
USB_VENDOR_REQUEST_OUT, offset,
- &reg, sizeof(reg), REGISTER_TIMEOUT);
+ &reg, sizeof(reg));
}
static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
@@ -113,8 +112,7 @@ static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
{
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
USB_VENDOR_REQUEST_OUT, offset,
- value, length,
- REGISTER_TIMEOUT16(length));
+ value, length);
}
static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
@@ -124,7 +122,7 @@ static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt2500usb_register_read_lock(rt2x00dev, offset, reg);
if (!rt2x00_get_field16(*reg, field))
return 1;
@@ -906,7 +904,7 @@ static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
unsigned int i;
u8 value;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt2500usb_bbp_read(rt2x00dev, 0, &value);
if ((value != 0xff) && (value != 0x00))
return 0;
@@ -1025,7 +1023,7 @@ static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev,
* We must wait until the register indicates that the
* device has entered the correct state.
*/
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt2500usb_register_read(rt2x00dev, MAC_CSR17, &reg2);
bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE);
rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9f57a2db791c..81ee481487cf 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -4119,7 +4119,20 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
* expected. We adjust it, based on TSSI reference and boundaries values
* provided in EEPROM.
*/
- delta += rt2800_get_gain_calibration_delta(rt2x00dev);
+ switch (rt2x00dev->chip.rt) {
+ case RT2860:
+ case RT2872:
+ case RT2883:
+ case RT3070:
+ case RT3071:
+ case RT3090:
+ case RT3572:
+ delta += rt2800_get_gain_calibration_delta(rt2x00dev);
+ break;
+ default:
+ /* TODO: temperature compensation code for other chips. */
+ break;
+ }
/*
* Decrease power according to user settings, on devices with unknown
@@ -4136,25 +4149,19 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
* TODO: we do not use the +6 dBm option, so as not to increase power
* beyond the regulatory limit; however, this could be utilized for
* devices with CAPABILITY_POWER_LIMIT.
- *
- * TODO: add different temperature compensation code for RT3290 & RT5390
- * to allow to use BBP_R1 for those chips.
- */
- if (!rt2x00_rt(rt2x00dev, RT3290) &&
- !rt2x00_rt(rt2x00dev, RT5390)) {
- rt2800_bbp_read(rt2x00dev, 1, &r1);
- if (delta <= -12) {
- power_ctrl = 2;
- delta += 12;
- } else if (delta <= -6) {
- power_ctrl = 1;
- delta += 6;
- } else {
- power_ctrl = 0;
- }
- rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
- rt2800_bbp_write(rt2x00dev, 1, r1);
+ */
+ if (delta <= -12) {
+ power_ctrl = 2;
+ delta += 12;
+ } else if (delta <= -6) {
+ power_ctrl = 1;
+ delta += 6;
+ } else {
+ power_ctrl = 0;
}
+ rt2800_bbp_read(rt2x00dev, 1, &r1);
+ rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
+ rt2800_bbp_write(rt2x00dev, 1, r1);
offset = TX_PWR_CFG_0;
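The restructured block maps the accumulated delta onto the two 6 dB attenuation steps of BBP register 1. Read as a small pure function (a restatement of the hunk above, not a helper the driver actually defines):

	/* Map a negative TX power delta onto BBP1_TX_POWER_CTRL steps,
	 * leaving any residual (> -6 dB) delta in *delta.
	 */
	static u8 bbp1_power_ctrl(int *delta)
	{
		if (*delta <= -12) {
			*delta += 12;
			return 2;	/* -12 dBm step */
		}
		if (*delta <= -6) {
			*delta += 6;
			return 1;	/* -6 dBm step */
		}
		return 0;		/* no BBP attenuation */
	}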
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index d13f25cd70d5..9bb398bed9bb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1019,9 +1019,12 @@ struct rt2x00_bar_list_entry {
* Register defines.
* Some registers require multiple attempts before success,
* in those cases REGISTER_BUSY_COUNT attempts should be
- * taken with a REGISTER_BUSY_DELAY interval.
+ * taken with a REGISTER_BUSY_DELAY interval. Due to USB
+ * bus delays, we do not have to loop as many times to wait
+ * for a valid register value on that bus.
*/
#define REGISTER_BUSY_COUNT 100
+#define REGISTER_USB_BUSY_COUNT 20
#define REGISTER_BUSY_DELAY 100
/*
@@ -1437,8 +1440,11 @@ int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw);
-void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw);
+void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr);
+void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
int rt2x00mac_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
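Both counts bound the same polling idiom: read a busy flag, back off for REGISTER_BUSY_DELAY microseconds, repeat. A minimal sketch, where read_busy() stands in for the bus-specific register read (not a real rt2x00 function):

	unsigned int i;

	for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
		if (!read_busy(rt2x00dev))
			return 1;	/* register value is valid */
		udelay(REGISTER_BUSY_DELAY);
	}
	return 0;			/* still busy: give up */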
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index ad6e5a8d1e10..cb40245a0695 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -568,7 +568,9 @@ int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
-void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw)
+void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
@@ -576,7 +578,8 @@ void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start);
-void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw)
+void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 86c43d112a4b..892270dd3e7b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -42,37 +42,27 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
{
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
int status;
- unsigned int i;
unsigned int pipe =
(requesttype == USB_VENDOR_REQUEST_IN) ?
usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+ unsigned long expire = jiffies + msecs_to_jiffies(timeout);
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return -ENODEV;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ do {
status = usb_control_msg(usb_dev, pipe, request, requesttype,
value, offset, buffer, buffer_length,
- timeout);
+ timeout / 2);
if (status >= 0)
return 0;
- /*
- * Check for errors
- * -ENODEV: Device has disappeared, no point continuing.
- * All other errors: Try again.
- */
- else if (status == -ENODEV) {
+ if (status == -ENODEV) {
+ /* Device has disappeared. */
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
break;
}
- }
-
- /* If the port is powered down, we get a -EPROTO error, and this
- * leads to a endless loop. So just say that the device is gone.
- */
- if (status == -EPROTO)
- clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+ } while (time_before(jiffies, expire));
rt2x00_err(rt2x00dev,
"Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
@@ -116,7 +106,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
const u8 request, const u8 requesttype,
const u16 offset, void *buffer,
- const u16 buffer_length, const int timeout)
+ const u16 buffer_length)
{
int status = 0;
unsigned char *tb;
@@ -131,7 +121,7 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
bsize = min_t(u16, CSR_CACHE_SIZE, len);
status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
requesttype, off, tb,
- bsize, timeout);
+ bsize, REGISTER_TIMEOUT);
tb += bsize;
len -= bsize;
@@ -154,7 +144,7 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return -ENODEV;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
return 1;
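Condensing the first rt2x00usb.c hunk above: retries are now bounded by wall-clock time rather than a fixed attempt count, and each usb_control_msg() call gets half of the caller's timeout so at least two attempts fit inside the deadline:

	unsigned long expire = jiffies + msecs_to_jiffies(timeout);

	do {
		status = usb_control_msg(usb_dev, pipe, request, requesttype,
					 value, offset, buffer, buffer_length,
					 timeout / 2);
		if (status >= 0)
			return 0;
		if (status == -ENODEV)	/* device is gone, stop retrying */
			break;
	} while (time_before(jiffies, expire));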
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 831b65f93feb..8f85fbd5f237 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -33,27 +33,14 @@
})
/*
- * For USB vendor requests we need to pass a timeout
- * time in ms, for this we use the REGISTER_TIMEOUT,
- * however when loading firmware a higher value is
- * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE.
+ * For USB vendor requests we need to pass a timeout in ms; for this we
+ * use REGISTER_TIMEOUT. However, when loading firmware or reading the
+ * EEPROM a higher value is required; in those cases we use
+ * REGISTER_TIMEOUT_FIRMWARE and EEPROM_TIMEOUT.
*/
-#define REGISTER_TIMEOUT 500
+#define REGISTER_TIMEOUT 100
#define REGISTER_TIMEOUT_FIRMWARE 1000
-
-/**
- * REGISTER_TIMEOUT16 - Determine the timeout for 16bit register access
- * @__datalen: Data length
- */
-#define REGISTER_TIMEOUT16(__datalen) \
- ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u16)) )
-
-/**
- * REGISTER_TIMEOUT32 - Determine the timeout for 32bit register access
- * @__datalen: Data length
- */
-#define REGISTER_TIMEOUT32(__datalen) \
- ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u32)) )
+#define EEPROM_TIMEOUT 2000
/*
* Cache size
@@ -126,7 +113,6 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
* @offset: Register offset to perform action on
* @buffer: Buffer where information will be read/written to by device
* @buffer_length: Size of &buffer
- * @timeout: Operation timeout
*
* This function will use a cache previously allocated with kmalloc
* to communicate with the device. The contents of the buffer pointer
@@ -139,7 +125,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
const u8 request, const u8 requesttype,
const u16 offset, void *buffer,
- const u16 buffer_length, const int timeout);
+ const u16 buffer_length);
/**
* rt2x00usb_vendor_request_buff - Send register command to device (buffered)
@@ -197,8 +183,7 @@ static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
{
return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ,
USB_VENDOR_REQUEST_IN, 0, 0,
- eeprom, length,
- REGISTER_TIMEOUT16(length));
+ eeprom, length, EEPROM_TIMEOUT);
}
/**
@@ -217,7 +202,7 @@ static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
__le32 reg;
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
- &reg, sizeof(reg), REGISTER_TIMEOUT);
+ &reg, sizeof(reg));
*value = le32_to_cpu(reg);
}
@@ -257,8 +242,7 @@ static inline void rt2x00usb_register_multiread(struct rt2x00_dev *rt2x00dev,
{
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
USB_VENDOR_REQUEST_IN, offset,
- value, length,
- REGISTER_TIMEOUT32(length));
+ value, length);
}
/**
@@ -277,7 +261,7 @@ static inline void rt2x00usb_register_write(struct rt2x00_dev *rt2x00dev,
__le32 reg = cpu_to_le32(value);
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
USB_VENDOR_REQUEST_OUT, offset,
- &reg, sizeof(reg), REGISTER_TIMEOUT);
+ &reg, sizeof(reg));
}
/**
@@ -316,8 +300,7 @@ static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
{
rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
USB_VENDOR_REQUEST_OUT, offset,
- (void *)value, length,
- REGISTER_TIMEOUT32(length));
+ (void *)value, length);
}
/**
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 95724ff9c726..a5458cf01fb2 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1295,7 +1295,7 @@ static int rt73usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
unsigned int i;
u8 value;
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
rt73usb_bbp_read(rt2x00dev, 0, &value);
if ((value != 0xff) && (value != 0x00))
return 0;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 07dae0d44abc..5fc6f52641bd 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -786,6 +786,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags, u64 multicast)
{
+ bool update_rcr = false;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -806,6 +807,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive multicast frame\n");
}
+ update_rcr = true;
}
if (changed_flags & FIF_FCSFAIL) {
@@ -818,6 +820,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive FCS error frame\n");
}
+ update_rcr = true;
}
/* if ssid not set to hw don't check bssid
@@ -832,6 +836,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_chk_bssid(hw, false);
else
rtlpriv->cfg->ops->set_chk_bssid(hw, true);
+ update_rcr = false;
}
}
@@ -846,6 +852,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive control frame.\n");
}
+ update_rcr = true;
}
if (changed_flags & FIF_OTHER_BSS) {
@@ -858,7 +866,13 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive other BSS's frame.\n");
}
+ update_rcr = true;
}
+
+ if (update_rcr)
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&mac->rx_conf));
}
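Taken together, the update_rcr changes implement a batched-update pattern: each filter branch only marks the cached rx_conf dirty, the FIF_BCN_PRBRESP_PROMISC branch clears the mark because set_chk_bssid() writes the RCR itself, and one set_hw_reg() call at the end pushes the result to hardware. Schematically (condensed, not the full function):

	bool update_rcr = false;

	if (changed_flags & FIF_ALLMULTI) {
		/* ...adjust mac->rx_conf bits... */
		update_rcr = true;
	}
	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
		/* set_chk_bssid() updates the RCR on its own */
		update_rcr = false;
	}
	if (changed_flags & FIF_OTHER_BSS) {
		/* ...adjust mac->rx_conf bits... */
		update_rcr = true;	/* later branches re-mark */
	}
	if (update_rcr)
		rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
					      (u8 *)(&mac->rx_conf));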
static int rtl_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1361,7 +1375,9 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
+static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1396,7 +1412,8 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0);
}
-static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
+static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
index 11952b99daf8..0315eeda9b60 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
@@ -1,6 +1,3 @@
-obj-m := rtl8192ee.o
-
-
rtl8192ee-objs := \
dm.o \
fw.o \
@@ -14,6 +11,6 @@ rtl8192ee-objs := \
trx.o \
-obj-$(CONFIG_RTL8821AE) += rtl8192ee.o
+obj-$(CONFIG_RTL8192EE) += rtl8192ee.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
index 9c34a85fdb89..6220672a96f4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
@@ -1,6 +1,3 @@
-obj-m := rtl8723ae.o
-
-
rtl8723ae-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
index 59e416abd93a..a77c34102792 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
@@ -1,6 +1,3 @@
-obj-m := rtl8723be.o
-
-
rtl8723be-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
index 87ad604a1eb3..f7a26f71197e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
@@ -1,6 +1,3 @@
-obj-m := rtl8821ae.o
-
-
rtl8821ae-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
index 1e9570fa874f..9b4d8a637915 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
@@ -800,7 +800,7 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 2.4G,Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else if (band == BAND_ON_5G) {
switch (rate_section) {
case OFDM:
@@ -823,7 +823,7 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band);
@@ -870,7 +870,7 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else if (band == BAND_ON_5G) {
switch (rate_section) {
case OFDM:
@@ -893,7 +893,7 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band);
@@ -3746,7 +3746,7 @@ static void _rtl8821ae_iqk_tx_fill_iqc(struct ieee80211_hw *hw,
break;
default:
break;
- };
+ }
}
static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw,
@@ -3767,7 +3767,7 @@ static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw,
break;
default:
break;
- };
+ }
}
#define cal_num 10
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 38234851457e..0b30a7b4d663 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1029,7 +1029,7 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
goto out_sleep;
}
- skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ skb = ieee80211_probereq_get(wl->hw, wl->vif->addr, ssid, ssid_len,
req->ie_len);
if (!skb) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 05604ee31224..b82661962d33 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -64,6 +64,9 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
id != CMD_STOP_FWLOGGER))
return -EIO;
+ if (WARN_ON_ONCE(len < sizeof(*cmd)))
+ return -EIO;
+
cmd = buf;
cmd->id = cpu_to_le16(id);
cmd->status = 0;
@@ -128,8 +131,9 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
* send command to fw and return cmd status on success
* valid_rets contains a bitmap of allowed error codes
*/
-int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
- size_t res_len, unsigned long valid_rets)
+static int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf,
+ size_t len, size_t res_len,
+ unsigned long valid_rets)
{
int ret = __wlcore_cmd_send(wl, id, buf, len, res_len);
@@ -150,7 +154,6 @@ fail:
wl12xx_queue_recovery_work(wl);
return ret;
}
-EXPORT_SYMBOL_GPL(wl1271_cmd_send);
/*
* wrapper for wlcore_cmd_send that accept only CMD_STATUS_SUCCESS
@@ -165,6 +168,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
return ret;
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_cmd_send);
/*
* Poll the mailbox event field until any of the bits in the mask is set or a
@@ -891,6 +895,9 @@ int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
wl1271_debug(DEBUG_CMD, "cmd configure (%d)", id);
+ if (WARN_ON_ONCE(len < sizeof(*acx)))
+ return -EIO;
+
acx->id = cpu_to_le16(id);
/* payload length, does not include any headers */
@@ -1138,7 +1145,7 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
- skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
+ skb = ieee80211_probereq_get(wl->hw, vif->addr, ssid, ssid_len,
ie0_len + ie1_len);
if (!skb) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index ca6a28b03f8f..453684a71d30 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -31,8 +31,6 @@ struct acx_header;
int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len);
-int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, size_t len,
- size_t res_len, unsigned long valid_rets);
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
u8 *role_id);
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 16d10281798d..5153640f4532 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -259,10 +259,7 @@ void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
&wlvif->connection_loss_work,
msecs_to_jiffies(delay));
- ieee80211_cqm_rssi_notify(
- vif,
- NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
- GFP_KERNEL);
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 575c8f6d4009..6ad3fcedab9b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5177,10 +5177,11 @@ out:
}
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch)
{
struct wl1271 *wl = hw->priv;
- struct wl12xx_vif *wlvif;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -5190,14 +5191,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WLCORE_STATE_OFF)) {
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
-
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- continue;
-
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
ieee80211_chswitch_done(vif, false);
- }
goto out;
} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
goto out;
@@ -5208,11 +5203,9 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
goto out;
/* TODO: change mac80211 to pass vif as param */
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- unsigned long delay_usec;
- if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- continue;
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+ unsigned long delay_usec;
ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
if (ret)
@@ -5222,10 +5215,10 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
/* indicate failure 5 seconds after channel switch time */
delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
- ch_switch->count;
+ ch_switch->count;
ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
- usecs_to_jiffies(delay_usec) +
- msecs_to_jiffies(5000));
+ usecs_to_jiffies(delay_usec) +
+ msecs_to_jiffies(5000));
}
out_sleep:
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 895fe84011e7..a6a32d337bbb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -235,10 +235,10 @@ static void xenvif_down(struct xenvif *vif)
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
- napi_disable(&queue->napi);
disable_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
disable_irq(queue->rx_irq);
+ napi_disable(&queue->napi);
del_timer_sync(&queue->credit_timeout);
}
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6563f0713fc0..4a509f715fe8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -82,6 +82,16 @@ MODULE_PARM_DESC(max_queues,
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
+/* The amount to copy out of the first guest Tx slot into the skb's
+ * linear area. If the first slot has more data, it will be mapped
+ * and put into the first frag.
+ *
+ * This is sized to avoid pulling headers from the frags for most
+ * TCP/IP packets.
+ */
+#define XEN_NETBACK_TX_COPY_LEN 128
+
+
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
@@ -125,13 +135,6 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
pending_tx_info[0]);
}
-/* This is a miniumum size for the linear area to avoid lots of
- * calls to __pskb_pull_tail() as we set up checksum offsets. The
- * value 128 was chosen as it covers all IPv4 and most likely
- * IPv6 headers.
- */
-#define PKT_PROT_LEN 128
-
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
return (u16)frag->page_offset;
@@ -1446,9 +1449,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
- data_len = (txreq.size > PKT_PROT_LEN &&
+ data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
- PKT_PROT_LEN : txreq.size;
+ XEN_NETBACK_TX_COPY_LEN : txreq.size;
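With illustrative values, the split works out as follows:

	/* txreq.size = 1000 -> data_len = 128: linear area gets 128 bytes,
	 *                      frag 0 maps the remaining 872 bytes.
	 * txreq.size =  100 -> data_len = 100: no frag needed.
	 */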
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
@@ -1550,7 +1553,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
unsigned int len;
BUG_ON(i >= MAX_SKB_FRAGS);
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+ page = alloc_page(GFP_ATOMIC);
if (!page) {
int j;
skb->truesize += skb->data_len;
@@ -1653,11 +1656,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
}
}
- if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
- int target = min_t(int, skb->len, PKT_PROT_LEN);
- __pskb_pull_tail(skb, target - skb_headlen(skb));
- }
-
skb->dev = queue->vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb_reset_network_header(skb);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index eeed0ce620f3..2f0a9ce9ff73 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -77,7 +77,9 @@ struct netfront_cb {
#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
+
+/* Minimum number of Rx slots (includes slot for GSO metadata). */
+#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
@@ -137,13 +139,6 @@ struct netfront_queue {
struct xen_netif_rx_front_ring rx;
int rx_ring_ref;
- /* Receive-ring batched refills. */
-#define RX_MIN_TARGET 8
-#define RX_DFL_MIN_TARGET 64
-#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
- unsigned rx_min_target, rx_max_target, rx_target;
- struct sk_buff_head rx_batch;
-
struct timer_list rx_refill_timer;
struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
@@ -251,7 +246,7 @@ static void rx_refill_timeout(unsigned long data)
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
- (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
+ (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
}
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -265,77 +260,55 @@ static void xennet_maybe_wake_tx(struct netfront_queue *queue)
netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}
-static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+
+static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
- unsigned short id;
struct sk_buff *skb;
struct page *page;
- int i, batch_target, notify;
- RING_IDX req_prod = queue->rx.req_prod_pvt;
- grant_ref_t ref;
- unsigned long pfn;
- void *vaddr;
- struct xen_netif_rx_request *req;
- if (unlikely(!netif_carrier_ok(queue->info->netdev)))
- return;
+ skb = __netdev_alloc_skb(queue->info->netdev,
+ RX_COPY_THRESHOLD + NET_IP_ALIGN,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
- /*
- * Allocate skbuffs greedily, even though we batch updates to the
- * receive ring. This creates a less bursty demand on the memory
- * allocator, so should reduce the chance of failed allocation requests
- * both for ourself and for other kernel subsystems.
- */
- batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
- for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
- skb = __netdev_alloc_skb(queue->info->netdev,
- RX_COPY_THRESHOLD + NET_IP_ALIGN,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- goto no_skb;
-
- /* Align ip header to a 16 bytes boundary */
- skb_reserve(skb, NET_IP_ALIGN);
-
- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
- if (!page) {
- kfree_skb(skb);
-no_skb:
- /* Could not allocate any skbuffs. Try again later. */
- mod_timer(&queue->rx_refill_timer,
- jiffies + (HZ/10));
-
- /* Any skbuffs queued for refill? Force them out. */
- if (i != 0)
- goto refill;
- break;
- }
-
- skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
- __skb_queue_tail(&queue->rx_batch, skb);
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ kfree_skb(skb);
+ return NULL;
}
+ skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+
+ /* Align the IP header to a 16-byte boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = queue->info->netdev;
+
+ return skb;
+}
+
- /* Is the batch large enough to be worthwhile? */
- if (i < (queue->rx_target/2)) {
- if (req_prod > queue->rx.sring->req_prod)
- goto push;
+static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+{
+ RING_IDX req_prod = queue->rx.req_prod_pvt;
+ int notify;
+
+ if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;
- }
- /* Adjust our fill target if we risked running out of buffers. */
- if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
- ((queue->rx_target *= 2) > queue->rx_max_target))
- queue->rx_target = queue->rx_max_target;
+ for (req_prod = queue->rx.req_prod_pvt;
+ req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
+ req_prod++) {
+ struct sk_buff *skb;
+ unsigned short id;
+ grant_ref_t ref;
+ unsigned long pfn;
+ struct xen_netif_rx_request *req;
- refill:
- for (i = 0; ; i++) {
- skb = __skb_dequeue(&queue->rx_batch);
- if (skb == NULL)
+ skb = xennet_alloc_one_rx_buffer(queue);
+ if (!skb)
break;
- skb->dev = queue->info->netdev;
-
- id = xennet_rxidx(req_prod + i);
+ id = xennet_rxidx(req_prod);
BUG_ON(queue->rx_skbs[id]);
queue->rx_skbs[id] = skb;
@@ -345,9 +318,8 @@ no_skb:
queue->grant_rx_ref[id] = ref;
pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
- vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
- req = RING_GET_REQUEST(&queue->rx, req_prod + i);
+ req = RING_GET_REQUEST(&queue->rx, req_prod);
gnttab_grant_foreign_access_ref(ref,
queue->info->xbdev->otherend_id,
pfn_to_mfn(pfn),
@@ -357,11 +329,16 @@ no_skb:
req->gref = ref;
}
+ queue->rx.req_prod_pvt = req_prod;
+
+ /* Not enough requests? Try again later. */
+ if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
+ return;
+ }
+
wmb(); /* barrier so backend sees requests */
- /* Above is a suitable barrier to ensure backend will see requests. */
- queue->rx.req_prod_pvt = req_prod + i;
- push:
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
notify_remote_via_irq(queue->rx_irq);
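Stripped of the grant plumbing, the rewritten refill path reads: fill the ring to capacity, stop at the first allocation failure, publish req_prod_pvt once, and fall back to the refill timer when fewer than NET_RX_SLOTS_MIN requests are outstanding. A condensed restatement of the hunk above:

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb = xennet_alloc_one_rx_buffer(queue);

		if (!skb)
			break;		/* OOM: the timer retries later */
		/* ...grant the page and fill the ring request... */
	}
	queue->rx.req_prod_pvt = req_prod;

	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN)
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ / 10));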
@@ -1068,13 +1045,6 @@ err:
work_done -= handle_incoming_queue(queue, &rxq);
- /* If we get a callback with very few responses, reduce fill target. */
- /* NB. Note exponential increase, linear decrease. */
- if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
- ((3*queue->rx_target) / 4)) &&
- (--queue->rx_target < queue->rx_min_target))
- queue->rx_target = queue->rx_min_target;
-
xennet_alloc_rx_buffers(queue);
if (work_done < budget) {
@@ -1641,11 +1611,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- skb_queue_head_init(&queue->rx_batch);
- queue->rx_target = RX_DFL_MIN_TARGET;
- queue->rx_min_target = RX_DFL_MIN_TARGET;
- queue->rx_max_target = RX_MAX_TARGET;
-
init_timer(&queue->rx_refill_timer);
queue->rx_refill_timer.data = (unsigned long)queue;
queue->rx_refill_timer.function = rx_refill_timeout;
@@ -1668,7 +1633,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
}
/* A grant for every tx ring slot */
- if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+ if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
&queue->gref_tx_head) < 0) {
pr_alert("can't alloc tx grant refs\n");
err = -ENOMEM;
@@ -1676,7 +1641,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
}
/* A grant for every rx ring slot */
- if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+ if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
&queue->gref_rx_head) < 0) {
pr_alert("can't alloc rx grant refs\n");
err = -ENOMEM;
@@ -2144,83 +2109,18 @@ static const struct ethtool_ops xennet_ethtool_ops =
};
#ifdef CONFIG_SYSFS
-static ssize_t show_rxbuf_min(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *info = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
-
- if (num_queues)
- return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
- else
- return sprintf(buf, "%u\n", RX_MIN_TARGET);
-}
-
-static ssize_t store_rxbuf_min(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t show_rxbuf(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *np = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
- char *endp;
- unsigned long target;
- unsigned int i;
- struct netfront_queue *queue;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- target = simple_strtoul(buf, &endp, 0);
- if (endp == buf)
- return -EBADMSG;
-
- if (target < RX_MIN_TARGET)
- target = RX_MIN_TARGET;
- if (target > RX_MAX_TARGET)
- target = RX_MAX_TARGET;
-
- for (i = 0; i < num_queues; ++i) {
- queue = &np->queues[i];
- spin_lock_bh(&queue->rx_lock);
- if (target > queue->rx_max_target)
- queue->rx_max_target = target;
- queue->rx_min_target = target;
- if (target > queue->rx_target)
- queue->rx_target = target;
-
- xennet_alloc_rx_buffers(queue);
-
- spin_unlock_bh(&queue->rx_lock);
- }
- return len;
-}
-
-static ssize_t show_rxbuf_max(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *info = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
-
- if (num_queues)
- return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
- else
- return sprintf(buf, "%u\n", RX_MAX_TARGET);
+ return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}
-static ssize_t store_rxbuf_max(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t store_rxbuf(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *np = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
char *endp;
unsigned long target;
- unsigned int i = 0;
- struct netfront_queue *queue = NULL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -2229,44 +2129,15 @@ static ssize_t store_rxbuf_max(struct device *dev,
if (endp == buf)
return -EBADMSG;
- if (target < RX_MIN_TARGET)
- target = RX_MIN_TARGET;
- if (target > RX_MAX_TARGET)
- target = RX_MAX_TARGET;
-
- for (i = 0; i < num_queues; ++i) {
- queue = &np->queues[i];
- spin_lock_bh(&queue->rx_lock);
- if (target < queue->rx_min_target)
- queue->rx_min_target = target;
- queue->rx_max_target = target;
- if (target < queue->rx_target)
- queue->rx_target = target;
-
- xennet_alloc_rx_buffers(queue);
+ /* rxbuf_min and rxbuf_max are no longer configurable. */
- spin_unlock_bh(&queue->rx_lock);
- }
return len;
}
-static ssize_t show_rxbuf_cur(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct net_device *netdev = to_net_dev(dev);
- struct netfront_info *info = netdev_priv(netdev);
- unsigned int num_queues = netdev->real_num_tx_queues;
-
- if (num_queues)
- return sprintf(buf, "%u\n", info->queues[0].rx_target);
- else
- return sprintf(buf, "0\n");
-}
-
static struct device_attribute xennet_attrs[] = {
- __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
- __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
- __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
+ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
+ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
};
static int xennet_sysfs_addif(struct net_device *netdev)
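One nit the retained store_rxbuf() parser inherits: simple_strtoul() with an endp check still accepts trailing junk after the number. If this path is ever reworked, kstrtoul() performs the stricter check in one call; a sketch of that alternative, not what the driver uses:

	unsigned long target;

	if (kstrtoul(buf, 0, &target))
		return -EBADMSG;	/* rejects trailing junk as well */
	/* value is accepted but intentionally ignored */
	return len;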