Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/Kconfig | 8
-rw-r--r-- drivers/net/Makefile | 3
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 26
-rw-r--r-- drivers/net/bonding/bond_alb.c | 88
-rw-r--r-- drivers/net/bonding/bond_main.c | 189
-rw-r--r-- drivers/net/bonding/bond_netlink.c | 5
-rw-r--r-- drivers/net/bonding/bond_procfs.c | 3
-rw-r--r-- drivers/net/can/Kconfig | 19
-rw-r--r-- drivers/net/can/Makefile | 2
-rw-r--r-- drivers/net/can/m_can/m_can.c | 752
-rw-r--r-- drivers/net/can/peak_canfd/Kconfig | 13
-rw-r--r-- drivers/net/can/peak_canfd/Makefile | 5
-rw-r--r-- drivers/net/can/peak_canfd/peak_canfd.c | 801
-rw-r--r-- drivers/net/can/peak_canfd/peak_canfd_user.h | 55
-rw-r--r-- drivers/net/can/peak_canfd/peak_pciefd_main.c | 842
-rw-r--r-- drivers/net/can/spi/Kconfig | 6
-rw-r--r-- drivers/net/can/spi/Makefile | 1
-rw-r--r-- drivers/net/can/spi/hi311x.c | 1076
-rw-r--r-- drivers/net/can/ti_hecc.c | 170
-rw-r--r-- drivers/net/can/usb/Kconfig | 6
-rw-r--r-- drivers/net/can/usb/Makefile | 1
-rw-r--r-- drivers/net/can/usb/mcba_usb.c | 904
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_ucan.h | 244
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 25
-rw-r--r-- drivers/net/can/vcan.c | 7
-rw-r--r-- drivers/net/can/vxcan.c | 316
-rw-r--r-- drivers/net/cris/eth_v10.c | 32
-rw-r--r-- drivers/net/dsa/Kconfig | 42
-rw-r--r-- drivers/net/dsa/Makefile | 6
-rw-r--r-- drivers/net/dsa/bcm_sf2_cfp.c | 3
-rw-r--r-- drivers/net/dsa/dsa_loop.c | 328
-rw-r--r-- drivers/net/dsa/dsa_loop.h | 19
-rw-r--r-- drivers/net/dsa/dsa_loop_bdinfo.c | 34
-rw-r--r-- drivers/net/dsa/lan9303-core.c | 879
-rw-r--r-- drivers/net/dsa/lan9303.h | 19
-rw-r--r-- drivers/net/dsa/lan9303_i2c.c | 113
-rw-r--r-- drivers/net/dsa/lan9303_mdio.c | 148
-rw-r--r-- drivers/net/dsa/mt7530.c | 1126
-rw-r--r-- drivers/net/dsa/mt7530.h | 402
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Makefile | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 1564
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.c | 3
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.h | 28
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_atu.c | 305
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_vtu.c | 511
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.c | 101
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.h | 18
-rw-r--r-- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 113
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.c | 81
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 19
-rw-r--r-- drivers/net/dummy.c | 15
-rw-r--r-- drivers/net/ethernet/3com/typhoon.c | 7
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/adi/bfin_mac.h | 7
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 10
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 55
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 2
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 49
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 1
-rw-r--r-- drivers/net/ethernet/apm/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/apm/Makefile | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/Makefile | 6
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/enet.c | 83
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/enet.h | 45
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ethtool.c | 187
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ethtool.h | 78
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/mac.c | 116
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/mac.h | 107
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 759
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.h | 80
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/mdio.c | 168
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ring.c | 81
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ring.h | 119
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 3
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 74
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 7
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 5
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/alx/alx.h | 6
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 128
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 82
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c | 103
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 34
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 52
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 109
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 228
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 29
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 108
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 427
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 325
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 20
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 292
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 16
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 52
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 7
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 58
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_core.c | 86
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 459
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 399
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 137
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 32
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 18
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 36
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 47
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 10
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/response_manager.c | 39
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/response_manager.h | 5
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 12
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 64
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 29
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 391
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 397
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 32
-rw-r--r-- drivers/net/ethernet/cavium/thunder/q_struct.h | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/common.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/adapter.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 75
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 4
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/de2104x.c | 42
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 45
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 12
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 140
-rw-r--r-- drivers/net/ethernet/ethoc.c | 4
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 1
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 1914
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.h | 45
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 184
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 8
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 45
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.c | 23
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.h | 10
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.c | 76
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.h | 47
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 127
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 63
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 80
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 249
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 153
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 28
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 400
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 34
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 20
-rw-r--r-- drivers/net/ethernet/ibm/emac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 9
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/emac/debug.c | 270
-rw-r--r-- drivers/net/ethernet/ibm/emac/debug.h | 23
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 1730
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 50
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 117
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 15
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 28
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 121
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 5
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 105
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 81
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 68
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 68
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 16
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 119
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 106
-rw-r--r-- drivers/net/ethernet/intel/i40e/Makefile | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 274
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 99
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 474
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.h | 8
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 247
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 83
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1621
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 1352
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_osdep.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 20
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_trace.h | 229
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 688
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 116
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 218
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 371
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/Makefile | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 99
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 220
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_trace.h | 229
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 470
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 122
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 80
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 36
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 51
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_client.c | 564
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_client.h | 166
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 135
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 242
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 57
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 21
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 81
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 203
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 943
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 38
-rw-r--r-- drivers/net/ethernet/intel/igbvf/igbvf.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igbvf/mbx.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 70
-rw-r--r-- drivers/net/ethernet/intel/igbvf/vf.c | 41
-rw-r--r-- drivers/net/ethernet/intel/igbvf/vf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 39
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 16
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 82
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 202
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 75
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 572
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 304
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 23
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 155
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 27
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 33
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/Kconfig | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 44
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 107
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 1313
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_port.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 607
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 487
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 345
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2070
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 682
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 145
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 231
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 808
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 399
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 139
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 176
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 75
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 96
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib.c | 498
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib.h (renamed from drivers/net/ethernet/qlogic/qed/qed_ptp.h) | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 223
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 178
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 153
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/port.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 247
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/resources.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 942
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 141
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 196
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 44
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 187
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 207
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 351
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 88
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1517
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 58
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 1
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 41
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.c | 28
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.h | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/Makefile | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c | 24
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 12
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.h | 11
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 184
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 1320
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 27
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 26
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 245
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 238
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_offload.c | 43
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 30
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 29
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 467
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 345
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 106
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 174
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c (renamed from drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h) | 94
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 424
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c | 15
-rw-r--r-- drivers/net/ethernet/nuvoton/w90p910_ether.c | 33
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 149
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 189
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 157
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 1566
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 1727
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 81
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 48
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1110
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 59
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 185
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 82
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 373
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.h | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 113
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 149
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1346
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 225
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 102
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ooo.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ptp.c | 199
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 56
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 338
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.h | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 330
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 48
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 564
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 19
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 185
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.h | 62
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 97
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 85
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 536
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 93
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 356
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 188
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ptp.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c | 6
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 1
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 14
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 14
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 45
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 72
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/falcon/tx.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 4
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 14
-rw-r--r-- drivers/net/ethernet/silan/sc92031.c | 83
-rw-r--r-- drivers/net/ethernet/sis/sis190.c | 14
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 18
-rw-r--r-- drivers/net/ethernet/smsc/epic100.c | 16
-rw-r--r-- drivers/net/ethernet/smsc/smc911x.c | 51
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 98
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 51
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 81
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 371
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 53
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 103
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 412
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 225
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 56
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 55
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 49
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1835
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 37
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 156
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 98
-rw-r--r-- drivers/net/ethernet/sun/ldmvsw.c | 27
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 37
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.c | 18
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.h | 1
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 99
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 86
-rw-r--r-- drivers/net/ethernet/sun/sunhme.h | 2
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 116
-rw-r--r-- drivers/net/ethernet/sun/sunvnet_common.c | 56
-rw-r--r-- drivers/net/ethernet/sun/sunvnet_common.h | 27
-rw-r--r-- drivers/net/ethernet/synopsys/Kconfig | 41
-rw-r--r-- drivers/net/ethernet/synopsys/Makefile | 10
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-common.c | 737
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c | 644
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c | 275
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c | 3147
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 1350
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c | 78
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h | 744
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac.h | 660
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 43
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 16
-rw-r--r-- drivers/net/ethernet/ti/netcp_ethss.c | 3
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 51
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 14
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 14
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 62
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 3
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r-- drivers/net/fjes/fjes_ethtool.c | 19
-rw-r--r-- drivers/net/geneve.c | 2
-rw-r--r-- drivers/net/gtp.c | 585
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 19
-rw-r--r-- drivers/net/hyperv/netvsc.c | 256
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 261
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 115
-rw-r--r-- drivers/net/ieee802154/Kconfig | 22
-rw-r--r-- drivers/net/ieee802154/Makefile | 1
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 3242
-rw-r--r-- drivers/net/ieee802154/mrf24j40.c | 1
-rw-r--r-- drivers/net/ipvlan/ipvlan.h | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 83
-rw-r--r-- drivers/net/loopback.c | 34
-rw-r--r-- drivers/net/macsec.c | 10
-rw-r--r-- drivers/net/ntb_netdev.c | 25
-rw-r--r-- drivers/net/phy/Kconfig | 62
-rw-r--r-- drivers/net/phy/Makefile | 17
-rw-r--r-- drivers/net/phy/bcm-phy-lib.c | 18
-rw-r--r-- drivers/net/phy/bcm7xxx.c | 215
-rw-r--r-- drivers/net/phy/broadcom.c | 69
-rw-r--r-- drivers/net/phy/dp83867.c | 25
-rw-r--r-- drivers/net/phy/intel-xway.c | 26
-rw-r--r-- drivers/net/phy/mdio-bcm-unimac.c | 3
-rw-r--r-- drivers/net/phy/mdio-boardinfo.c | 21
-rw-r--r-- drivers/net/phy/mdio-boardinfo.h | 5
-rw-r--r-- drivers/net/phy/mdio-xgene.c | 2
-rw-r--r-- drivers/net/phy/mdio_bus.c | 88
-rw-r--r-- drivers/net/phy/micrel.c | 13
-rw-r--r-- drivers/net/phy/microchip.c | 5
-rw-r--r-- drivers/net/phy/phy-core.c | 101
-rw-r--r-- drivers/net/phy/phy.c | 278
-rw-r--r-- drivers/net/phy/phy_device.c | 4
-rw-r--r-- drivers/net/phy/smsc.c | 1
-rw-r--r-- drivers/net/team/team.c | 3
-rw-r--r-- drivers/net/tun.c | 24
-rw-r--r-- drivers/net/usb/asix_devices.c | 15
-rw-r--r-- drivers/net/usb/ax88172a.c | 1
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 15
-rw-r--r-- drivers/net/usb/catc.c | 31
-rw-r--r-- drivers/net/usb/cdc_mbim.c | 1
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 5
-rw-r--r-- drivers/net/usb/dm9601.c | 5
-rw-r--r-- drivers/net/usb/int51x1.c | 1
-rw-r--r-- drivers/net/usb/kaweth.c | 32
-rw-r--r-- drivers/net/usb/lan78xx.c | 11
-rw-r--r-- drivers/net/usb/mcs7830.c | 5
-rw-r--r-- drivers/net/usb/pegasus.c | 50
-rw-r--r-- drivers/net/usb/pegasus.h | 1
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 318
-rw-r--r-- drivers/net/usb/r8152.c | 172
-rw-r--r-- drivers/net/usb/rndis_host.c | 1
-rw-r--r-- drivers/net/usb/rtl8150.c | 35
-rw-r--r-- drivers/net/usb/sierra_net.c | 5
-rw-r--r-- drivers/net/usb/smsc75xx.c | 5
-rw-r--r-- drivers/net/usb/smsc95xx.c | 29
-rw-r--r-- drivers/net/usb/smsc95xx.h | 490
-rw-r--r-- drivers/net/usb/sr9700.c | 5
-rw-r--r-- drivers/net/usb/sr9800.c | 5
-rw-r--r-- drivers/net/usb/usbnet.c | 74
-rw-r--r-- drivers/net/veth.c | 22
-rw-r--r-- drivers/net/virtio_net.c | 272
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethtool.c | 25
-rw-r--r-- drivers/net/vrf.c | 190
-rw-r--r-- drivers/net/vsockmon.c | 170
-rw-r--r-- drivers/net/vxlan.c | 51
-rw-r--r-- drivers/net/wan/pc300too.c | 1
-rw-r--r-- drivers/net/wireless/admtek/adm8211.c | 2
-rw-r--r-- drivers/net/wireless/ath/ar5523/ar5523.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/ahb.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/bmi.c | 72
-rw-r--r-- drivers/net/wireless/ath/ath10k/bmi.h | 5
-rw-r--r-- drivers/net/wireless/ath/ath10k/ce.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 36
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 11
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 105
-rw-r--r-- drivers/net/wireless/ath/ath10k/debugfs_sta.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath10k/hif.h | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/htc.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.h | 24
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 57
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_tx.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.c | 265
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.h | 80
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 81
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 19
-rw-r--r-- drivers/net/wireless/ath/ath10k/rx_desc.h | 13
-rw-r--r-- drivers/net/wireless/ath/ath10k/spectral.c | 32
-rw-r--r-- drivers/net/wireless/ath/ath10k/targaddrs.h | 19
-rw-r--r-- drivers/net/wireless/ath/ath10k/testmode.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/thermal.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath10k/txrx.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi-ops.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 27
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.h | 34
-rw-r--r-- drivers/net/wireless/ath/ath5k/base.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath6kl/cfg80211.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath6kl/debug.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/htc_pipe.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/testmode.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath6kl/wmi.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_mac.c | 7
-rw-r--r-- drivers/net/wireless/ath/ath9k/calib.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/common-spectral.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/common.c | 11
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug.c | 62
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug_sta.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/eeprom.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/eeprom.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/hif_usb.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_init.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 7
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/init.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.c | 15
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/pci.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/recv.c | 8
-rw-r--r-- drivers/net/wireless/ath/carl9170/main.c | 2
-rw-r--r-- drivers/net/wireless/ath/carl9170/rx.c | 8
-rw-r--r-- drivers/net/wireless/ath/regd.c | 19
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/Kconfig | 2
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/main.c | 13
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/smd.c | 10
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/smd.h | 6
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/txrx.c | 2
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 2
-rw-r--r-- drivers/net/wireless/ath/wil6210/cfg80211.c | 94
-rw-r--r-- drivers/net/wireless/ath/wil6210/debugfs.c | 5
-rw-r--r-- drivers/net/wireless/ath/wil6210/fw_inc.c | 4
-rw-r--r-- drivers/net/wireless/ath/wil6210/main.c | 140
-rw-r--r-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 16
-rw-r--r-- drivers/net/wireless/ath/wil6210/pm.c | 27
-rw-r--r-- drivers/net/wireless/ath/wil6210/pmc.c | 21
-rw-r--r-- drivers/net/wireless/ath/wil6210/rx_reorder.c | 12
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 43
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil6210.h | 34
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.c | 35
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.h | 73
-rw-r--r-- drivers/net/wireless/atmel/at76c50x-usb.c | 2
-rw-r--r-- drivers/net/wireless/atmel/atmel.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43/main.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43/xmit.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/main.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/Kconfig | 10
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c | 82
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 86
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 89
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c | 26
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h | 18
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 53
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h | 36
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 12
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 7
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c | 10
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945-mac.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945-rs.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965-rs.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/Makefile | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/lib.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rx.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 48
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-a000.c | 33
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 27
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-context-info.h | 203
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 49
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-io.c | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h | 25
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 32
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 115
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/binding.c | 17
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 26
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h | 43
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h | 14
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 86
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 91
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 107
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 287
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 661
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 20
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 72
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 93
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 35
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c | 21
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 141
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 117
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 173
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 671
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tdls.c | 22
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tof.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 145
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 152
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 281
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 24
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 98
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 59
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 374
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 287
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 1018
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 237
-rw-r--r-- drivers/net/wireless/intersil/orinoco/cfg.c | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/main.c | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 21
-rw-r--r-- drivers/net/wireless/intersil/p54/txrx.c | 2
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 104
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.h | 4
-rw-r--r-- drivers/net/wireless/marvell/libertas/cfg.c | 2
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_spi.c | 5
-rw-r--r-- drivers/net/wireless/marvell/libertas_tf/main.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11h.c | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 64
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 4
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/fw.h | 54
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/ie.c | 15
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/ioctl.h | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 66
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.h | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.c | 192
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.h | 16
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/scan.c | 37
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sdio.c | 36
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 52
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_event.c | 10
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/tdls.c | 61
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_event.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/usb.c | 45
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/usb.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/util.c | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/util.h | 5
-rw-r--r-- drivers/net/wireless/marvell/mwl8k.c | 18
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/init.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mac.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mcu.c | 10
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/Kconfig | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800.h | 212
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 1556
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.h | 31
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800mmio.c | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 18
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 9
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 240
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 7
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00queue.h | 7
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c | 20
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c | 2468
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h | 24
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c | 1005
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c | 3256
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h | 40
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c | 1814
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 4215
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h | 29
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 23
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/regd.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 64
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 69
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 15
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 165
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 507
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 15
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c | 1358
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h | 28
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 18
-rw-r--r-- drivers/net/wireless/rndis_wlan.c | 28
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2
-rw-r--r-- drivers/net/wireless/st/cw1200/cw1200_sdio.c | 1
-rw-r--r-- drivers/net/wireless/st/cw1200/txrx.c | 2
-rw-r--r-- drivers/net/wireless/ti/wl1251/rx.c | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/debugfs.c | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/rx.c | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/testmode.c | 3
-rw-r--r-- drivers/net/wireless/ti/wlcore/vendor_cmd.c | 4
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 2
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_usb.c | 3
827 files changed, 79566 insertions, 31533 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 100fbdc9b95c..83a1616903f8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -355,6 +355,14 @@ config NET_VRF
This option enables the support for mapping interfaces into VRF's. The
support enables VRF devices.
+config VSOCKMON
+ tristate "Virtual vsock monitoring device"
+ depends on VHOST_VSOCK
+ ---help---
+ This option enables a monitoring net device for vsock sockets. It is
+ mostly intended for developers or support to debug vsock issues. If
+ unsure, say N.
+
endif # NET_CORE
config SUNGEM_PHY
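The VSOCKMON entry above adds a packet-capture-style net device for AF_VSOCK traffic. As a hedged illustration (not part of this patch), a userspace observer could read captured frames from such a device with an AF_PACKET socket; the device name "vsockmon0" and its prior creation are assumptions here.

/* Hedged userspace sketch: read one frame from an already-created
 * vsock monitor device. "vsockmon0" is an assumed name; creating the
 * device (e.g. via ip-link) is outside this patch's scope. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>      /* htons() */
#include <linux/if_ether.h>  /* ETH_P_ALL */
#include <linux/if_packet.h> /* struct sockaddr_ll */
#include <net/if.h>          /* if_nametoindex() */

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct sockaddr_ll sll;
	unsigned char buf[2048];
	ssize_t n;

	if (fd < 0)
		return 1;
	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex = if_nametoindex("vsockmon0"); /* assumed name */
	if (!sll.sll_ifindex || bind(fd, (struct sockaddr *)&sll, sizeof(sll)))
		return 1;
	n = recv(fd, buf, sizeof(buf), 0); /* one captured vsock frame */
	printf("captured %zd bytes\n", n);
	close(fd);
	return 0;
}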
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 98ed4d96987c..b2f6556d8848 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_MII) += mii.o
obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
-obj-$(CONFIG_PHYLIB) += phy/
+obj-y += phy/
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
@@ -30,6 +30,7 @@ obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
+obj-$(CONFIG_VSOCKMON) += vsockmon.o
#
# Networking Drivers
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index edc70ffad660..c5fd4259da33 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -92,6 +92,7 @@ enum ad_link_speed_type {
AD_LINK_SPEED_2500MBPS,
AD_LINK_SPEED_10000MBPS,
AD_LINK_SPEED_20000MBPS,
+ AD_LINK_SPEED_25000MBPS,
AD_LINK_SPEED_40000MBPS,
AD_LINK_SPEED_56000MBPS,
AD_LINK_SPEED_100000MBPS,
@@ -260,6 +261,7 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_2500MBPS,
* %AD_LINK_SPEED_10000MBPS
* %AD_LINK_SPEED_20000MBPS
+ * %AD_LINK_SPEED_25000MBPS
* %AD_LINK_SPEED_40000MBPS
* %AD_LINK_SPEED_56000MBPS
* %AD_LINK_SPEED_100000MBPS
@@ -302,6 +304,10 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_20000MBPS;
break;
+ case SPEED_25000:
+ speed = AD_LINK_SPEED_25000MBPS;
+ break;
+
case SPEED_40000:
speed = AD_LINK_SPEED_40000MBPS;
break;
@@ -707,6 +713,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_20000MBPS:
bandwidth = nports * 20000;
break;
+ case AD_LINK_SPEED_25000MBPS:
+ bandwidth = nports * 25000;
+ break;
case AD_LINK_SPEED_40000MBPS:
bandwidth = nports * 40000;
break;
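The hunks above thread the new 25G speed through both the LACP speed enum and the aggregate-bandwidth calculation. As a hedged, stand-alone sketch (not the kernel code), the enum ordering allows the switch to be expressed as a lookup table; all names here are simplified stand-ins for the AD_LINK_SPEED_* values.

/* Illustrative sketch: map a link-speed enum to aggregated bandwidth
 * in Mbps, mirroring the switch cases in __get_agg_bandwidth(). */
#include <stdio.h>

enum ad_link_speed {
	AD_1000MBPS, AD_2500MBPS, AD_10000MBPS, AD_20000MBPS,
	AD_25000MBPS, AD_40000MBPS, AD_56000MBPS, AD_100000MBPS,
};

static const unsigned int ad_speed_mbps[] = {
	1000, 2500, 10000, 20000, 25000, 40000, 56000, 100000,
};

static unsigned int agg_bandwidth(enum ad_link_speed speed, int nports)
{
	return nports * ad_speed_mbps[speed];
}

int main(void)
{
	/* four 25G ports aggregate to 100000 Mbps */
	printf("%u\n", agg_bandwidth(AD_25000MBPS, 4));
	return 0;
}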
@@ -1052,8 +1061,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_rx_state = AD_RX_INITIALIZE;
port->sm_vars |= AD_PORT_CHURNED;
/* check if port is not enabled */
- } else if (!(port->sm_vars & AD_PORT_BEGIN)
- && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
+ } else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled)
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* check if new lacpdu arrived */
else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
@@ -1081,11 +1089,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* if no lacpdu arrived and no timer is on */
switch (port->sm_rx_state) {
case AD_RX_PORT_DISABLED:
- if (port->sm_vars & AD_PORT_MOVED)
- port->sm_rx_state = AD_RX_INITIALIZE;
- else if (port->is_enabled
- && (port->sm_vars
- & AD_PORT_LACP_ENABLED))
+ if (port->is_enabled &&
+ (port->sm_vars & AD_PORT_LACP_ENABLED))
port->sm_rx_state = AD_RX_EXPIRED;
else if (port->is_enabled
&& ((port->sm_vars
@@ -1115,7 +1120,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
port->sm_vars &= ~AD_PORT_SELECTED;
__record_default(port);
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
- port->sm_vars &= ~AD_PORT_MOVED;
port->sm_rx_state = AD_RX_PORT_DISABLED;
/* Fall Through */
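The hunk above drops the AD_PORT_MOVED handling from the RX state machine. A hedged sketch of the resulting transition (names are simplified stand-ins for the kernel's AD_RX_* states, not the actual ad_rx_machine() code):

/* With the MOVED flag gone, a disabled port no longer jumps back to
 * INITIALIZE; it can only expire once enabled with LACP active. */
#include <stdio.h>

enum rx_state { RX_INITIALIZE, RX_PORT_DISABLED, RX_EXPIRED };

static enum rx_state next_rx_state(enum rx_state cur, int is_enabled,
				   int lacp_enabled)
{
	if (cur == RX_PORT_DISABLED && is_enabled && lacp_enabled)
		return RX_EXPIRED;
	return cur; /* no MOVED path back to RX_INITIALIZE */
}

int main(void)
{
	printf("%d\n", next_rx_state(RX_PORT_DISABLED, 1, 1)); /* RX_EXPIRED */
	return 0;
}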
@@ -2442,9 +2446,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
spin_lock_bh(&slave->bond->mode_lock);
ad_update_actor_keys(port, false);
+ spin_unlock_bh(&slave->bond->mode_lock);
netdev_dbg(slave->bond->dev, "Port %d slave %s changed speed/duplex\n",
port->actor_port_number, slave->dev->name);
- spin_unlock_bh(&slave->bond->mode_lock);
}
/**
@@ -2488,12 +2492,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
agg = __get_first_agg(port);
ad_agg_selection_logic(agg, &dummy);
+ spin_unlock_bh(&slave->bond->mode_lock);
+
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
- spin_unlock_bh(&slave->bond->mode_lock);
-
/* RTNL is held and mode_lock is released so it's safe
* to update slave_array here.
*/
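Both hunks above move the netdev_dbg() calls outside the mode_lock critical section. A hedged userspace analogue of that pattern (illustrative only, not the bonding code): finish the locked update, drop the lock, then log, keeping lock hold time minimal.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mode_lock = PTHREAD_MUTEX_INITIALIZER;
static int link_status;

static void handle_link_change(int port, int up)
{
	pthread_mutex_lock(&mode_lock);
	link_status = up;	/* update shared state under the lock */
	pthread_mutex_unlock(&mode_lock);

	/* logging happens after unlock, mirroring the reordering above */
	printf("Port %d changed link status to %s\n",
	       port, up ? "UP" : "DOWN");
}

int main(void)
{
	handle_link_change(1, 1);
	return 0;
}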
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c80b023092dd..7d7a3cec149a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -687,7 +687,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond);
if (tx_slave)
- ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
+ bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
+ tx_slave->dev->addr_len);
netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
} else if (arp->op_code == htons(ARPOP_REQUEST)) {
/* Create an entry in the rx_hashtbl for this client as a
@@ -1017,22 +1018,23 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
rcu_read_unlock();
}
-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
+static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
+ unsigned int len)
{
struct net_device *dev = slave->dev;
- struct sockaddr s_addr;
+ struct sockaddr_storage ss;
if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
- memcpy(dev->dev_addr, addr, dev->addr_len);
+ memcpy(dev->dev_addr, addr, len);
return 0;
}
/* for rlb each slave must have a unique hw mac addresses so that
* each slave will receive packets destined to a different mac
*/
- memcpy(s_addr.sa_data, addr, dev->addr_len);
- s_addr.sa_family = dev->type;
- if (dev_set_mac_address(dev, &s_addr)) {
+ memcpy(ss.__data, addr, len);
+ ss.ss_family = dev->type;
+ if (dev_set_mac_address(dev, (struct sockaddr *)&ss)) {
netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
dev->name);
return -EOPNOTSUPP;
@@ -1046,11 +1048,14 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
*/
static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
- u8 tmp_mac_addr[ETH_ALEN];
+ u8 tmp_mac_addr[MAX_ADDR_LEN];
- ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
- alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
- alb_set_slave_mac_addr(slave2, tmp_mac_addr);
+ bond_hw_addr_copy(tmp_mac_addr, slave1->dev->dev_addr,
+ slave1->dev->addr_len);
+ alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr,
+ slave2->dev->addr_len);
+ alb_set_slave_mac_addr(slave2, tmp_mac_addr,
+ slave1->dev->addr_len);
}
@@ -1177,7 +1182,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* Try setting slave mac to bond address and fall-through
* to code handling that situation below...
*/
- alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
+ alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+ bond->dev->addr_len);
}
/* The slave's address is equal to the address of the bond.
@@ -1202,7 +1208,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
}
if (free_mac_slave) {
- alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
+ alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
+ free_mac_slave->dev->addr_len);
netdev_warn(bond->dev, "the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
slave->dev->name, free_mac_slave->dev->name);
@@ -1234,8 +1241,8 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
{
struct slave *slave, *rollback_slave;
struct list_head *iter;
- struct sockaddr sa;
- char tmp_addr[ETH_ALEN];
+ struct sockaddr_storage ss;
+ char tmp_addr[MAX_ADDR_LEN];
int res;
if (bond->alb_info.rlb_enabled)
@@ -1243,12 +1250,14 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
- ether_addr_copy(tmp_addr, slave->dev->dev_addr);
+ bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr,
+ slave->dev->addr_len);
res = dev_set_mac_address(slave->dev, addr);
/* restore net_device's hw address */
- ether_addr_copy(slave->dev->dev_addr, tmp_addr);
+ bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
+ slave->dev->addr_len);
if (res)
goto unwind;
@@ -1257,16 +1266,19 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
return 0;
unwind:
- memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
- sa.sa_family = bond->dev->type;
+ memcpy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len);
+ ss.ss_family = bond->dev->type;
/* unwind from head to the slave that failed */
bond_for_each_slave(bond, rollback_slave, iter) {
if (rollback_slave == slave)
break;
- ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
- dev_set_mac_address(rollback_slave->dev, &sa);
- ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
+ bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
+ rollback_slave->dev->addr_len);
+ dev_set_mac_address(rollback_slave->dev,
+ (struct sockaddr *)&ss);
+ bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
+ rollback_slave->dev->addr_len);
}
return res;
@@ -1582,7 +1594,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
int res;
- res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
+ res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
+ slave->dev->addr_len);
if (res)
return res;
@@ -1696,17 +1709,20 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
* and thus filter bond->dev_addr's packets, so force bond's mac
*/
if (BOND_MODE(bond) == BOND_MODE_TLB) {
- struct sockaddr sa;
- u8 tmp_addr[ETH_ALEN];
+ struct sockaddr_storage ss;
+ u8 tmp_addr[MAX_ADDR_LEN];
- ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
+ bond_hw_addr_copy(tmp_addr, new_slave->dev->dev_addr,
+ new_slave->dev->addr_len);
- memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
- sa.sa_family = bond->dev->type;
+ bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
+ bond->dev->addr_len);
+ ss.ss_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
- dev_set_mac_address(new_slave->dev, &sa);
+ dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss);
- ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
+ bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
+ new_slave->dev->addr_len);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1716,7 +1732,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
alb_fasten_mac_swap(bond, swap_slave, new_slave);
} else {
/* set the new_slave to the bond mac address */
- alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
+ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
+ bond->dev->addr_len);
alb_send_learning_packets(new_slave, bond->dev->dev_addr,
false);
}
@@ -1726,19 +1743,19 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct sockaddr *sa = addr;
+ struct sockaddr_storage *ss = addr;
struct slave *curr_active;
struct slave *swap_slave;
int res;
- if (!is_valid_ether_addr(sa->sa_data))
+ if (!is_valid_ether_addr(ss->__data))
return -EADDRNOTAVAIL;
res = alb_set_mac_address(bond, addr);
if (res)
return res;
- memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
+ bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
/* If there is no curr_active_slave there is nothing else to do.
* Otherwise we'll need to pass the new address to it and handle
@@ -1754,7 +1771,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
alb_swap_mac_addr(swap_slave, curr_active);
alb_fasten_mac_swap(bond, swap_slave, curr_active);
} else {
- alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr);
+ alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr,
+ bond_dev->addr_len);
alb_send_learning_packets(curr_active,
bond_dev->dev_addr, false);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 34481c9be1d1..2be78807fd6e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -201,12 +201,6 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
-static __be32 arp_target[BOND_MAX_ARP_TARGETS];
-static int arp_ip_count;
-static int bond_mode = BOND_MODE_ROUNDROBIN;
-static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
-static int lacp_fast;
-
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
@@ -371,9 +365,10 @@ down:
/* Get link speed and duplex from the slave's base driver
* using ethtool. If for some reason the call fails or the
* values are invalid, set speed and duplex to -1,
- * and return.
+ * and return. Returns 1 if the speed or duplex setting is
+ * UNKNOWN; 0 otherwise.
*/
-static void bond_update_speed_duplex(struct slave *slave)
+static int bond_update_speed_duplex(struct slave *slave)
{
struct net_device *slave_dev = slave->dev;
struct ethtool_link_ksettings ecmd;
@@ -384,23 +379,21 @@ static void bond_update_speed_duplex(struct slave *slave)
res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
if (res < 0)
- return;
-
+ return 1;
if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
- return;
-
+ return 1;
switch (ecmd.base.duplex) {
case DUPLEX_FULL:
case DUPLEX_HALF:
break;
default:
- return;
+ return 1;
}
slave->speed = ecmd.base.speed;
slave->duplex = ecmd.base.duplex;
- return;
+ return 0;
}
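
With bond_update_speed_duplex() now returning a status, callers can treat an unresolved speed/duplex as a down link; the enslave and miimon-commit hunks later in this patch do exactly that:

	/* usage pattern, as applied further below */
	if (bond_update_speed_duplex(new_slave))
		new_slave->link = BOND_LINK_DOWN;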
const char *bond_slave_link_status(s8 link)
@@ -652,8 +645,8 @@ static void bond_do_fail_over_mac(struct bonding *bond,
struct slave *new_active,
struct slave *old_active)
{
- u8 tmp_mac[ETH_ALEN];
- struct sockaddr saddr;
+ u8 tmp_mac[MAX_ADDR_LEN];
+ struct sockaddr_storage ss;
int rv;
switch (bond->params.fail_over_mac) {
@@ -673,16 +666,20 @@ static void bond_do_fail_over_mac(struct bonding *bond,
old_active = bond_get_old_active(bond, new_active);
if (old_active) {
- ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
- ether_addr_copy(saddr.sa_data,
- old_active->dev->dev_addr);
- saddr.sa_family = new_active->dev->type;
+ bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
+ new_active->dev->addr_len);
+ bond_hw_addr_copy(ss.__data,
+ old_active->dev->dev_addr,
+ old_active->dev->addr_len);
+ ss.ss_family = new_active->dev->type;
} else {
- ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
- saddr.sa_family = bond->dev->type;
+ bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
+ bond->dev->addr_len);
+ ss.ss_family = bond->dev->type;
}
- rv = dev_set_mac_address(new_active->dev, &saddr);
+ rv = dev_set_mac_address(new_active->dev,
+ (struct sockaddr *)&ss);
if (rv) {
netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
-rv, new_active->dev->name);
@@ -692,10 +689,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!old_active)
goto out;
- ether_addr_copy(saddr.sa_data, tmp_mac);
- saddr.sa_family = old_active->dev->type;
+ bond_hw_addr_copy(ss.__data, tmp_mac,
+ new_active->dev->addr_len);
+ ss.ss_family = old_active->dev->type;
- rv = dev_set_mac_address(old_active->dev, &saddr);
+ rv = dev_set_mac_address(old_active->dev,
+ (struct sockaddr *)&ss);
if (rv)
netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
-rv, new_active->dev->name);
@@ -1177,6 +1176,9 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
+ /* don't change skb->dev for link-local packets */
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ return RX_HANDLER_PASS;
if (bond_should_deliver_exact_match(skb, slave, bond))
return RX_HANDLER_EXACT;
@@ -1191,7 +1193,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
- ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
+ bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
+ bond->dev->addr_len);
}
return ret;
@@ -1330,7 +1333,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
- struct sockaddr addr;
+ struct sockaddr_storage ss;
int link_reporting;
int res = 0, i;
@@ -1481,16 +1484,17 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* that need it, and for restoring it upon release, and then
* set it to the master's address
*/
- ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
+ bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
+ slave_dev->addr_len);
if (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
*/
- memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
- addr.sa_family = slave_dev->type;
- res = dev_set_mac_address(slave_dev, &addr);
+ memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+ ss.ss_family = slave_dev->type;
+ res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
if (res) {
netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
goto err_restore_mtu;
@@ -1565,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- bond_update_speed_duplex(new_slave);
+ if (bond_update_speed_duplex(new_slave))
+ new_slave->link = BOND_LINK_DOWN;
new_slave->last_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
@@ -1773,9 +1778,10 @@ err_restore_mac:
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
*/
- ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
- addr.sa_family = slave_dev->type;
- dev_set_mac_address(slave_dev, &addr);
+ bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
+ new_slave->dev->addr_len);
+ ss.ss_family = slave_dev->type;
+ dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
}
err_restore_mtu:
@@ -1818,7 +1824,7 @@ static int __bond_release_one(struct net_device *bond_dev,
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
- struct sockaddr addr;
+ struct sockaddr_storage ss;
int old_flags = bond_dev->flags;
netdev_features_t old_features = bond_dev->features;
@@ -1953,9 +1959,10 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
- ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
- addr.sa_family = slave_dev->type;
- dev_set_mac_address(slave_dev, &addr);
+ bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
+ slave->dev->addr_len);
+ ss.ss_family = slave_dev->type;
+ dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
}
dev_set_mtu(slave_dev, slave->original_mtu);
@@ -2039,8 +2046,7 @@ static int bond_miimon_inspect(struct bonding *bond)
if (link_state)
continue;
- bond_set_slave_link_state(slave, BOND_LINK_FAIL,
- BOND_SLAVE_NOTIFY_LATER);
+ bond_propose_link_state(slave, BOND_LINK_FAIL);
slave->delay = bond->params.downdelay;
if (slave->delay) {
netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -2055,13 +2061,13 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_FAIL:
if (link_state) {
/* recovered before downdelay expired */
- bond_set_slave_link_state(slave, BOND_LINK_UP,
- BOND_SLAVE_NOTIFY_LATER);
+ bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
(bond->params.downdelay - slave->delay) *
bond->params.miimon,
slave->dev->name);
+ commit++;
continue;
}
@@ -2078,8 +2084,7 @@ static int bond_miimon_inspect(struct bonding *bond)
if (!link_state)
continue;
- bond_set_slave_link_state(slave, BOND_LINK_BACK,
- BOND_SLAVE_NOTIFY_LATER);
+ bond_propose_link_state(slave, BOND_LINK_BACK);
slave->delay = bond->params.updelay;
if (slave->delay) {
@@ -2092,14 +2097,12 @@ static int bond_miimon_inspect(struct bonding *bond)
/*FALLTHRU*/
case BOND_LINK_BACK:
if (!link_state) {
- bond_set_slave_link_state(slave,
- BOND_LINK_DOWN,
- BOND_SLAVE_NOTIFY_LATER);
+ bond_propose_link_state(slave, BOND_LINK_DOWN);
netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
(bond->params.updelay - slave->delay) *
bond->params.miimon,
slave->dev->name);
-
+ commit++;
continue;
}
@@ -2132,7 +2135,13 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- bond_update_speed_duplex(slave);
+ if (bond_update_speed_duplex(slave)) {
+ slave->link = BOND_LINK_DOWN;
+ netdev_warn(bond->dev,
+ "failed to get link speed/duplex for %s\n",
+ slave->dev->name);
+ continue;
+ }
bond_set_slave_link_state(slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
slave->last_link_up = jiffies;
@@ -2231,6 +2240,8 @@ static void bond_mii_monitor(struct work_struct *work)
mii_work.work);
bool should_notify_peers = false;
unsigned long delay;
+ struct slave *slave;
+ struct list_head *iter;
delay = msecs_to_jiffies(bond->params.miimon);
@@ -2251,6 +2262,9 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
}
+ bond_for_each_slave(bond, slave, iter) {
+ bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+ }
bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
@@ -2575,10 +2589,8 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
* arp is transmitted to generate traffic. see activebackup_arp_monitor for
* arp monitoring in active backup mode.
*/
-static void bond_loadbalance_arp_mon(struct work_struct *work)
+static void bond_loadbalance_arp_mon(struct bonding *bond)
{
- struct bonding *bond = container_of(work, struct bonding,
- arp_work.work);
struct slave *slave, *oldcurrent;
struct list_head *iter;
int do_failover = 0, slave_state_changed = 0;
@@ -2916,10 +2928,8 @@ check_state:
return should_notify_rtnl;
}
-static void bond_activebackup_arp_mon(struct work_struct *work)
+static void bond_activebackup_arp_mon(struct bonding *bond)
{
- struct bonding *bond = container_of(work, struct bonding,
- arp_work.work);
bool should_notify_peers = false;
bool should_notify_rtnl = false;
int delta_in_ticks;
@@ -2972,6 +2982,17 @@ re_arm:
}
}
+static void bond_arp_monitor(struct work_struct *work)
+{
+ struct bonding *bond = container_of(work, struct bonding,
+ arp_work.work);
+
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
+ bond_activebackup_arp_mon(bond);
+ else
+ bond_loadbalance_arp_mon(bond);
+}
+
/*-------------------------- netdev event handling --------------------------*/
/* Change device name */
@@ -3222,16 +3243,13 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
/*-------------------------- Device entry points ----------------------------*/
-static void bond_work_init_all(struct bonding *bond)
+void bond_work_init_all(struct bonding *bond)
{
INIT_DELAYED_WORK(&bond->mcast_work,
bond_resend_igmp_join_requests_delayed);
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
- if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
- INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
- else
- INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
+ INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
@@ -3266,8 +3284,6 @@ static int bond_open(struct net_device *bond_dev)
}
}
- bond_work_init_all(bond);
-
if (bond_is_lb(bond)) {
/* bond_alb_initialize must be called before the timer
* is started.
@@ -3327,12 +3343,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
u64 nv = new[i];
u64 ov = old[i];
+ s64 delta = nv - ov;
/* detects if this particular field is 32bit only */
if (((nv | ov) >> 32) == 0)
- res[i] += (u32)nv - (u32)ov;
- else
- res[i] += nv - ov;
+ delta = (s64)(s32)((u32)nv - (u32)ov);
+
+ /* filter anomalies, some drivers reset their stats
+ * at down/up events.
+ */
+ if (delta > 0)
+ res[i] += delta;
}
}
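
The delta computation above handles both 32-bit-only counters that wrap and drivers that reset their stats across down/up events. A small worked example of the wrap case (values invented for illustration):

	u64 ov = 0xfffffff0;	/* old snapshot of a 32-bit counter */
	u64 nv = 0x00000010;	/* new snapshot, after the counter wrapped */
	s64 delta = (s64)(s32)((u32)nv - (u32)ov);	/* == 0x20, the true delta */

Computed directly in 64 bits, nv - ov would be a large negative number and the `delta > 0` filter would silently drop the sample; the cast chain recovers the correct 32-bit difference first.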
@@ -3619,7 +3640,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *rollback_slave;
- struct sockaddr *sa = addr, tmp_sa;
+ struct sockaddr_storage *ss = addr, tmp_ss;
struct list_head *iter;
int res = 0;
@@ -3636,7 +3657,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
return 0;
- if (!is_valid_ether_addr(sa->sa_data))
+ if (!is_valid_ether_addr(ss->__data))
return -EADDRNOTAVAIL;
bond_for_each_slave(bond, slave, iter) {
@@ -3655,12 +3676,12 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
}
/* success */
- memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
+ memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
return 0;
unwind:
- memcpy(tmp_sa.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
- tmp_sa.sa_family = bond_dev->type;
+ memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+ tmp_ss.ss_family = bond_dev->type;
/* unwind from head to the slave that failed */
bond_for_each_slave(bond, rollback_slave, iter) {
@@ -3669,7 +3690,8 @@ unwind:
if (rollback_slave == slave)
break;
- tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
+ tmp_res = dev_set_mac_address(rollback_slave->dev,
+ (struct sockaddr *)&tmp_ss);
if (tmp_res) {
netdev_dbg(bond_dev, "unwind err %d dev %s\n",
tmp_res, rollback_slave->dev->name);
@@ -4252,6 +4274,12 @@ static int bond_check_params(struct bond_params *params)
int arp_all_targets_value;
u16 ad_actor_sys_prio = 0;
u16 ad_user_port_key = 0;
+ __be32 arp_target[BOND_MAX_ARP_TARGETS];
+ int arp_ip_count;
+ int bond_mode = BOND_MODE_ROUNDROBIN;
+ int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
+ int lacp_fast = 0;
+ int tlb_dynamic_lb = 0;
/* Convert string parameters. */
if (mode) {
@@ -4564,6 +4592,17 @@ static int bond_check_params(struct bond_params *params)
}
ad_user_port_key = valptr->value;
+ if (bond_mode == BOND_MODE_TLB) {
+ bond_opt_initstr(&newval, "default");
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
+ &newval);
+ if (!valptr) {
+ pr_err("Error: No tlb_dynamic_lb default value");
+ return -EINVAL;
+ }
+ tlb_dynamic_lb = valptr->value;
+ }
+
if (lp_interval == 0) {
pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
@@ -4591,7 +4630,7 @@ static int bond_check_params(struct bond_params *params)
params->min_links = min_links;
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
- params->tlb_dynamic_lb = 1; /* Default value */
+ params->tlb_dynamic_lb = tlb_dynamic_lb;
params->ad_actor_sys_prio = ad_actor_sys_prio;
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
@@ -4687,6 +4726,8 @@ int bond_create(struct net *net, const char *name)
netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
+
rtnl_unlock();
if (res < 0)
bond_destructor(bond_dev);
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index b8df0f5e8c25..c502c139d3bc 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -449,6 +449,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
err = register_netdevice(bond_dev);
netif_carrier_off(bond_dev);
+ if (!err) {
+ struct bonding *bond = netdev_priv(bond_dev);
+
+ bond_work_init_all(bond);
+ }
return err;
}
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index f514fe5e80a5..d8d4ada034b7 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -183,7 +183,8 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "Link Failure Count: %u\n",
slave->link_failure_count);
- seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
+ seq_printf(seq, "Permanent HW addr: %*phC\n",
+ slave->dev->addr_len, slave->perm_hwaddr);
seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 22570ea3a8d2..ac4ff394bc56 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -9,6 +9,24 @@ config CAN_VCAN
This driver can also be built as a module. If so, the module
will be called vcan.
+config CAN_VXCAN
+ tristate "Virtual CAN Tunnel (vxcan)"
+ ---help---
+ Similar to the virtual ethernet driver veth, vxcan implements a
+ local CAN traffic tunnel between two virtual CAN network devices.
+ When creating a vxcan, two vxcan devices are created as a pair.
+ When one end receives a packet, it appears on its pair and vice
+ versa. The vxcan can be used for cross-namespace communication.
+
+ Unlike the vcan loopback devices, the vxcan only forwards CAN
+ frames to its pair and does *not* provide a local echo of sent
+ CAN frames. To suppress a potential echo in af_can.c, the vxcan
+ driver announces IFF_ECHO in its interface flags. To give each
+ namespace a clean start, the CAN GW hop counter is set to zero.
+
+ This driver can also be built as a module. If so, the module
+ will be called vxcan.
+
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
depends on TTY
@@ -142,6 +160,7 @@ source "drivers/net/can/cc770/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
+source "drivers/net/can/peak_canfd/Kconfig"
source "drivers/net/can/rcar/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0da4f2f5c7e3..4aabbee133b8 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_CAN_VCAN) += vcan.o
+obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan.o
obj-$(CONFIG_CAN_DEV) += can-dev.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
+obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 7a6554efd42b..bf8fdaeb955e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -23,7 +23,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-
+#include <linux/iopoll.h>
#include <linux/can/dev.h>
/* napi related */
@@ -37,17 +37,19 @@ enum m_can_reg {
M_CAN_CREL = 0x0,
M_CAN_ENDN = 0x4,
M_CAN_CUST = 0x8,
- M_CAN_FBTP = 0xc,
+ M_CAN_DBTP = 0xc,
M_CAN_TEST = 0x10,
M_CAN_RWD = 0x14,
M_CAN_CCCR = 0x18,
- M_CAN_BTP = 0x1c,
+ M_CAN_NBTP = 0x1c,
M_CAN_TSCC = 0x20,
M_CAN_TSCV = 0x24,
M_CAN_TOCC = 0x28,
M_CAN_TOCV = 0x2c,
M_CAN_ECR = 0x40,
M_CAN_PSR = 0x44,
+/* TDCR Register only available for version >=3.1.x */
+ M_CAN_TDCR = 0x48,
M_CAN_IR = 0x50,
M_CAN_IE = 0x54,
M_CAN_ILS = 0x58,
@@ -105,21 +107,29 @@ enum m_can_mram_cfg {
MRAM_CFG_NUM,
};
-/* Fast Bit Timing & Prescaler Register (FBTP) */
-#define FBTR_FBRP_MASK 0x1f
-#define FBTR_FBRP_SHIFT 16
-#define FBTR_FTSEG1_SHIFT 8
-#define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT)
-#define FBTR_FTSEG2_SHIFT 4
-#define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT)
-#define FBTR_FSJW_SHIFT 0
-#define FBTR_FSJW_MASK 0x3
+/* Core Release Register (CREL) */
+#define CREL_REL_SHIFT 28
+#define CREL_REL_MASK (0xF << CREL_REL_SHIFT)
+#define CREL_STEP_SHIFT 24
+#define CREL_STEP_MASK (0xF << CREL_STEP_SHIFT)
+#define CREL_SUBSTEP_SHIFT 20
+#define CREL_SUBSTEP_MASK (0xF << CREL_SUBSTEP_SHIFT)
+
+/* Data Bit Timing & Prescaler Register (DBTP) */
+#define DBTP_TDC BIT(23)
+#define DBTP_DBRP_SHIFT 16
+#define DBTP_DBRP_MASK (0x1f << DBTP_DBRP_SHIFT)
+#define DBTP_DTSEG1_SHIFT 8
+#define DBTP_DTSEG1_MASK (0x1f << DBTP_DTSEG1_SHIFT)
+#define DBTP_DTSEG2_SHIFT 4
+#define DBTP_DTSEG2_MASK (0xf << DBTP_DTSEG2_SHIFT)
+#define DBTP_DSJW_SHIFT 0
+#define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT)
/* Test Register (TEST) */
-#define TEST_LBCK BIT(4)
+#define TEST_LBCK BIT(4)
/* CC Control Register(CCCR) */
-#define CCCR_TEST BIT(7)
#define CCCR_CMR_MASK 0x3
#define CCCR_CMR_SHIFT 10
#define CCCR_CMR_CANFD 0x1
@@ -130,21 +140,32 @@ enum m_can_mram_cfg {
#define CCCR_CME_CAN 0
#define CCCR_CME_CANFD 0x1
#define CCCR_CME_CANFD_BRS 0x2
+#define CCCR_TXP BIT(14)
#define CCCR_TEST BIT(7)
#define CCCR_MON BIT(5)
+#define CCCR_CSR BIT(4)
+#define CCCR_CSA BIT(3)
+#define CCCR_ASM BIT(2)
#define CCCR_CCE BIT(1)
#define CCCR_INIT BIT(0)
#define CCCR_CANFD 0x10
-
-/* Bit Timing & Prescaler Register (BTP) */
-#define BTR_BRP_MASK 0x3ff
-#define BTR_BRP_SHIFT 16
-#define BTR_TSEG1_SHIFT 8
-#define BTR_TSEG1_MASK (0x3f << BTR_TSEG1_SHIFT)
-#define BTR_TSEG2_SHIFT 4
-#define BTR_TSEG2_MASK (0xf << BTR_TSEG2_SHIFT)
-#define BTR_SJW_SHIFT 0
-#define BTR_SJW_MASK 0xf
+/* for version >=3.1.x */
+#define CCCR_EFBI BIT(13)
+#define CCCR_PXHD BIT(12)
+#define CCCR_BRSE BIT(9)
+#define CCCR_FDOE BIT(8)
+/* only for version >=3.2.x */
+#define CCCR_NISO BIT(15)
+
+/* Nominal Bit Timing & Prescaler Register (NBTP) */
+#define NBTP_NSJW_SHIFT 25
+#define NBTP_NSJW_MASK (0x7f << NBTP_NSJW_SHIFT)
+#define NBTP_NBRP_SHIFT 16
+#define NBTP_NBRP_MASK (0x1ff << NBTP_NBRP_SHIFT)
+#define NBTP_NTSEG1_SHIFT 8
+#define NBTP_NTSEG1_MASK (0xff << NBTP_NTSEG1_SHIFT)
+#define NBTP_NTSEG2_SHIFT 0
+#define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT)
/* Error Counter Register(ECR) */
#define ECR_RP BIT(15)
@@ -161,6 +182,13 @@ enum m_can_mram_cfg {
/* Interrupt Register(IR) */
#define IR_ALL_INT 0xffffffff
+
+/* Renamed bits for versions >= 3.1.x */
+#define IR_ARA BIT(29)
+#define IR_PED BIT(28)
+#define IR_PEA BIT(27)
+
+/* Bits for version 3.0.x */
#define IR_STE BIT(31)
#define IR_FOE BIT(30)
#define IR_ACKE BIT(29)
@@ -194,33 +222,40 @@ enum m_can_mram_cfg {
#define IR_RF0W BIT(1)
#define IR_RF0N BIT(0)
#define IR_ERR_STATE (IR_BO | IR_EW | IR_EP)
-#define IR_ERR_LEC (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
-#define IR_ERR_BUS (IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \
+
+/* Interrupts for version 3.0.x */
+#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
+#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
IR_RF1L | IR_RF0L)
-#define IR_ERR_ALL (IR_ERR_STATE | IR_ERR_BUS)
+#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
+/* Interrupts for version >= 3.1.x */
+#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
+#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
+ IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
+ IR_RF1L | IR_RF0L)
+#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0 0x0
#define ILS_ALL_INT1 0xFFFFFFFF
/* Interrupt Line Enable (ILE) */
-#define ILE_EINT0 BIT(0)
#define ILE_EINT1 BIT(1)
+#define ILE_EINT0 BIT(0)
/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
-#define RXFC_FWM_OFF 24
-#define RXFC_FWM_MASK 0x7f
-#define RXFC_FWM_1 (1 << RXFC_FWM_OFF)
-#define RXFC_FS_OFF 16
-#define RXFC_FS_MASK 0x7f
+#define RXFC_FWM_SHIFT 24
+#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
+#define RXFC_FS_SHIFT 16
+#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)
/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL BIT(25)
#define RXFS_FF BIT(24)
-#define RXFS_FPI_OFF 16
+#define RXFS_FPI_SHIFT 16
#define RXFS_FPI_MASK 0x3f0000
-#define RXFS_FGI_OFF 8
+#define RXFS_FGI_SHIFT 8
#define RXFS_FGI_MASK 0x3f00
#define RXFS_FFL_MASK 0x7f
@@ -229,23 +264,46 @@ enum m_can_mram_cfg {
#define M_CAN_RXESC_64BYTES 0x777
/* Tx Buffer Configuration(TXBC) */
-#define TXBC_NDTB_OFF 16
-#define TXBC_NDTB_MASK 0x3f
+#define TXBC_NDTB_SHIFT 16
+#define TXBC_NDTB_MASK (0x3f << TXBC_NDTB_SHIFT)
+#define TXBC_TFQS_SHIFT 24
+#define TXBC_TFQS_MASK (0x3f << TXBC_TFQS_SHIFT)
+
+/* Tx FIFO/Queue Status (TXFQS) */
+#define TXFQS_TFQF BIT(21)
+#define TXFQS_TFQPI_SHIFT 16
+#define TXFQS_TFQPI_MASK (0x1f << TXFQS_TFQPI_SHIFT)
+#define TXFQS_TFGI_SHIFT 8
+#define TXFQS_TFGI_MASK (0x1f << TXFQS_TFGI_SHIFT)
+#define TXFQS_TFFL_SHIFT 0
+#define TXFQS_TFFL_MASK (0x3f << TXFQS_TFFL_SHIFT)
/* Tx Buffer Element Size Configuration(TXESC) */
#define TXESC_TBDS_8BYTES 0x0
#define TXESC_TBDS_64BYTES 0x7
-/* Tx Event FIFO Con.guration (TXEFC) */
-#define TXEFC_EFS_OFF 16
-#define TXEFC_EFS_MASK 0x3f
+/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFS_SHIFT 16
+#define TXEFC_EFS_MASK (0x3f << TXEFC_EFS_SHIFT)
+
+/* Tx Event FIFO Status (TXEFS) */
+#define TXEFS_TEFL BIT(25)
+#define TXEFS_EFF BIT(24)
+#define TXEFS_EFGI_SHIFT 8
+#define TXEFS_EFGI_MASK (0x1f << TXEFS_EFGI_SHIFT)
+#define TXEFS_EFFL_SHIFT 0
+#define TXEFS_EFFL_MASK (0x3f << TXEFS_EFFL_SHIFT)
+
+/* Tx Event FIFO Acknowledge (TXEFA) */
+#define TXEFA_EFAI_SHIFT 0
+#define TXEFA_EFAI_MASK (0x1f << TXEFA_EFAI_SHIFT)
/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE 4
#define XIDF_ELEMENT_SIZE 8
#define RXF0_ELEMENT_SIZE 72
#define RXF1_ELEMENT_SIZE 72
-#define RXB_ELEMENT_SIZE 16
+#define RXB_ELEMENT_SIZE 72
#define TXE_ELEMENT_SIZE 8
#define TXB_ELEMENT_SIZE 72
@@ -261,13 +319,25 @@ enum m_can_mram_cfg {
#define RX_BUF_RTR BIT(29)
/* R1 */
#define RX_BUF_ANMF BIT(31)
-#define RX_BUF_EDL BIT(21)
+#define RX_BUF_FDF BIT(21)
#define RX_BUF_BRS BIT(20)
/* Tx Buffer Element */
-/* R0 */
+/* T0 */
+#define TX_BUF_ESI BIT(31)
#define TX_BUF_XTD BIT(30)
#define TX_BUF_RTR BIT(29)
+/* T1 */
+#define TX_BUF_EFC BIT(23)
+#define TX_BUF_FDF BIT(21)
+#define TX_BUF_BRS BIT(20)
+#define TX_BUF_MM_SHIFT 24
+#define TX_BUF_MM_MASK (0xff << TX_BUF_MM_SHIFT)
+
+/* Tx event FIFO Element */
+/* E1 */
+#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT
+#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT)
/* address offset and element number for each FIFO/Buffer in the Message RAM */
struct mram_cfg {
@@ -285,6 +355,7 @@ struct m_can_priv {
struct clk *cclk;
void __iomem *base;
u32 irqstatus;
+ int version;
/* message ram configuration */
void __iomem *mram_base;
@@ -316,6 +387,18 @@ static inline void m_can_fifo_write(const struct m_can_priv *priv,
fpi * TXB_ELEMENT_SIZE + offset);
}
+static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv,
+ u32 fgi,
+ u32 offset) {
+ return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off +
+ fgi * TXE_ELEMENT_SIZE + offset);
+}
+
+static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv)
+{
+ return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF);
+}
+
static inline void m_can_config_endisable(const struct m_can_priv *priv,
bool enable)
{
@@ -349,7 +432,8 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv,
static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
{
- m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1);
+ /* Only interrupt line 0 is used in this driver */
+ m_can_write(priv, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
@@ -367,9 +451,9 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
int i;
/* calculate the fifo get index for where to read data */
- fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF;
+ fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
- if (dlc & RX_BUF_EDL)
+ if (dlc & RX_BUF_FDF)
skb = alloc_canfd_skb(dev, &cf);
else
skb = alloc_can_skb(dev, (struct can_frame **)&cf);
@@ -378,7 +462,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
return;
}
- if (dlc & RX_BUF_EDL)
+ if (dlc & RX_BUF_FDF)
cf->len = can_dlc2len((dlc >> 16) & 0x0F);
else
cf->len = get_can_dlc((dlc >> 16) & 0x0F);
@@ -394,7 +478,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
netdev_dbg(dev, "ESI Error\n");
}
- if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) {
+ if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) {
cf->can_id |= CAN_RTR_FLAG;
} else {
if (dlc & RX_BUF_BRS)
@@ -532,7 +616,7 @@ static int __m_can_get_berr_counter(const struct net_device *dev,
ecr = m_can_read(priv, M_CAN_ECR);
bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
- bec->txerr = ecr & ECR_TEC_MASK;
+ bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;
return 0;
}
@@ -723,7 +807,7 @@ static int m_can_poll(struct napi_struct *napi, int quota)
if (irqstatus & IR_ERR_STATE)
work_done += m_can_handle_state_errors(dev, psr);
- if (irqstatus & IR_ERR_BUS)
+ if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus, psr);
if (irqstatus & IR_RF0N)
@@ -738,6 +822,44 @@ end:
return work_done;
}
+static void m_can_echo_tx_event(struct net_device *dev)
+{
+ u32 txe_count = 0;
+ u32 m_can_txefs;
+ u32 fgi = 0;
+ int i = 0;
+ unsigned int msg_mark;
+
+ struct m_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+
+ /* read tx event fifo status */
+ m_can_txefs = m_can_read(priv, M_CAN_TXEFS);
+
+ /* Get Tx Event fifo element count */
+ txe_count = (m_can_txefs & TXEFS_EFFL_MASK)
+ >> TXEFS_EFFL_SHIFT;
+
+ /* Get and process all sent elements */
+ for (i = 0; i < txe_count; i++) {
+ /* retrieve get index */
+ fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
+ >> TXEFS_EFGI_SHIFT;
+
+ /* get message marker */
+ msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) &
+ TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
+
+ /* ack txe element */
+ m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
+ (fgi << TXEFA_EFAI_SHIFT)));
+
+ /* update stats */
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
+ stats->tx_packets++;
+ }
+}
+
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
@@ -758,24 +880,35 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - state change IRQ
* - bus error IRQ and bus error reporting
*/
- if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) {
+ if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
priv->irqstatus = ir;
m_can_disable_all_interrupts(priv);
napi_schedule(&priv->napi);
}
- /* transmission complete interrupt */
- if (ir & IR_TC) {
- stats->tx_bytes += can_get_echo_skb(dev, 0);
- stats->tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
- netif_wake_queue(dev);
+ if (priv->version == 30) {
+ if (ir & IR_TC) {
+ /* Transmission Complete Interrupt */
+ stats->tx_bytes += can_get_echo_skb(dev, 0);
+ stats->tx_packets++;
+ can_led_event(dev, CAN_LED_EVENT_TX);
+ netif_wake_queue(dev);
+ }
+ } else {
+ if (ir & IR_TEFN) {
+ /* New TX FIFO Element arrived */
+ m_can_echo_tx_event(dev);
+ can_led_event(dev, CAN_LED_EVENT_TX);
+ if (netif_queue_stopped(dev) &&
+ !m_can_tx_fifo_full(priv))
+ netif_wake_queue(dev);
+ }
}
return IRQ_HANDLED;
}
-static const struct can_bittiming_const m_can_bittiming_const = {
+static const struct can_bittiming_const m_can_bittiming_const_30X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 64,
@@ -787,7 +920,7 @@ static const struct can_bittiming_const m_can_bittiming_const = {
.brp_inc = 1,
};
-static const struct can_bittiming_const m_can_data_bittiming_const = {
+static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 16,
@@ -799,6 +932,30 @@ static const struct can_bittiming_const m_can_data_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const m_can_bittiming_const_31X = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 256,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 32,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_priv *priv = netdev_priv(dev);
@@ -811,19 +968,19 @@ static int m_can_set_bittiming(struct net_device *dev)
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
- reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) |
- (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT);
- m_can_write(priv, M_CAN_BTP, reg_btp);
+ reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
+ (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
+ m_can_write(priv, M_CAN_NBTP, reg_btp);
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
brp = dbt->brp - 1;
sjw = dbt->sjw - 1;
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
- reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) |
- (tseg1 << FBTR_FTSEG1_SHIFT) |
- (tseg2 << FBTR_FTSEG2_SHIFT);
- m_can_write(priv, M_CAN_FBTP, reg_btp);
+ reg_btp = (brp << DBTP_DBRP_SHIFT) | (sjw << DBTP_DSJW_SHIFT) |
+ (tseg1 << DBTP_DTSEG1_SHIFT) |
+ (tseg2 << DBTP_DTSEG2_SHIFT);
+ m_can_write(priv, M_CAN_DBTP, reg_btp);
}
return 0;
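
To make the NBTP packing concrete, here is a worked example with invented bittiming values (not taken from this patch):

	/* say bt->brp = 5, bt->sjw = 1, prop_seg + phase_seg1 = 13, phase_seg2 = 2 */
	brp = 5 - 1;	/* 4 */
	sjw = 1 - 1;	/* 0 */
	tseg1 = 13 - 1;	/* 12 */
	tseg2 = 2 - 1;	/* 1 */
	reg_btp = (4 << NBTP_NBRP_SHIFT) | (0 << NBTP_NSJW_SHIFT) |
		  (12 << NBTP_NTSEG1_SHIFT) | (1 << NBTP_NTSEG2_SHIFT);
	/* = 0x00040c01 */

Each field is programmed as the configured value minus one, matching the register convention where 0 encodes 1.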
@@ -834,6 +991,7 @@ static int m_can_set_bittiming(struct net_device *dev)
* - configure rx fifo
* - accept non-matching frame into fifo 0
* - configure tx buffer
+ * - >= v3.1.x: TX FIFO is used
* - configure mode
* - setup bittiming
*/
@@ -850,49 +1008,89 @@ static void m_can_chip_config(struct net_device *dev)
/* Accept Non-matching Frames Into FIFO 0 */
m_can_write(priv, M_CAN_GFC, 0x0);
- /* only support one Tx Buffer currently */
- m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) |
- priv->mcfg[MRAM_TXB].off);
+ if (priv->version == 30) {
+ /* only support one Tx Buffer currently */
+ m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
+ priv->mcfg[MRAM_TXB].off);
+ } else {
+ /* TX FIFO is used for newer IP Core versions */
+ m_can_write(priv, M_CAN_TXBC,
+ (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
+ (priv->mcfg[MRAM_TXB].off));
+ }
/* support 64 bytes payload */
m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);
- m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) |
- priv->mcfg[MRAM_TXE].off);
+ /* TX Event FIFO */
+ if (priv->version == 30) {
+ m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
+ priv->mcfg[MRAM_TXE].off);
+ } else {
+ /* Full TX Event FIFO is used */
+ m_can_write(priv, M_CAN_TXEFC,
+ ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
+ & TXEFC_EFS_MASK) |
+ priv->mcfg[MRAM_TXE].off);
+ }
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(priv, M_CAN_RXF0C,
- (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) |
- RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off);
+ (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
+ priv->mcfg[MRAM_RXF0].off);
m_can_write(priv, M_CAN_RXF1C,
- (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) |
- RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off);
+ (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
+ priv->mcfg[MRAM_RXF1].off);
cccr = m_can_read(priv, M_CAN_CCCR);
- cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
- (CCCR_CME_MASK << CCCR_CME_SHIFT));
test = m_can_read(priv, M_CAN_TEST);
test &= ~TEST_LBCK;
+ if (priv->version == 30) {
+ /* Version 3.0.x */
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- cccr |= CCCR_MON;
+ cccr &= ~(CCCR_TEST | CCCR_MON |
+ (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
+ (CCCR_CME_MASK << CCCR_CME_SHIFT));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
+
+ } else {
+ /* Version 3.1.x or 3.2.x */
+ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+ /* Only 3.2.x has NISO Bit implemented */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ cccr |= CCCR_NISO;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ cccr |= (CCCR_BRSE | CCCR_FDOE);
+ }
+
+ /* Loopback Mode */
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
- cccr |= CCCR_TEST;
+ cccr |= CCCR_TEST | CCCR_MON;
test |= TEST_LBCK;
}
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
- cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
+ /* Enable Monitoring (all versions) */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ cccr |= CCCR_MON;
+ /* Write config */
m_can_write(priv, M_CAN_CCCR, cccr);
m_can_write(priv, M_CAN_TEST, test);
- /* enable interrupts */
+ /* Enable interrupts */
m_can_write(priv, M_CAN_IR, IR_ALL_INT);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
- m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC);
+ if (priv->version == 30)
+ m_can_write(priv, M_CAN_IE, IR_ALL_INT &
+ ~(IR_ERR_LEC_30X));
+ else
+ m_can_write(priv, M_CAN_IE, IR_ALL_INT &
+ ~(IR_ERR_LEC_31X));
else
m_can_write(priv, M_CAN_IE, IR_ALL_INT);
@@ -936,33 +1134,140 @@ static void free_m_can_dev(struct net_device *dev)
free_candev(dev);
}
-static struct net_device *alloc_m_can_dev(void)
+/* Checks core release number of M_CAN
+ * returns 0 if an unsupported device is detected
+ * else it returns the release and step coded as:
+ * return value = 10 * <release> + 1 * <step>
+ */
+static int m_can_check_core_release(void __iomem *m_can_base)
+{
+ u32 crel_reg;
+ u8 rel;
+ u8 step;
+ int res;
+ struct m_can_priv temp_priv = {
+ .base = m_can_base
+ };
+
+ /* Read Core Release Version and split into version number
+ * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
+ */
+ crel_reg = m_can_read(&temp_priv, M_CAN_CREL);
+ rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
+ step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);
+
+ if (rel == 3) {
+ /* M_CAN v3.x.y: create return value */
+ res = 30 + step;
+ } else {
+ /* Unsupported M_CAN version */
+ res = 0;
+ }
+
+ return res;
+}
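+
+/* As a decode example (CREL value invented for illustration): a core
+ * reporting CREL = 0x32150406 yields rel = 3 and step = 2, so the function
+ * returns 32 and the driver selects its v3.2.x paths:
+ *
+ *	rel  = (0x32150406 & CREL_REL_MASK)  >> CREL_REL_SHIFT;	// 3
+ *	step = (0x32150406 & CREL_STEP_MASK) >> CREL_STEP_SHIFT;	// 2
+ *	res  = 30 + step;					// 32
+ */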
+
+/* Selectable Non ISO support only in version 3.2.x
+ * This function checks if the bit is writable.
+ */
+static bool m_can_niso_supported(const struct m_can_priv *priv)
+{
+ u32 cccr_reg, cccr_poll;
+ int niso_timeout;
+
+ m_can_config_endisable(priv, true);
+ cccr_reg = m_can_read(priv, M_CAN_CCCR);
+ cccr_reg |= CCCR_NISO;
+ m_can_write(priv, M_CAN_CCCR, cccr_reg);
+
+ niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll,
+ (cccr_poll == cccr_reg), 0, 10);
+
+ /* Clear NISO */
+ cccr_reg &= ~(CCCR_NISO);
+ m_can_write(priv, M_CAN_CCCR, cccr_reg);
+
+ m_can_config_endisable(priv, false);
+
+ /* return false if time out (-ETIMEDOUT), else return true */
+ return !niso_timeout;
+}
+
+static struct net_device *alloc_m_can_dev(struct platform_device *pdev,
+ void __iomem *addr, u32 tx_fifo_size)
{
struct net_device *dev;
struct m_can_priv *priv;
+ int m_can_version;
+ unsigned int echo_buffer_count;
+
+ m_can_version = m_can_check_core_release(addr);
+ /* return if unsupported version */
+ if (!m_can_version) {
+ dev = NULL;
+ goto return_dev;
+ }
- dev = alloc_candev(sizeof(*priv), 1);
- if (!dev)
- return NULL;
+ /* If version < 3.1.x, then only one echo buffer is used */
+ echo_buffer_count = ((m_can_version == 30)
+ ? 1U
+ : (unsigned int)tx_fifo_size);
+ dev = alloc_candev(sizeof(*priv), echo_buffer_count);
+ if (!dev)
+ goto return_dev;
priv = netdev_priv(dev);
netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
+ /* Shared properties of all M_CAN versions */
+ priv->version = m_can_version;
priv->dev = dev;
- priv->can.bittiming_const = &m_can_bittiming_const;
- priv->can.data_bittiming_const = &m_can_data_bittiming_const;
+ priv->base = addr;
priv->can.do_set_mode = m_can_set_mode;
priv->can.do_get_berr_counter = m_can_get_berr_counter;
- /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
- can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
-
- /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
+ /* Set M_CAN supported operations */
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD;
+ /* Set properties depending on M_CAN version */
+ switch (priv->version) {
+ case 30:
+ /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
+ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ priv->can.bittiming_const = &m_can_bittiming_const_30X;
+ priv->can.data_bittiming_const =
+ &m_can_data_bittiming_const_30X;
+ break;
+ case 31:
+ /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
+ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ priv->can.bittiming_const = &m_can_bittiming_const_31X;
+ priv->can.data_bittiming_const =
+ &m_can_data_bittiming_const_31X;
+ break;
+ case 32:
+ priv->can.bittiming_const = &m_can_bittiming_const_31X;
+ priv->can.data_bittiming_const =
+ &m_can_data_bittiming_const_31X;
+ priv->can.ctrlmode_supported |= (m_can_niso_supported(priv)
+ ? CAN_CTRLMODE_FD_NON_ISO
+ : 0);
+ break;
+ default:
+ /* Unsupported device: free candev */
+ free_m_can_dev(dev);
+ dev_err(&pdev->dev, "Unsupported version number: %2d",
+ priv->version);
+ dev = NULL;
+ break;
+ }
+
+return_dev:
return dev;
}
@@ -1040,19 +1345,34 @@ static int m_can_close(struct net_device *dev)
return 0;
}
+static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
+{
+ struct m_can_priv *priv = netdev_priv(dev);
+ /* get wrap-around for loopback skb index */
+ unsigned int wrap = priv->can.echo_skb_max;
+ int next_idx;
+
+ /* calculate next index */
+ next_idx = (++putidx >= wrap ? 0 : putidx);
+
+ /* check if occupied */
+ return !!priv->can.echo_skb[next_idx];
+}
+
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_priv *priv = netdev_priv(dev);
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
- u32 id, cccr;
+ u32 id, cccr, fdflags;
int i;
+ int putidx;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
- netif_stop_queue(dev);
-
+ /* Generate ID field for TX buffer Element */
+ /* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
id = cf->can_id & CAN_EFF_MASK;
id |= TX_BUF_XTD;
@@ -1063,33 +1383,93 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
id |= TX_BUF_RTR;
- /* message ram configuration */
- m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
- m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16);
+ if (priv->version == 30) {
+ netif_stop_queue(dev);
- for (i = 0; i < cf->len; i += 4)
- m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4),
- *(u32 *)(cf->data + i));
+ /* message ram configuration */
+ m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
+ m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC,
+ can_len2dlc(cf->len) << 16);
- can_put_echo_skb(skb, dev, 0);
+ for (i = 0; i < cf->len; i += 4)
+ m_can_fifo_write(priv, 0,
+ M_CAN_FIFO_DATA(i / 4),
+ *(u32 *)(cf->data + i));
+
+ can_put_echo_skb(skb, dev, 0);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ cccr = m_can_read(priv, M_CAN_CCCR);
+ cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
+ if (can_is_canfd_skb(skb)) {
+ if (cf->flags & CANFD_BRS)
+ cccr |= CCCR_CMR_CANFD_BRS <<
+ CCCR_CMR_SHIFT;
+ else
+ cccr |= CCCR_CMR_CANFD <<
+ CCCR_CMR_SHIFT;
+ } else {
+ cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
+ }
+ m_can_write(priv, M_CAN_CCCR, cccr);
+ }
+ m_can_write(priv, M_CAN_TXBTIE, 0x1);
+ m_can_write(priv, M_CAN_TXBAR, 0x1);
+ /* End of xmit function for version 3.0.x */
+ } else {
+ /* Transmit routine for version >= v3.1.x */
+
+ /* Check if FIFO full */
+ if (m_can_tx_fifo_full(priv)) {
+ /* This shouldn't happen */
+ netif_stop_queue(dev);
+ netdev_warn(dev,
+ "TX queue active although FIFO is full.");
+ return NETDEV_TX_BUSY;
+ }
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- cccr = m_can_read(priv, M_CAN_CCCR);
- cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
+ /* get put index for frame */
+ putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
+ >> TXFQS_TFQPI_SHIFT);
+ /* Write ID Field to FIFO Element */
+ m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id);
+
+ /* get CAN FD configuration of frame */
+ fdflags = 0;
if (can_is_canfd_skb(skb)) {
+ fdflags |= TX_BUF_FDF;
if (cf->flags & CANFD_BRS)
- cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT;
- else
- cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT;
- } else {
- cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
+ fdflags |= TX_BUF_BRS;
}
- m_can_write(priv, M_CAN_CCCR, cccr);
- }
- /* enable first TX buffer to start transfer */
- m_can_write(priv, M_CAN_TXBTIE, 0x1);
- m_can_write(priv, M_CAN_TXBAR, 0x1);
+ /* Construct the DLC field; it also carries the CAN FD
+ * configuration. The FIFO put index is used as the message
+ * marker, which the TX interrupt later uses to send back
+ * the correct echo frame.
+ */
+ m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC,
+ ((putidx << TX_BUF_MM_SHIFT) &
+ TX_BUF_MM_MASK) |
+ (can_len2dlc(cf->len) << 16) |
+ fdflags | TX_BUF_EFC);
+
+ for (i = 0; i < cf->len; i += 4)
+ m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4),
+ *(u32 *)(cf->data + i));
+
+ /* Push loopback echo.
+ * Will be looped back on TX interrupt based on message marker
+ */
+ can_put_echo_skb(skb, dev, putidx);
+
+ /* Enable TX FIFO element to start transfer */
+ m_can_write(priv, M_CAN_TXBAR, (1 << putidx));
+
+ /* stop network queue if fifo full */
+ if (m_can_tx_fifo_full(priv) ||
+ m_can_next_echo_skb_occupied(dev, putidx))
+ netif_stop_queue(dev);
+ }
return NETDEV_TX_OK;
}
@@ -1109,55 +1489,37 @@ static int register_m_can_dev(struct net_device *dev)
return register_candev(dev);
}
-static int m_can_of_parse_mram(struct platform_device *pdev,
- struct m_can_priv *priv)
+static void m_can_of_parse_mram(struct m_can_priv *priv,
+ const u32 *mram_config_vals)
{
- struct device_node *np = pdev->dev.of_node;
- struct resource *res;
- void __iomem *addr;
- u32 out_val[MRAM_CFG_LEN];
- int i, start, end, ret;
+ int i, start, end;
- /* message ram could be shared */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
- if (!res)
- return -ENODEV;
-
- addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!addr)
- return -ENOMEM;
-
- /* get message ram configuration */
- ret = of_property_read_u32_array(np, "bosch,mram-cfg",
- out_val, sizeof(out_val) / 4);
- if (ret) {
- dev_err(&pdev->dev, "can not get message ram configuration\n");
- return -ENODEV;
- }
-
- priv->mram_base = addr;
- priv->mcfg[MRAM_SIDF].off = out_val[0];
- priv->mcfg[MRAM_SIDF].num = out_val[1];
+ priv->mcfg[MRAM_SIDF].off = mram_config_vals[0];
+ priv->mcfg[MRAM_SIDF].num = mram_config_vals[1];
priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
- priv->mcfg[MRAM_XIDF].num = out_val[2];
+ priv->mcfg[MRAM_XIDF].num = mram_config_vals[2];
priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK;
+ priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
+ (RXFC_FS_MASK >> RXFC_FS_SHIFT);
priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK;
+ priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
+ (RXFC_FS_MASK >> RXFC_FS_SHIFT);
priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXB].num = out_val[5];
+ priv->mcfg[MRAM_RXB].num = mram_config_vals[5];
priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
- priv->mcfg[MRAM_TXE].num = out_val[6];
+ priv->mcfg[MRAM_TXE].num = mram_config_vals[6];
priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
- priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK;
+ priv->mcfg[MRAM_TXB].num = mram_config_vals[7] &
+ (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);
- dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+ dev_dbg(priv->device,
+ "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
priv->mram_base,
priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
@@ -1176,7 +1538,6 @@ static int m_can_of_parse_mram(struct platform_device *pdev,
for (i = start; i < end; i += 4)
writel(0x0, priv->mram_base + i);
- return 0;
}
static int m_can_plat_probe(struct platform_device *pdev)
@@ -1185,38 +1546,86 @@ static int m_can_plat_probe(struct platform_device *pdev)
struct m_can_priv *priv;
struct resource *res;
void __iomem *addr;
+ void __iomem *mram_addr;
struct clk *hclk, *cclk;
int irq, ret;
+ struct device_node *np;
+ u32 mram_config_vals[MRAM_CFG_LEN];
+ u32 tx_fifo_size;
+
+ np = pdev->dev.of_node;
hclk = devm_clk_get(&pdev->dev, "hclk");
cclk = devm_clk_get(&pdev->dev, "cclk");
+
if (IS_ERR(hclk) || IS_ERR(cclk)) {
- dev_err(&pdev->dev, "no clock find\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "no clock found\n");
+ ret = -ENODEV;
+ goto failed_ret;
}
+ /* Enable clocks. Necessary to read Core Release in order to determine
+ * M_CAN version
+ */
+ ret = clk_prepare_enable(hclk);
+ if (ret)
+ goto failed_ret;
+
+ ret = clk_prepare_enable(cclk);
+ if (ret)
+ goto disable_hclk_ret;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
addr = devm_ioremap_resource(&pdev->dev, res);
irq = platform_get_irq_byname(pdev, "int0");
- if (IS_ERR(addr) || irq < 0)
- return -EINVAL;
- /* allocate the m_can device */
- dev = alloc_m_can_dev();
- if (!dev)
- return -ENOMEM;
+ if (IS_ERR(addr) || irq < 0) {
+ ret = -EINVAL;
+ goto disable_cclk_ret;
+ }
+
+ /* message ram could be shared */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
+ if (!res) {
+ ret = -ENODEV;
+ goto disable_cclk_ret;
+ }
+
+ mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!mram_addr) {
+ ret = -ENOMEM;
+ goto disable_cclk_ret;
+ }
+
+ /* get message ram configuration */
+ ret = of_property_read_u32_array(np, "bosch,mram-cfg",
+ mram_config_vals,
+ sizeof(mram_config_vals) / 4);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not get Message RAM configuration.");
+ goto disable_cclk_ret;
+ }
+ /* Get TX FIFO size
+ * Defines the total number of echo buffers for loopback
+ */
+ tx_fifo_size = mram_config_vals[7];
+
+ /* allocate the m_can device */
+ dev = alloc_m_can_dev(pdev, addr, tx_fifo_size);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto disable_cclk_ret;
+ }
priv = netdev_priv(dev);
dev->irq = irq;
- priv->base = addr;
priv->device = &pdev->dev;
priv->hclk = hclk;
priv->cclk = cclk;
priv->can.clock.freq = clk_get_rate(cclk);
+ priv->mram_base = mram_addr;
- ret = m_can_of_parse_mram(pdev, priv);
- if (ret)
- goto failed_free_dev;
+ m_can_of_parse_mram(priv, mram_config_vals);
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1230,13 +1639,22 @@ static int m_can_plat_probe(struct platform_device *pdev)
devm_can_led_init(dev);
- dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
- KBUILD_MODNAME, priv->base, dev->irq);
+ dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
+ KBUILD_MODNAME, dev->irq, priv->version);
- return 0;
+ /* Probe finished
+ * Stop clocks. They will be reactivated once the M_CAN device is opened
+ */
+
+ goto disable_cclk_ret;
failed_free_dev:
free_m_can_dev(dev);
+disable_cclk_ret:
+ clk_disable_unprepare(cclk);
+disable_hclk_ret:
+ clk_disable_unprepare(hclk);
+failed_ret:
return ret;
}
diff --git a/drivers/net/can/peak_canfd/Kconfig b/drivers/net/can/peak_canfd/Kconfig
new file mode 100644
index 000000000000..84b30978a19f
--- /dev/null
+++ b/drivers/net/can/peak_canfd/Kconfig
@@ -0,0 +1,13 @@
+config CAN_PEAK_PCIEFD
+ depends on PCI
+ tristate "PEAK-System PCAN-PCIe FD cards"
+ ---help---
+ This driver adds support for the PEAK-System PCI Express FD
+ CAN-FD card family.
+ These single- and dual-channel CAN-FD cards offer CAN 2.0 A/B as
+ well as CAN-FD access to the CAN bus. Besides the nominal bitrate
+ of up to 1 Mbit/s, the data bytes of CAN-FD frames can be
+ transmitted at up to 12 Mbit/s. Galvanic isolation of the CAN
+ ports protects the electronics of the card and of the host
+ computer against disturbances of up to 500 Volts. The
+ PCAN-PCI Express FD can be operated at ambient temperatures
+ from -40 to +85 °C.
diff --git a/drivers/net/can/peak_canfd/Makefile b/drivers/net/can/peak_canfd/Makefile
new file mode 100644
index 000000000000..3dc7a6a0ba59
--- /dev/null
+++ b/drivers/net/can/peak_canfd/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the PEAK-System CAN-FD IP module drivers
+#
+obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_pciefd.o
+peak_pciefd-y := peak_pciefd_main.o peak_canfd.o
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
new file mode 100644
index 000000000000..0d57be5ea97b
--- /dev/null
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -0,0 +1,801 @@
+/*
+ * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * Copyright (C) 2016 PEAK System-Technik GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "peak_canfd_user.h"
+
+/* internal IP core cache size (used as default echo skbs max number) */
+#define PCANFD_ECHO_SKB_MAX 24
+
+/* bittiming ranges of the PEAK-System PC CAN-FD interfaces */
+static const struct can_bittiming_const peak_canfd_nominal_const = {
+ .name = "peak_canfd",
+ .tseg1_min = 1,
+ .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+ .tseg2_min = 1,
+ .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+ .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+ .brp_min = 1,
+ .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const peak_canfd_data_const = {
+ .name = "peak_canfd",
+ .tseg1_min = 1,
+ .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+ .tseg2_min = 1,
+ .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+ .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+ .brp_min = 1,
+ .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+ .brp_inc = 1,
+};
+
+static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv)
+{
+ priv->cmd_len = 0;
+ return priv;
+}
+
+static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op)
+{
+ struct pucan_command *cmd;
+
+ if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen)
+ return NULL;
+
+ cmd = priv->cmd_buffer + priv->cmd_len;
+
+ /* reset all unused bits to default */
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op);
+ priv->cmd_len += sizeof(*cmd);
+
+ return cmd;
+}
+
+static int pucan_write_cmd(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ if (priv->pre_cmd) {
+ err = priv->pre_cmd(priv);
+ if (err)
+ return err;
+ }
+
+ err = priv->write_cmd(priv);
+ if (err)
+ return err;
+
+ if (priv->post_cmd)
+ err = priv->post_cmd(priv);
+
+ return err;
+}
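All the uCAN command helpers that follow use the same init/add/write pattern; here is a minimal sketch (pucan_example_reset is a hypothetical function, not part of the driver), assuming the board glue provides a cmd_buffer large enough for at least one 64-bit pucan_command:

/* sketch only: build one command and push it through the board glue */
static int pucan_example_reset(struct peak_canfd_priv *priv)
{
	pucan_init_cmd(priv);                      /* cmd_len = 0 */
	pucan_add_cmd(priv, PUCAN_CMD_RESET_MODE); /* append one 64-bit cmd */
	return pucan_write_cmd(priv);              /* pre_cmd/write_cmd/post_cmd */
}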
+
+/* uCAN command interface functions */
+static int pucan_set_reset_mode(struct peak_canfd_priv *priv)
+{
+ pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
+ err = pucan_write_cmd(priv);
+ if (!err)
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return err;
+}
+
+static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
+ err = pucan_write_cmd(priv);
+ if (!err)
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return err;
+}
+
+static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
+ const struct can_bittiming *pbt)
+{
+ struct pucan_timing_slow *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
+
+ cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
+ priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);
+ cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
+ cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
+ cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));
+
+ cmd->ewl = 96; /* default */
+
+ netdev_dbg(priv->ndev,
+ "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
+ le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_set_timing_fast(struct peak_canfd_priv *priv,
+ const struct can_bittiming *pbt)
+{
+ struct pucan_timing_fast *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST);
+
+ cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1);
+ cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
+ cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 - 1);
+ cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1));
+
+ netdev_dbg(priv->ndev,
+ "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
+ le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
+{
+ struct pucan_std_filter *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);
+
+ /* all the 11-bits CAN ID values are represented by one bit in a
+ * 64 rows array of 32 bits: the upper 6 bits of the CAN ID select the
+ * row while the lowest 5 bits select the bit in that row.
+ *
+ * bit filter
+ * 1 passed
+ * 0 discarded
+ */
+
+ /* select the row */
+ cmd->idx = row;
+
+ /* set/unset bits in the row */
+ cmd->mask = cpu_to_le32(mask);
+
+ return pucan_write_cmd(priv);
+}
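For illustration, a minimal sketch of how an 11-bit CAN ID maps onto the 64 x 32 filter bitmap described in the comment above (pucan_std_filter_pos is a hypothetical helper, not part of the driver):

/* hypothetical helper: compute the filter row and bit for an 11-bit ID */
static inline void pucan_std_filter_pos(u16 can_id, u8 *row, u32 *mask)
{
	*row = (can_id >> 5) & 0x3f;	/* upper 6 bits: one of 64 rows */
	*mask = 1U << (can_id & 0x1f);	/* lower 5 bits: bit in that row */
}

/* e.g. accepting only ID 0x123: row = 0x123 >> 5 = 9 and
 * mask = 1 << (0x123 & 0x1f) = 1 << 3, i.e.
 * pucan_set_std_filter(priv, 9, 1 << 3), with mask 0 for every other row.
 */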
+
+static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
+{
+ struct pucan_tx_abort *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);
+
+ cmd->flags = cpu_to_le16(flags);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
+{
+ struct pucan_wr_err_cnt *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);
+
+ cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
+ cmd->tx_counter = 0;
+ cmd->rx_counter = 0;
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
+{
+ struct pucan_options *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);
+
+ cmd->options = cpu_to_le16(opt_mask);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask)
+{
+ struct pucan_options *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION);
+
+ cmd->options = cpu_to_le16(opt_mask);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
+{
+ pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);
+
+ return pucan_write_cmd(priv);
+}
+
+/* handle the reception of one CAN frame */
+static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
+ struct pucan_rx_msg *msg)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ const u16 rx_msg_flags = le16_to_cpu(msg->flags);
+ u8 cf_len;
+
+ if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN)
+ cf_len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(msg)));
+ else
+ cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
+
+ /* if this frame is the echo of a frame we sent, */
+ if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
+ !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
+ int n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->echo_lock, flags);
+ n = can_get_echo_skb(priv->ndev, msg->client);
+ spin_unlock_irqrestore(&priv->echo_lock, flags);
+
+ /* count the echoed frame bytes rather than the skb length */
+ stats->tx_bytes += cf_len;
+ stats->tx_packets++;
+
+ if (n) {
+ /* restart tx queue only if a slot is free */
+ netif_wake_queue(priv->ndev);
+ }
+
+ return 0;
+ }
+
+ /* otherwise, it should be pushed into rx fifo */
+ if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
+ /* CANFD frame case */
+ skb = alloc_canfd_skb(priv->ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
+ cf->flags |= CANFD_BRS;
+
+ if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
+ cf->flags |= CANFD_ESI;
+ } else {
+ /* CAN 2.0 frame case */
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ if (!skb)
+ return -ENOMEM;
+ }
+
+ cf->can_id = le32_to_cpu(msg->can_id);
+ cf->len = cf_len;
+
+ if (rx_msg_flags & PUCAN_MSG_EXT_ID)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (rx_msg_flags & PUCAN_MSG_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, msg->d, cf->len);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+
+ netif_rx(skb);
+
+ return 0;
+}
+
+/* handle rx/tx error counters notification */
+static int pucan_handle_error(struct peak_canfd_priv *priv,
+ struct pucan_error_msg *msg)
+{
+ priv->bec.txerr = msg->tx_err_cnt;
+ priv->bec.rxerr = msg->rx_err_cnt;
+
+ return 0;
+}
+
+/* handle status notification */
+static int pucan_handle_status(struct peak_canfd_priv *priv,
+ struct pucan_status_msg *msg)
+{
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* this STATUS is the CNF of the RX_BARRIER: the Tx path can be set up */
+ if (pucan_status_is_rx_barrier(msg)) {
+ unsigned long flags;
+
+ if (priv->enable_tx_path) {
+ int err = priv->enable_tx_path(priv);
+
+ if (err)
+ return err;
+ }
+
+ /* restart network queue only if the next echo skb slot is free */
+ spin_lock_irqsave(&priv->echo_lock, flags);
+
+ if (!priv->can.echo_skb[priv->echo_idx]) {
+ spin_unlock_irqrestore(&priv->echo_lock, flags);
+
+ netif_wake_queue(ndev);
+ } else {
+ spin_unlock_irqrestore(&priv->echo_lock, flags);
+ }
+
+ return 0;
+ }
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ /* test state error bits according to their priority */
+ if (pucan_status_is_busoff(msg)) {
+ netdev_dbg(ndev, "Bus-off entry status\n");
+ priv->can.state = CAN_STATE_BUS_OFF;
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+
+ } else if (pucan_status_is_passive(msg)) {
+ netdev_dbg(ndev, "Error passive status\n");
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[6] = priv->bec.txerr;
+ cf->data[7] = priv->bec.rxerr;
+ }
+
+ } else if (pucan_status_is_warning(msg)) {
+ netdev_dbg(ndev, "Error warning status\n");
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = priv->bec.txerr;
+ cf->data[7] = priv->bec.rxerr;
+ }
+
+ } else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) {
+ /* back to ERROR_ACTIVE */
+ netdev_dbg(ndev, "Error active status\n");
+ can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE,
+ CAN_STATE_ERROR_ACTIVE);
+ } else {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+
+ return 0;
+}
+
+/* handle uCAN Rx overflow notification */
+static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ skb = alloc_can_err_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ cf->data[6] = priv->bec.txerr;
+ cf->data[7] = priv->bec.rxerr;
+
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_rx(skb);
+
+ return 0;
+}
+
+/* handle a single uCAN message */
+int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
+ struct pucan_rx_msg *msg)
+{
+ u16 msg_type = le16_to_cpu(msg->type);
+ int msg_size = le16_to_cpu(msg->size);
+ int err;
+
+ if (!msg_size || !msg_type) {
+ /* null packet found: end of list */
+ goto exit;
+ }
+
+ switch (msg_type) {
+ case PUCAN_MSG_CAN_RX:
+ err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
+ break;
+ case PUCAN_MSG_ERROR:
+ err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
+ break;
+ case PUCAN_MSG_STATUS:
+ err = pucan_handle_status(priv, (struct pucan_status_msg *)msg);
+ break;
+ case PUCAN_MSG_CACHE_CRITICAL:
+ err = pucan_handle_cache_critical(priv);
+ break;
+ default:
+ err = 0;
+ }
+
+ if (err < 0)
+ return err;
+
+exit:
+ return msg_size;
+}
+
+/* handle a list of msg_count messages starting at the msg_list address */
+int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
+ struct pucan_rx_msg *msg_list, int msg_count)
+{
+ void *msg_ptr = msg_list;
+ int i, msg_size = 0;
+
+ for (i = 0; i < msg_count; i++) {
+ msg_size = peak_canfd_handle_msg(priv, msg_ptr);
+
+ /* a null packet can be found at the end of a list */
+ if (msg_size <= 0)
+ break;
+
+ msg_ptr += msg_size;
+ }
+
+ if (msg_size < 0)
+ return msg_size;
+
+ return i;
+}
+
+static int peak_canfd_start(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ err = pucan_clr_err_counters(priv);
+ if (err)
+ goto err_exit;
+
+ priv->echo_idx = 0;
+
+ priv->bec.txerr = 0;
+ priv->bec.rxerr = 0;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ err = pucan_set_listen_only_mode(priv);
+ else
+ err = pucan_set_normal_mode(priv);
+
+err_exit:
+ return err;
+}
+
+static void peak_canfd_stop(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ /* go back to RESET mode */
+ err = pucan_set_reset_mode(priv);
+ if (err) {
+ netdev_err(priv->ndev, "channel %u reset failed\n",
+ priv->index);
+ } else {
+ /* abort last Tx (MUST be done in RESET mode only!) */
+ pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
+ }
+}
+
+static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct peak_canfd_priv *priv = netdev_priv(ndev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ peak_canfd_start(priv);
+ netif_wake_queue(ndev);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int peak_canfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct peak_canfd_priv *priv = netdev_priv(ndev);
+
+ *bec = priv->bec;
+ return 0;
+}
+
+static int peak_canfd_open(struct net_device *ndev)
+{
+ struct peak_canfd_priv *priv = netdev_priv(ndev);
+ int i, err = 0;
+
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ goto err_exit;
+ }
+
+ err = pucan_set_reset_mode(priv);
+ if (err)
+ goto err_close;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO);
+ else
+ err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO);
+
+ if (err)
+ goto err_close;
+ }
+
+ /* set option: get rx/tx error counters */
+ err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
+ if (err)
+ goto err_close;
+
+ /* accept all standard CAN ID */
+ for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
+ pucan_set_std_filter(priv, i, 0xffffffff);
+
+ err = peak_canfd_start(priv);
+ if (err)
+ goto err_close;
+
+ /* reception of the RX_BARRIER status signals that the Tx path is ready */
+ err = pucan_setup_rx_barrier(priv);
+ if (!err)
+ goto err_exit;
+
+err_close:
+ close_candev(ndev);
+err_exit:
+ return err;
+}
+
+static int peak_canfd_set_bittiming(struct net_device *ndev)
+{
+ struct peak_canfd_priv *priv = netdev_priv(ndev);
+
+ return pucan_set_timing_slow(priv, &priv->can.bittiming);
+}
+
+static int peak_canfd_set_data_bittiming(struct net_device *ndev)
+{
+ struct peak_canfd_priv *priv = netdev_priv(ndev);
+
+ return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
+}
+
+static int peak_canfd_close(struct net_device *ndev)
+{
+ struct peak_canfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ peak_canfd_stop(priv);
+ close_candev(ndev);
+
+ return 0;
+}
+
+static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct peak_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ struct pucan_tx_msg *msg;
+ u16 msg_size, msg_flags;
+ unsigned long flags;
+ bool should_stop_tx_queue;
+ int room_left;
+ u8 can_dlc;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
+ msg = priv->alloc_tx_msg(priv, msg_size, &room_left);
+
+ /* should never happen, except under a bus-off condition while the
+ * (auto-)restart mechanism is running
+ */
+ if (!msg) {
+ stats->tx_dropped++;
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ msg->size = cpu_to_le16(msg_size);
+ msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
+ msg_flags = 0;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ msg_flags |= PUCAN_MSG_EXT_ID;
+ msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK);
+ } else {
+ msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK);
+ }
+
+ if (can_is_canfd_skb(skb)) {
+ /* CAN FD frame format */
+ can_dlc = can_len2dlc(cf->len);
+
+ msg_flags |= PUCAN_MSG_EXT_DATA_LEN;
+
+ if (cf->flags & CANFD_BRS)
+ msg_flags |= PUCAN_MSG_BITRATE_SWITCH;
+
+ if (cf->flags & CANFD_ESI)
+ msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
+ } else {
+ /* CAN 2.0 frame format */
+ can_dlc = cf->len;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ msg_flags |= PUCAN_MSG_RTR;
+ }
+
+ /* always ask loopback for echo management */
+ msg_flags |= PUCAN_MSG_LOOPED_BACK;
+
+ /* set driver specific bit to differentiate from application loopback */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ msg_flags |= PUCAN_MSG_SELF_RECEIVE;
+
+ msg->flags = cpu_to_le16(msg_flags);
+ msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, can_dlc);
+ memcpy(msg->d, cf->data, cf->len);
+
+ /* struct msg client field is used as an index in the echo skbs ring */
+ msg->client = priv->echo_idx;
+
+ spin_lock_irqsave(&priv->echo_lock, flags);
+
+ /* prepare and save echo skb in internal slot */
+ can_put_echo_skb(skb, ndev, priv->echo_idx);
+
+ /* move echo index to the next slot */
+ priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;
+
+ /* if next slot is not free, stop network queue (no slot free in echo
+ * skb ring means that the controller did not write these frames on
+ * the bus: no need to continue).
+ */
+ should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
+
+ spin_unlock_irqrestore(&priv->echo_lock, flags);
+
+ /* write the skb on the interface */
+ priv->write_tx_msg(priv, msg);
+
+ /* also stop the network tx queue if there is not enough room to store one more msg */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ should_stop_tx_queue |= (room_left <
+ (sizeof(*msg) + CANFD_MAX_DLEN));
+ else
+ should_stop_tx_queue |= (room_left <
+ (sizeof(*msg) + CAN_MAX_DLEN));
+
+ if (should_stop_tx_queue)
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
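The ring bookkeeping above reduces to one invariant; a condensed sketch, with a hypothetical helper name:

/* sketch: the producer (start_xmit) stops itself exactly when the slot
 * the *next* frame would use is still held by a not-yet-echoed skb;
 * pucan_handle_can_rx() frees slots and wakes the queue again
 */
static bool peak_canfd_echo_ring_full(const struct peak_canfd_priv *priv)
{
	/* echo_idx has already been advanced to the next slot */
	return priv->can.echo_skb[priv->echo_idx] != NULL;
}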
+
+static const struct net_device_ops peak_canfd_netdev_ops = {
+ .ndo_open = peak_canfd_open,
+ .ndo_stop = peak_canfd_close,
+ .ndo_start_xmit = peak_canfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
+ int echo_skb_max)
+{
+ struct net_device *ndev;
+ struct peak_canfd_priv *priv;
+
+ /* we DO support local echo */
+ if (echo_skb_max < 0)
+ echo_skb_max = PCANFD_ECHO_SKB_MAX;
+
+ /* allocate the candev object */
+ ndev = alloc_candev(sizeof_priv, echo_skb_max);
+ if (!ndev)
+ return NULL;
+
+ priv = netdev_priv(ndev);
+
+ /* now complete the socket-can side of the initialization */
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.bittiming_const = &peak_canfd_nominal_const;
+ priv->can.data_bittiming_const = &peak_canfd_data_const;
+
+ priv->can.do_set_mode = peak_canfd_set_mode;
+ priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
+ priv->can.do_set_bittiming = peak_canfd_set_bittiming;
+ priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO |
+ CAN_CTRLMODE_BERR_REPORTING;
+
+ priv->ndev = ndev;
+ priv->index = index;
+ priv->cmd_len = 0;
+ spin_lock_init(&priv->echo_lock);
+
+ ndev->flags |= IFF_ECHO;
+ ndev->netdev_ops = &peak_canfd_netdev_ops;
+ ndev->dev_id = index;
+
+ return ndev;
+}
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
new file mode 100644
index 000000000000..bf6de47f69c2
--- /dev/null
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -0,0 +1,55 @@
+/*
+ * CAN driver for PEAK System micro-CAN based adapters
+ *
+ * Copyright (C) 2003-2011 PEAK System-Technik GmbH
+ * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef PEAK_CANFD_USER_H
+#define PEAK_CANFD_USER_H
+
+#include <linux/can/dev/peak_canfd.h>
+
+#define PCANFD_ECHO_SKB_DEF -1
+
+/* data structure private to each uCAN interface */
+struct peak_canfd_priv {
+ struct can_priv can; /* socket-can private data */
+ struct net_device *ndev; /* network device */
+ int index; /* channel index */
+
+ struct can_berr_counter bec; /* rx/tx err counters */
+
+ int echo_idx; /* echo skb free slot index */
+ spinlock_t echo_lock;
+
+ int cmd_len;
+ void *cmd_buffer;
+ int cmd_maxlen;
+
+ int (*pre_cmd)(struct peak_canfd_priv *priv);
+ int (*write_cmd)(struct peak_canfd_priv *priv);
+ int (*post_cmd)(struct peak_canfd_priv *priv);
+
+ int (*enable_tx_path)(struct peak_canfd_priv *priv);
+ void *(*alloc_tx_msg)(struct peak_canfd_priv *priv, u16 msg_size,
+ int *room_left);
+ int (*write_tx_msg)(struct peak_canfd_priv *priv,
+ struct pucan_tx_msg *msg);
+};
+
+struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
+ int echo_skb_max);
+int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
+ struct pucan_rx_msg *msg);
+int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
+ struct pucan_rx_msg *rx_msg, int rx_count);
+#endif
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
new file mode 100644
index 000000000000..51c2d182a33a
--- /dev/null
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -0,0 +1,842 @@
+/*
+ * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * Derived from the PCAN project file driver/src/pcan_pci.c:
+ *
+ * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "peak_canfd_user.h"
+
+MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
+MODULE_LICENSE("GPL v2");
+
+#define PCIEFD_DRV_NAME "peak_pciefd"
+
+#define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */
+#define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */
+
+/* PEAK PCIe board access description */
+#define PCIEFD_BAR0_SIZE (64 * 1024)
+#define PCIEFD_RX_DMA_SIZE (4 * 1024)
+#define PCIEFD_TX_DMA_SIZE (4 * 1024)
+
+#define PCIEFD_TX_PAGE_SIZE (2 * 1024)
+
+/* System Control Registers */
+#define PCIEFD_REG_SYS_CTL_SET 0x0000 /* set bits */
+#define PCIEFD_REG_SYS_CTL_CLR 0x0004 /* clear bits */
+
+/* Version info registers */
+#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
+#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
+
+/* System Control Registers Bits */
+#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
+#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
+
+/* CAN-FD channel addresses */
+#define PCIEFD_CANX_OFF(c) (((c) + 1) * 0x1000)
+
+#define PCIEFD_ECHO_SKB_MAX PCANFD_ECHO_SKB_DEF
+
+/* CAN-FD channel registers */
+#define PCIEFD_REG_CAN_MISC 0x0000 /* Misc. control */
+#define PCIEFD_REG_CAN_CLK_SEL 0x0008 /* Clock selector */
+#define PCIEFD_REG_CAN_CMD_PORT_L 0x0010 /* 64-bit command port */
+#define PCIEFD_REG_CAN_CMD_PORT_H 0x0014
+#define PCIEFD_REG_CAN_TX_REQ_ACC 0x0020 /* Tx request accumulator */
+#define PCIEFD_REG_CAN_TX_CTL_SET 0x0030 /* Tx control set register */
+#define PCIEFD_REG_CAN_TX_CTL_CLR 0x0038 /* Tx control clear register */
+#define PCIEFD_REG_CAN_TX_DMA_ADDR_L 0x0040 /* 64-bit addr for Tx DMA */
+#define PCIEFD_REG_CAN_TX_DMA_ADDR_H 0x0044
+#define PCIEFD_REG_CAN_RX_CTL_SET 0x0050 /* Rx control set register */
+#define PCIEFD_REG_CAN_RX_CTL_CLR 0x0058 /* Rx control clear register */
+#define PCIEFD_REG_CAN_RX_CTL_WRT 0x0060 /* Rx control write register */
+#define PCIEFD_REG_CAN_RX_CTL_ACK 0x0068 /* Rx control ACK register */
+#define PCIEFD_REG_CAN_RX_DMA_ADDR_L 0x0070 /* 64-bit addr for Rx DMA */
+#define PCIEFD_REG_CAN_RX_DMA_ADDR_H 0x0074
+
+/* CAN-FD channel misc register bits */
+#define CANFD_MISC_TS_RST 0x00000001 /* timestamp cnt rst */
+
+/* CAN-FD channel Clock SELector Source & DIVider */
+#define CANFD_CLK_SEL_DIV_MASK 0x00000007
+#define CANFD_CLK_SEL_DIV_60MHZ 0x00000000 /* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_40MHZ 0x00000001 /* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_30MHZ 0x00000002 /* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_24MHZ 0x00000003 /* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_20MHZ 0x00000004 /* SRC=240MHz only */
+
+#define CANFD_CLK_SEL_SRC_MASK 0x00000008 /* 0=80MHz, 1=240MHz */
+#define CANFD_CLK_SEL_SRC_240MHZ 0x00000008
+#define CANFD_CLK_SEL_SRC_80MHZ (~CANFD_CLK_SEL_SRC_240MHZ & \
+ CANFD_CLK_SEL_SRC_MASK)
+
+#define CANFD_CLK_SEL_20MHZ (CANFD_CLK_SEL_SRC_240MHZ |\
+ CANFD_CLK_SEL_DIV_20MHZ)
+#define CANFD_CLK_SEL_24MHZ (CANFD_CLK_SEL_SRC_240MHZ |\
+ CANFD_CLK_SEL_DIV_24MHZ)
+#define CANFD_CLK_SEL_30MHZ (CANFD_CLK_SEL_SRC_240MHZ |\
+ CANFD_CLK_SEL_DIV_30MHZ)
+#define CANFD_CLK_SEL_40MHZ (CANFD_CLK_SEL_SRC_240MHZ |\
+ CANFD_CLK_SEL_DIV_40MHZ)
+#define CANFD_CLK_SEL_60MHZ (CANFD_CLK_SEL_SRC_240MHZ |\
+ CANFD_CLK_SEL_DIV_60MHZ)
+#define CANFD_CLK_SEL_80MHZ (CANFD_CLK_SEL_SRC_80MHZ)
+
+/* CAN-FD channel Rx/Tx control register bits */
+#define CANFD_CTL_UNC_BIT 0x00010000 /* Uncached DMA mem */
+#define CANFD_CTL_RST_BIT 0x00020000 /* reset DMA action */
+#define CANFD_CTL_IEN_BIT 0x00040000 /* IRQ enable */
+
+/* Rx IRQ Count and Time Limits */
+#define CANFD_CTL_IRQ_CL_DEF 16 /* Rx msg max nb per IRQ in Rx DMA */
+#define CANFD_CTL_IRQ_TL_DEF 10 /* Time before IRQ if < CL (x100 µs) */
+
+#define CANFD_OPTIONS_SET (CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD)
+
+/* Tx anticipation window (the link logical address should be aligned on a
+ * 2K boundary)
+ */
+#define PCIEFD_TX_PAGE_COUNT (PCIEFD_TX_DMA_SIZE / PCIEFD_TX_PAGE_SIZE)
+
+#define CANFD_MSG_LNK_TX 0x1001 /* Tx msgs link */
+
+/* 32-bit IRQ status fields, heading the Rx DMA area */
+static inline int pciefd_irq_tag(u32 irq_status)
+{
+ return irq_status & 0x0000000f;
+}
+
+static inline int pciefd_irq_rx_cnt(u32 irq_status)
+{
+ return (irq_status & 0x000007f0) >> 4;
+}
+
+static inline int pciefd_irq_is_lnk(u32 irq_status)
+{
+ return irq_status & 0x00010000;
+}
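For illustration, a decode of a hypothetical irq_status value through the three accessors above:

/* example (hypothetical value): irq_status = 0x00010053
 *   pciefd_irq_tag()    -> 0x00010053 & 0xf          = 3
 *   pciefd_irq_rx_cnt() -> (0x00010053 & 0x7f0) >> 4 = 5 Rx messages
 *   pciefd_irq_is_lnk() -> 0x00010053 & 0x10000      != 0, i.e. the board
 *                          has consumed one Tx page
 */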
+
+/* Rx record */
+struct pciefd_rx_dma {
+ __le32 irq_status;
+ __le32 sys_time_low;
+ __le32 sys_time_high;
+ struct pucan_rx_msg msg[0];
+} __packed __aligned(4);
+
+/* Tx Link record */
+struct pciefd_tx_link {
+ __le16 size;
+ __le16 type;
+ __le32 laddr_lo;
+ __le32 laddr_hi;
+} __packed __aligned(4);
+
+/* Tx page descriptor */
+struct pciefd_page {
+ void *vbase; /* page virtual address */
+ dma_addr_t lbase; /* page logical address */
+ u32 offset;
+ u32 size;
+};
+
+#define CANFD_IRQ_SET 0x00000001
+#define CANFD_TX_PATH_SET 0x00000002
+
+/* CAN-FD channel object */
+struct pciefd_board;
+struct pciefd_can {
+ struct peak_canfd_priv ucan; /* must be the first member */
+ void __iomem *reg_base; /* channel config base addr */
+ struct pciefd_board *board; /* reverse link */
+
+ struct pucan_command pucan_cmd; /* command buffer */
+
+ dma_addr_t rx_dma_laddr; /* DMA virtual and logical addr */
+ void *rx_dma_vaddr; /* for Rx and Tx areas */
+ dma_addr_t tx_dma_laddr;
+ void *tx_dma_vaddr;
+
+ struct pciefd_page tx_pages[PCIEFD_TX_PAGE_COUNT];
+ u16 tx_pages_free; /* free Tx pages counter */
+ u16 tx_page_index; /* current page used for Tx */
+ spinlock_t tx_lock;
+
+ u32 irq_status;
+ u32 irq_tag; /* next irq tag */
+};
+
+/* PEAK-PCIe FD board object */
+struct pciefd_board {
+ void __iomem *reg_base;
+ struct pci_dev *pci_dev;
+ int can_count;
+ spinlock_t cmd_lock; /* 64-bit cmds must be atomic */
+ struct pciefd_can *can[0]; /* array of network devices */
+};
+
+/* supported device ids. */
+static const struct pci_device_id peak_pciefd_tbl[] = {
+ {PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, peak_pciefd_tbl);
+
+/* read a 32-bit value from a SYS block register */
+static inline u32 pciefd_sys_readreg(const struct pciefd_board *priv, u16 reg)
+{
+ return readl(priv->reg_base + reg);
+}
+
+/* write a 32-bit value into a SYS block register */
+static inline void pciefd_sys_writereg(const struct pciefd_board *priv,
+ u32 val, u16 reg)
+{
+ writel(val, priv->reg_base + reg);
+}
+
+/* read a 32-bit value from a CAN-FD block register */
+static inline u32 pciefd_can_readreg(const struct pciefd_can *priv, u16 reg)
+{
+ return readl(priv->reg_base + reg);
+}
+
+/* write a 32-bit value into a CAN-FD block register */
+static inline void pciefd_can_writereg(const struct pciefd_can *priv,
+ u32 val, u16 reg)
+{
+ writel(val, priv->reg_base + reg);
+}
+
+/* give a channel logical Rx DMA address to the board */
+static void pciefd_can_setup_rx_dma(struct pciefd_can *priv)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ const u32 dma_addr_h = (u32)(priv->rx_dma_laddr >> 32);
+#else
+ const u32 dma_addr_h = 0;
+#endif
+
+ /* (DMA must be reset for Rx) */
+ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_SET);
+
+ /* write the logical address of the Rx DMA area for this channel */
+ pciefd_can_writereg(priv, (u32)priv->rx_dma_laddr,
+ PCIEFD_REG_CAN_RX_DMA_ADDR_L);
+ pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_RX_DMA_ADDR_H);
+
+ /* also indicates that Rx DMA is cacheable */
+ pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, PCIEFD_REG_CAN_RX_CTL_CLR);
+}
+
+/* clear channel logical Rx DMA address from the board */
+static void pciefd_can_clear_rx_dma(struct pciefd_can *priv)
+{
+ /* DMA must be reset for Rx */
+ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_SET);
+
+ /* clear the logical address of the Rx DMA area for this channel */
+ pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_L);
+ pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_H);
+}
+
+/* give a channel logical Tx DMA address to the board */
+static void pciefd_can_setup_tx_dma(struct pciefd_can *priv)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ const u32 dma_addr_h = (u32)(priv->tx_dma_laddr >> 32);
+#else
+ const u32 dma_addr_h = 0;
+#endif
+
+ /* (DMA must be reset for Tx) */
+ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_SET);
+
+ /* write the logical address of the Tx DMA area for this channel */
+ pciefd_can_writereg(priv, (u32)priv->tx_dma_laddr,
+ PCIEFD_REG_CAN_TX_DMA_ADDR_L);
+ pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_TX_DMA_ADDR_H);
+
+ /* also indicates that Tx DMA is cacheable */
+ pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, PCIEFD_REG_CAN_TX_CTL_CLR);
+}
+
+/* clear channel logical Tx DMA address from the board */
+static void pciefd_can_clear_tx_dma(struct pciefd_can *priv)
+{
+ /* DMA must be reset for Tx */
+ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_SET);
+
+ /* clear the logical address of the Tx DMA area for this channel */
+ pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_L);
+ pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_H);
+}
+
+static void pciefd_can_ack_rx_dma(struct pciefd_can *priv)
+{
+ /* read value of current IRQ tag and inc it for next one */
+ priv->irq_tag = le32_to_cpu(*(__le32 *)priv->rx_dma_vaddr);
+ priv->irq_tag++;
+ priv->irq_tag &= 0xf;
+
+ /* write the next IRQ tag for this CAN */
+ pciefd_can_writereg(priv, priv->irq_tag, PCIEFD_REG_CAN_RX_CTL_ACK);
+}
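A sketch of the tag handshake this implements, assuming the board echoes the last tag it was given in each Rx DMA header:

/* sketch of one Rx cycle:
 *   board:  DMA-writes a burst, header tag = T (the last tag it was given)
 *   driver: IRQ fires; pciefd_irq_tag(irq_status) == priv->irq_tag == T,
 *           so the (shared) IRQ is ours and the messages are processed
 *   driver: pciefd_can_ack_rx_dma() reads T back, writes (T + 1) & 0xf to
 *           PCIEFD_REG_CAN_RX_CTL_ACK, re-arming the board for the next
 *           burst with the new expected tag
 */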
+
+/* IRQ handler */
+static irqreturn_t pciefd_irq_handler(int irq, void *arg)
+{
+ struct pciefd_can *priv = arg;
+ struct pciefd_rx_dma *rx_dma = priv->rx_dma_vaddr;
+
+ /* INTA mode only to sync with PCIe transaction */
+ if (!pci_dev_msi_enabled(priv->board->pci_dev))
+ (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
+
+ /* read IRQ status from the first 32 bits of the Rx DMA area */
+ priv->irq_status = le32_to_cpu(rx_dma->irq_status);
+
+ /* check if this (shared) IRQ is for this CAN */
+ if (pciefd_irq_tag(priv->irq_status) != priv->irq_tag)
+ return IRQ_NONE;
+
+ /* handle rx messages (if any) */
+ peak_canfd_handle_msgs_list(&priv->ucan,
+ rx_dma->msg,
+ pciefd_irq_rx_cnt(priv->irq_status));
+
+ /* handle tx link interrupt (if any) */
+ if (pciefd_irq_is_lnk(priv->irq_status)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ priv->tx_pages_free++;
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* wake producer up */
+ netif_wake_queue(priv->ucan.ndev);
+ }
+
+ /* re-enable Rx DMA transfer for this CAN */
+ pciefd_can_ack_rx_dma(priv);
+
+ return IRQ_HANDLED;
+}
+
+static int pciefd_enable_tx_path(struct peak_canfd_priv *ucan)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ int i;
+
+ /* initialize the Tx pages descriptors */
+ priv->tx_pages_free = PCIEFD_TX_PAGE_COUNT - 1;
+ priv->tx_page_index = 0;
+
+ priv->tx_pages[0].vbase = priv->tx_dma_vaddr;
+ priv->tx_pages[0].lbase = priv->tx_dma_laddr;
+
+ for (i = 0; i < PCIEFD_TX_PAGE_COUNT; i++) {
+ priv->tx_pages[i].offset = 0;
+ priv->tx_pages[i].size = PCIEFD_TX_PAGE_SIZE -
+ sizeof(struct pciefd_tx_link);
+ if (i) {
+ priv->tx_pages[i].vbase =
+ priv->tx_pages[i - 1].vbase +
+ PCIEFD_TX_PAGE_SIZE;
+ priv->tx_pages[i].lbase =
+ priv->tx_pages[i - 1].lbase +
+ PCIEFD_TX_PAGE_SIZE;
+ }
+ }
+
+ /* setup Tx DMA addresses into IP core */
+ pciefd_can_setup_tx_dma(priv);
+
+ /* start (TX_RST=0) Tx Path */
+ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_CLR);
+
+ return 0;
+}
+
+/* board specific CANFD command pre-processing */
+static int pciefd_pre_cmd(struct peak_canfd_priv *ucan)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd);
+ int err;
+
+ /* pre-process command */
+ switch (cmd) {
+ case PUCAN_CMD_NORMAL_MODE:
+ case PUCAN_CMD_LISTEN_ONLY_MODE:
+
+ if (ucan->can.state == CAN_STATE_BUS_OFF)
+ break;
+
+ /* going into operational mode: setup IRQ handler */
+ err = request_irq(priv->board->pci_dev->irq,
+ pciefd_irq_handler,
+ IRQF_SHARED,
+ PCIEFD_DRV_NAME,
+ priv);
+ if (err)
+ return err;
+
+ /* setup Rx DMA address */
+ pciefd_can_setup_rx_dma(priv);
+
+ /* setup max count of msgs per IRQ */
+ pciefd_can_writereg(priv, (CANFD_CTL_IRQ_TL_DEF) << 8 |
+ CANFD_CTL_IRQ_CL_DEF,
+ PCIEFD_REG_CAN_RX_CTL_WRT);
+
+ /* clear DMA RST for Rx (Rx start) */
+ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+ PCIEFD_REG_CAN_RX_CTL_CLR);
+
+ /* reset timestamps */
+ pciefd_can_writereg(priv, !CANFD_MISC_TS_RST,
+ PCIEFD_REG_CAN_MISC);
+
+ /* do an initial ACK */
+ pciefd_can_ack_rx_dma(priv);
+
+ /* enable IRQ for this CAN after having set next irq_tag */
+ pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+ PCIEFD_REG_CAN_RX_CTL_SET);
+
+ /* Tx path will be setup as soon as RX_BARRIER is received */
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* write a command */
+static int pciefd_write_cmd(struct peak_canfd_priv *ucan)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ unsigned long flags;
+
+ /* the 64-bit command must be written atomically */
+ spin_lock_irqsave(&priv->board->cmd_lock, flags);
+
+ pciefd_can_writereg(priv, *(u32 *)ucan->cmd_buffer,
+ PCIEFD_REG_CAN_CMD_PORT_L);
+ pciefd_can_writereg(priv, *(u32 *)(ucan->cmd_buffer + 4),
+ PCIEFD_REG_CAN_CMD_PORT_H);
+
+ spin_unlock_irqrestore(&priv->board->cmd_lock, flags);
+
+ return 0;
+}
+
+/* board specific CANFD command post-processing */
+static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd);
+
+ switch (cmd) {
+ case PUCAN_CMD_RESET_MODE:
+
+ if (ucan->can.state == CAN_STATE_STOPPED)
+ break;
+
+ /* controller now in reset mode: */
+
+ /* stop and reset DMA addresses in Tx/Rx engines */
+ pciefd_can_clear_tx_dma(priv);
+ pciefd_can_clear_rx_dma(priv);
+
+ /* disable IRQ for this CAN */
+ pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+ PCIEFD_REG_CAN_RX_CTL_CLR);
+
+ free_irq(priv->board->pci_dev->irq, priv);
+
+ ucan->can.state = CAN_STATE_STOPPED;
+
+ break;
+ }
+
+ return 0;
+}
+
+static void *pciefd_alloc_tx_msg(struct peak_canfd_priv *ucan, u16 msg_size,
+ int *room_left)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+ unsigned long flags;
+ void *msg;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ if (page->offset + msg_size > page->size) {
+ struct pciefd_tx_link *lk;
+
+ /* not enough space in this page: try another one */
+ if (!priv->tx_pages_free) {
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* Tx overflow */
+ return NULL;
+ }
+
+ priv->tx_pages_free--;
+
+ /* keep address of the very last free slot of current page */
+ lk = page->vbase + page->offset;
+
+ /* next, move on to a new free page */
+ priv->tx_page_index = (priv->tx_page_index + 1) %
+ PCIEFD_TX_PAGE_COUNT;
+ page = priv->tx_pages + priv->tx_page_index;
+
+ /* put a link record to this new page at the end of the previous one */
+ lk->size = cpu_to_le16(sizeof(*lk));
+ lk->type = cpu_to_le16(CANFD_MSG_LNK_TX);
+ lk->laddr_lo = cpu_to_le32(page->lbase);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ lk->laddr_hi = cpu_to_le32(page->lbase >> 32);
+#else
+ lk->laddr_hi = 0;
+#endif
+ /* next msgs will be put from the beginning of this new page */
+ page->offset = 0;
+ }
+
+ *room_left = priv->tx_pages_free * page->size;
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ msg = page->vbase + page->offset;
+
+ /* give back room left in the tx ring */
+ *room_left += page->size - (page->offset + msg_size);
+
+ return msg;
+}
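A worked walk-through of the page switch above, using the constants defined earlier (expected behaviour only, not additional driver code):

/* with PCIEFD_TX_DMA_SIZE = 4K and PCIEFD_TX_PAGE_SIZE = 2K:
 *   PCIEFD_TX_PAGE_COUNT = 2 and tx_pages_free starts at 1 (one spare);
 *   page->size = 2048 - sizeof(struct pciefd_tx_link) = 2036 bytes;
 *   when a msg no longer fits in page 0, a 12-byte link record
 *   { size = 12, type = CANFD_MSG_LNK_TX, laddr = page 1 lbase } is
 *   written at page 0's current offset, tx_pages_free drops to 0, and
 *   writing resumes at offset 0 of page 1; the board follows the link,
 *   raises the lnk IRQ, and pciefd_irq_handler() hands the page back by
 *   incrementing tx_pages_free and waking the queue
 */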
+
+static int pciefd_write_tx_msg(struct peak_canfd_priv *ucan,
+ struct pucan_tx_msg *msg)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+
+ /* this slot is now reserved for writing the frame */
+ page->offset += le16_to_cpu(msg->size);
+
+ /* tell the board a frame has been written in Tx DMA area */
+ pciefd_can_writereg(priv, 1, PCIEFD_REG_CAN_TX_REQ_ACC);
+
+ return 0;
+}
+
+/* probe for CAN-FD channel #pciefd_board->can_count */
+static int pciefd_can_probe(struct pciefd_board *pciefd)
+{
+ struct net_device *ndev;
+ struct pciefd_can *priv;
+ u32 clk;
+ int err;
+
+ /* allocate the candev object with the default size of the echo skbs ring */
+ ndev = alloc_peak_canfd_dev(sizeof(*priv), pciefd->can_count,
+ PCIEFD_ECHO_SKB_MAX);
+ if (!ndev) {
+ dev_err(&pciefd->pci_dev->dev,
+ "failed to alloc candev object\n");
+ goto failure;
+ }
+
+ priv = netdev_priv(ndev);
+
+ /* fill in the candev private object: */
+
+ /* set up the PCIe-FD specific callbacks */
+ priv->ucan.pre_cmd = pciefd_pre_cmd;
+ priv->ucan.write_cmd = pciefd_write_cmd;
+ priv->ucan.post_cmd = pciefd_post_cmd;
+ priv->ucan.enable_tx_path = pciefd_enable_tx_path;
+ priv->ucan.alloc_tx_msg = pciefd_alloc_tx_msg;
+ priv->ucan.write_tx_msg = pciefd_write_tx_msg;
+
+ /* set up the PCIe-FD specific command buffer */
+ priv->ucan.cmd_buffer = &priv->pucan_cmd;
+ priv->ucan.cmd_maxlen = sizeof(priv->pucan_cmd);
+
+ priv->board = pciefd;
+
+ /* CAN config regs block address */
+ priv->reg_base = pciefd->reg_base + PCIEFD_CANX_OFF(priv->ucan.index);
+
+ /* allocate non-cacheable DMA'able 4KB memory area for Rx */
+ priv->rx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev,
+ PCIEFD_RX_DMA_SIZE,
+ &priv->rx_dma_laddr,
+ GFP_KERNEL);
+ if (!priv->rx_dma_vaddr) {
+ dev_err(&pciefd->pci_dev->dev,
+ "Rx dmam_alloc_coherent(%u) failure\n",
+ PCIEFD_RX_DMA_SIZE);
+ goto err_free_candev;
+ }
+
+ /* allocate non-cacheable DMA'able 4KB memory area for Tx */
+ priv->tx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev,
+ PCIEFD_TX_DMA_SIZE,
+ &priv->tx_dma_laddr,
+ GFP_KERNEL);
+ if (!priv->tx_dma_vaddr) {
+ dev_err(&pciefd->pci_dev->dev,
+ "Tx dmaim_alloc_coherent(%u) failure\n",
+ PCIEFD_TX_DMA_SIZE);
+ goto err_free_candev;
+ }
+
+ /* CAN clock in RST mode */
+ pciefd_can_writereg(priv, CANFD_MISC_TS_RST, PCIEFD_REG_CAN_MISC);
+
+ /* read current clock value */
+ clk = pciefd_can_readreg(priv, PCIEFD_REG_CAN_CLK_SEL);
+ switch (clk) {
+ case CANFD_CLK_SEL_20MHZ:
+ priv->ucan.can.clock.freq = 20 * 1000 * 1000;
+ break;
+ case CANFD_CLK_SEL_24MHZ:
+ priv->ucan.can.clock.freq = 24 * 1000 * 1000;
+ break;
+ case CANFD_CLK_SEL_30MHZ:
+ priv->ucan.can.clock.freq = 30 * 1000 * 1000;
+ break;
+ case CANFD_CLK_SEL_40MHZ:
+ priv->ucan.can.clock.freq = 40 * 1000 * 1000;
+ break;
+ case CANFD_CLK_SEL_60MHZ:
+ priv->ucan.can.clock.freq = 60 * 1000 * 1000;
+ break;
+ default:
+ pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ,
+ PCIEFD_REG_CAN_CLK_SEL);
+
+ /* fall through */
+ case CANFD_CLK_SEL_80MHZ:
+ priv->ucan.can.clock.freq = 80 * 1000 * 1000;
+ break;
+ }
+
+ ndev->irq = pciefd->pci_dev->irq;
+
+ SET_NETDEV_DEV(ndev, &pciefd->pci_dev->dev);
+
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pciefd->pci_dev->dev,
+ "couldn't register CAN device: %d\n", err);
+ goto err_free_candev;
+ }
+
+ spin_lock_init(&priv->tx_lock);
+
+ /* save the object address in the board structure */
+ pciefd->can[pciefd->can_count] = priv;
+
+ dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n",
+ ndev->name, priv->reg_base, pciefd->pci_dev->irq);
+
+ return 0;
+
+err_free_candev:
+ free_candev(ndev);
+
+failure:
+ return -ENOMEM;
+}
+
+/* remove a CAN-FD channel by releasing all of its resources */
+static void pciefd_can_remove(struct pciefd_can *priv)
+{
+ /* unregister (close) the can device to go back to RST mode first */
+ unregister_candev(priv->ucan.ndev);
+
+ /* finally, free the candev object */
+ free_candev(priv->ucan.ndev);
+}
+
+/* remove all CAN-FD channels by releasing their own resources */
+static void pciefd_can_remove_all(struct pciefd_board *pciefd)
+{
+ while (pciefd->can_count > 0)
+ pciefd_can_remove(pciefd->can[--pciefd->can_count]);
+}
+
+/* probe for the entire device */
+static int peak_pciefd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct pciefd_board *pciefd;
+ int err, can_count;
+ u16 sub_sys_id;
+ u8 hw_ver_major;
+ u8 hw_ver_minor;
+ u8 hw_ver_sub;
+ u32 v2;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ err = pci_request_regions(pdev, PCIEFD_DRV_NAME);
+ if (err)
+ goto err_disable_pci;
+
+ /* the number of channels depends on sub-system id */
+ err = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sub_sys_id);
+ if (err)
+ goto err_release_regions;
+
+ dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n",
+ pdev->vendor, pdev->device, sub_sys_id);
+
+ if (sub_sys_id >= 0x0012)
+ can_count = 4;
+ else if (sub_sys_id >= 0x0010)
+ can_count = 3;
+ else if (sub_sys_id >= 0x0004)
+ can_count = 2;
+ else
+ can_count = 1;
+
+ /* allocate board structure object */
+ pciefd = devm_kzalloc(&pdev->dev, sizeof(*pciefd) +
+ can_count * sizeof(*pciefd->can),
+ GFP_KERNEL);
+ if (!pciefd) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* initialize the board structure */
+ pciefd->pci_dev = pdev;
+ spin_lock_init(&pciefd->cmd_lock);
+
+ /* save the PCI BAR0 virtual address for further system regs access */
+ pciefd->reg_base = pci_iomap(pdev, 0, PCIEFD_BAR0_SIZE);
+ if (!pciefd->reg_base) {
+ dev_err(&pdev->dev, "failed to map PCI resource #0\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* read the firmware version number */
+ v2 = pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER2);
+
+ hw_ver_major = (v2 & 0x0000f000) >> 12;
+ hw_ver_minor = (v2 & 0x00000f00) >> 8;
+ hw_ver_sub = (v2 & 0x000000f0) >> 4;
+
+ dev_info(&pdev->dev,
+ "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
+ hw_ver_major, hw_ver_minor, hw_ver_sub);
+
+ /* stop system clock */
+ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
+ PCIEFD_REG_SYS_CTL_CLR);
+
+ pci_set_master(pdev);
+
+ /* now create the corresponding channel objects */
+ while (pciefd->can_count < can_count) {
+ err = pciefd_can_probe(pciefd);
+ if (err)
+ goto err_free_canfd;
+
+ pciefd->can_count++;
+ }
+
+ /* set system timestamps counter in RST mode */
+ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST,
+ PCIEFD_REG_SYS_CTL_SET);
+
+ /* wait a bit (read cycle) */
+ (void)pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER1);
+
+ /* free all clocks */
+ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST,
+ PCIEFD_REG_SYS_CTL_CLR);
+
+ /* start system clock */
+ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
+ PCIEFD_REG_SYS_CTL_SET);
+
+ /* remember the board structure address in the device user data */
+ pci_set_drvdata(pdev, pciefd);
+
+ return 0;
+
+err_free_canfd:
+ pciefd_can_remove_all(pciefd);
+
+ pci_iounmap(pdev, pciefd->reg_base);
+
+err_release_regions:
+ pci_release_regions(pdev);
+
+err_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+/* free the board structure object, as well as its resources: */
+static void peak_pciefd_remove(struct pci_dev *pdev)
+{
+ struct pciefd_board *pciefd = pci_get_drvdata(pdev);
+
+ /* release CAN-FD channels resources */
+ pciefd_can_remove_all(pciefd);
+
+ pci_iounmap(pdev, pciefd->reg_base);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver peak_pciefd_driver = {
+ .name = PCIEFD_DRV_NAME,
+ .id_table = peak_pciefd_tbl,
+ .probe = peak_pciefd_probe,
+ .remove = peak_pciefd_remove,
+};
+
+module_pci_driver(peak_pciefd_driver);
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
index 148cae5871a6..8f2e0dd7b756 100644
--- a/drivers/net/can/spi/Kconfig
+++ b/drivers/net/can/spi/Kconfig
@@ -1,6 +1,12 @@
menu "CAN SPI interfaces"
depends on SPI
+config CAN_HI311X
+ tristate "Holt HI311x SPI CAN controllers"
+ depends on CAN_DEV && SPI && HAS_DMA
+ ---help---
+ Driver for the Holt HI311x SPI CAN controllers.
+
config CAN_MCP251X
tristate "Microchip MCP251x SPI CAN controllers"
depends on HAS_DMA
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
index 0e86040cdd8c..f59fa3731073 100644
--- a/drivers/net/can/spi/Makefile
+++ b/drivers/net/can/spi/Makefile
@@ -3,4 +3,5 @@
#
+obj-$(CONFIG_CAN_HI311X) += hi311x.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
new file mode 100644
index 000000000000..5590c559a8ca
--- /dev/null
+++ b/drivers/net/can/spi/hi311x.c
@@ -0,0 +1,1076 @@
+/* CAN bus driver for Holt HI3110 CAN Controller with SPI Interface
+ *
+ * Copyright(C) Timesys Corporation 2016
+ *
+ * Based on Microchip 251x CAN Controller (mcp251x) Linux kernel driver
+ * Copyright 2009 Christian Pellegrin EVOL S.r.l.
+ * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
+ * Copyright 2006 Arcom Control Systems Ltd.
+ *
+ * Based on CAN bus driver for the CCAN controller written by
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
+ * - Simon Kallweit, intefo AG
+ * Copyright 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/led.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/uaccess.h>
+
+#define HI3110_MASTER_RESET 0x56
+#define HI3110_READ_CTRL0 0xD2
+#define HI3110_READ_CTRL1 0xD4
+#define HI3110_READ_STATF 0xE2
+#define HI3110_WRITE_CTRL0 0x14
+#define HI3110_WRITE_CTRL1 0x16
+#define HI3110_WRITE_INTE 0x1C
+#define HI3110_WRITE_BTR0 0x18
+#define HI3110_WRITE_BTR1 0x1A
+#define HI3110_READ_BTR0 0xD6
+#define HI3110_READ_BTR1 0xD8
+#define HI3110_READ_INTF 0xDE
+#define HI3110_READ_ERR 0xDC
+#define HI3110_READ_FIFO_WOTIME 0x48
+#define HI3110_WRITE_FIFO 0x12
+#define HI3110_READ_MESSTAT 0xDA
+#define HI3110_READ_REC 0xEA
+#define HI3110_READ_TEC 0xEC
+
+#define HI3110_CTRL0_MODE_MASK (7 << 5)
+#define HI3110_CTRL0_NORMAL_MODE (0 << 5)
+#define HI3110_CTRL0_LOOPBACK_MODE (1 << 5)
+#define HI3110_CTRL0_MONITOR_MODE (2 << 5)
+#define HI3110_CTRL0_SLEEP_MODE (3 << 5)
+#define HI3110_CTRL0_INIT_MODE (4 << 5)
+
+#define HI3110_CTRL1_TXEN BIT(7)
+
+#define HI3110_INT_RXTMP BIT(7)
+#define HI3110_INT_RXFIFO BIT(6)
+#define HI3110_INT_TXCPLT BIT(5)
+#define HI3110_INT_BUSERR BIT(4)
+#define HI3110_INT_MCHG BIT(3)
+#define HI3110_INT_WAKEUP BIT(2)
+#define HI3110_INT_F1MESS BIT(1)
+#define HI3110_INT_F0MESS BIT(0)
+
+#define HI3110_ERR_BUSOFF BIT(7)
+#define HI3110_ERR_TXERRP BIT(6)
+#define HI3110_ERR_RXERRP BIT(5)
+#define HI3110_ERR_BITERR BIT(4)
+#define HI3110_ERR_FRMERR BIT(3)
+#define HI3110_ERR_CRCERR BIT(2)
+#define HI3110_ERR_ACKERR BIT(1)
+#define HI3110_ERR_STUFERR BIT(0)
+#define HI3110_ERR_PROTOCOL_MASK (0x1F)
+#define HI3110_ERR_PASSIVE_MASK (0x60)
+
+#define HI3110_STAT_RXFMTY BIT(1)
+#define HI3110_STAT_BUSOFF BIT(2)
+#define HI3110_STAT_ERRP BIT(3)
+#define HI3110_STAT_ERRW BIT(4)
+
+#define HI3110_BTR0_SJW_SHIFT 6
+#define HI3110_BTR0_BRP_SHIFT 0
+
+#define HI3110_BTR1_SAMP_3PERBIT (1 << 7)
+#define HI3110_BTR1_SAMP_1PERBIT (0 << 7)
+#define HI3110_BTR1_TSEG2_SHIFT 4
+#define HI3110_BTR1_TSEG1_SHIFT 0
+
+#define HI3110_FIFO_WOTIME_TAG_OFF 0
+#define HI3110_FIFO_WOTIME_ID_OFF 1
+#define HI3110_FIFO_WOTIME_DLC_OFF 5
+#define HI3110_FIFO_WOTIME_DAT_OFF 6
+
+#define HI3110_FIFO_WOTIME_TAG_IDE BIT(7)
+#define HI3110_FIFO_WOTIME_ID_RTR BIT(0)
+
+#define HI3110_FIFO_TAG_OFF 0
+#define HI3110_FIFO_ID_OFF 1
+#define HI3110_FIFO_STD_DLC_OFF 3
+#define HI3110_FIFO_STD_DATA_OFF 4
+#define HI3110_FIFO_EXT_DLC_OFF 5
+#define HI3110_FIFO_EXT_DATA_OFF 6
+
+#define HI3110_CAN_MAX_DATA_LEN 8
+#define HI3110_RX_BUF_LEN 15
+#define HI3110_TX_STD_BUF_LEN 12
+#define HI3110_TX_EXT_BUF_LEN 14
+#define HI3110_CAN_FRAME_MAX_BITS 128
+#define HI3110_EFF_FLAGS 0x18 /* IDE + SRR */
+
+#define HI3110_TX_ECHO_SKB_MAX 1
+
+#define HI3110_OST_DELAY_MS (10)
+
+#define DEVICE_NAME "hi3110"
+
+static int hi3110_enable_dma = 1; /* Enable SPI DMA. Default: 1 (On) */
+module_param(hi3110_enable_dma, int, 0444);
+MODULE_PARM_DESC(hi3110_enable_dma, "Enable SPI DMA. Default: 1 (On)");
+
+static const struct can_bittiming_const hi3110_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+enum hi3110_model {
+ CAN_HI3110_HI3110 = 0x3110,
+};
+
+struct hi3110_priv {
+ struct can_priv can;
+ struct net_device *net;
+ struct spi_device *spi;
+ enum hi3110_model model;
+
+ struct mutex hi3110_lock; /* SPI device lock */
+
+ u8 *spi_tx_buf;
+ u8 *spi_rx_buf;
+ dma_addr_t spi_tx_dma;
+ dma_addr_t spi_rx_dma;
+
+ struct sk_buff *tx_skb;
+ int tx_len;
+
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct work_struct restart_work;
+
+ int force_quit;
+ int after_suspend;
+#define HI3110_AFTER_SUSPEND_UP 1
+#define HI3110_AFTER_SUSPEND_DOWN 2
+#define HI3110_AFTER_SUSPEND_POWER 4
+#define HI3110_AFTER_SUSPEND_RESTART 8
+ int restart_tx;
+ struct regulator *power;
+ struct regulator *transceiver;
+ struct clk *clk;
+};
+
+static void hi3110_clean(struct net_device *net)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+
+ if (priv->tx_skb || priv->tx_len)
+ net->stats.tx_errors++;
+ if (priv->tx_skb)
+ dev_kfree_skb(priv->tx_skb);
+ if (priv->tx_len)
+ can_free_echo_skb(priv->net, 0);
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+}
+
+/* Note about the handling of error returns from hi3110_spi_trans():
+ * accessing registers via SPI is conceptually no different from using
+ * normal I/O assembler instructions, although it is much more
+ * complicated from a practical POV. So it is not advisable to always
+ * check the return value of this function. Imagine that every
+ * read{b,l}, write{b,l} and friends were bracketed in "if ( < 0)
+ * error();" - it would be a great mess (well, there are some situations
+ * where C++-like exception handling could be useful after all). So we
+ * just check that transfers are OK at the beginning of our
+ * conversation with the chip and avoid doing really nasty things
+ * (like injecting bogus packets into the network stack).
+ */
+static int hi3110_spi_trans(struct spi_device *spi, int len)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+ struct spi_transfer t = {
+ .tx_buf = priv->spi_tx_buf,
+ .rx_buf = priv->spi_rx_buf,
+ .len = len,
+ .cs_change = 0,
+ };
+ struct spi_message m;
+ int ret;
+
+ spi_message_init(&m);
+
+ if (hi3110_enable_dma) {
+ t.tx_dma = priv->spi_tx_dma;
+ t.rx_dma = priv->spi_rx_dma;
+ m.is_dma_mapped = 1;
+ }
+
+ spi_message_add_tail(&t, &m);
+
+ ret = spi_sync(spi, &m);
+
+ if (ret)
+ dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
+ return ret;
+}
+
+static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+ priv->spi_tx_buf[0] = command;
+ dev_dbg(&spi->dev, "hi3110_cmd: %02X\n", command);
+
+ return hi3110_spi_trans(spi, 1);
+}
+
+static u8 hi3110_read(struct spi_device *spi, u8 command)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+ u8 val = 0;
+
+ priv->spi_tx_buf[0] = command;
+ hi3110_spi_trans(spi, 2);
+ val = priv->spi_rx_buf[1];
+
+ return val;
+}
+
+static void hi3110_write(struct spi_device *spi, u8 reg, u8 val)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+ priv->spi_tx_buf[0] = reg;
+ priv->spi_tx_buf[1] = val;
+ hi3110_spi_trans(spi, 2);
+}
+
+static void hi3110_hw_tx_frame(struct spi_device *spi, u8 *buf, int len)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+ priv->spi_tx_buf[0] = HI3110_WRITE_FIFO;
+ memcpy(priv->spi_tx_buf + 1, buf, len);
+ hi3110_spi_trans(spi, len + 1);
+}
+
+static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame)
+{
+ u8 buf[HI3110_TX_EXT_BUF_LEN];
+
+ buf[HI3110_FIFO_TAG_OFF] = 0;
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ /* Extended frame */
+ buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_EFF_MASK) >> 21;
+ buf[HI3110_FIFO_ID_OFF + 1] =
+ (((frame->can_id & CAN_EFF_MASK) >> 13) & 0xe0) |
+ HI3110_EFF_FLAGS |
+ (((frame->can_id & CAN_EFF_MASK) >> 15) & 0x07);
+ buf[HI3110_FIFO_ID_OFF + 2] =
+ (frame->can_id & CAN_EFF_MASK) >> 7;
+ buf[HI3110_FIFO_ID_OFF + 3] =
+ ((frame->can_id & CAN_EFF_MASK) << 1) |
+ ((frame->can_id & CAN_RTR_FLAG) ? 1 : 0);
+
+ buf[HI3110_FIFO_EXT_DLC_OFF] = frame->can_dlc;
+
+ memcpy(buf + HI3110_FIFO_EXT_DATA_OFF,
+ frame->data, frame->can_dlc);
+
+ hi3110_hw_tx_frame(spi, buf, HI3110_TX_EXT_BUF_LEN -
+ (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc));
+ } else {
+ /* Standard frame */
+ buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_SFF_MASK) >> 3;
+ buf[HI3110_FIFO_ID_OFF + 1] =
+ ((frame->can_id & CAN_SFF_MASK) << 5) |
+ ((frame->can_id & CAN_RTR_FLAG) ? (1 << 4) : 0);
+
+ buf[HI3110_FIFO_STD_DLC_OFF] = frame->can_dlc;
+
+ memcpy(buf + HI3110_FIFO_STD_DATA_OFF,
+ frame->data, frame->can_dlc);
+
+ hi3110_hw_tx_frame(spi, buf, HI3110_TX_STD_BUF_LEN -
+ (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc));
+ }
+}
+
+static void hi3110_hw_rx_frame(struct spi_device *spi, u8 *buf)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+ priv->spi_tx_buf[0] = HI3110_READ_FIFO_WOTIME;
+ hi3110_spi_trans(spi, HI3110_RX_BUF_LEN);
+ memcpy(buf, priv->spi_rx_buf + 1, HI3110_RX_BUF_LEN - 1);
+}
+
+static void hi3110_hw_rx(struct spi_device *spi)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+ struct sk_buff *skb;
+ struct can_frame *frame;
+ u8 buf[HI3110_RX_BUF_LEN - 1];
+
+ skb = alloc_can_skb(priv->net, &frame);
+ if (!skb) {
+ priv->net->stats.rx_dropped++;
+ return;
+ }
+
+ hi3110_hw_rx_frame(spi, buf);
+ if (buf[HI3110_FIFO_WOTIME_TAG_OFF] & HI3110_FIFO_WOTIME_TAG_IDE) {
+ /* IDE is recessive (1), indicating extended 29-bit frame */
+ frame->can_id = CAN_EFF_FLAG;
+ frame->can_id |=
+ (buf[HI3110_FIFO_WOTIME_ID_OFF] << 21) |
+ (((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0xE0) >> 5) << 18) |
+ ((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0x07) << 15) |
+ (buf[HI3110_FIFO_WOTIME_ID_OFF + 2] << 7) |
+ (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] >> 1);
+ } else {
+ /* IDE is dominant (0), indicating a standard 11-bit frame */
+ frame->can_id =
+ (buf[HI3110_FIFO_WOTIME_ID_OFF] << 3) |
+ ((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0xE0) >> 5);
+ }
+
+ /* Data length */
+ frame->can_dlc = get_can_dlc(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);
+
+ if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR)
+ frame->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF,
+ frame->can_dlc);
+
+ priv->net->stats.rx_packets++;
+ priv->net->stats.rx_bytes += frame->can_dlc;
+
+ can_led_event(priv->net, CAN_LED_EVENT_RX);
+
+ netif_rx_ni(skb);
+}
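As a cross-check of the bit packing in hi3110_hw_tx() against the unpacking above, a worked example with a hypothetical extended ID:

/* example: can_id = 0x01234567 (29-bit extended ID, no RTR)
 *   tx packing:   buf[ID_OFF]     = id >> 21                    = 0x09
 *                 buf[ID_OFF + 1] = ((id >> 13) & 0xe0) |
 *                                   HI3110_EFF_FLAGS |
 *                                   ((id >> 15) & 0x07)         = 0x1e
 *                 buf[ID_OFF + 2] = (id >> 7) & 0xff            = 0x8a
 *                 buf[ID_OFF + 3] = (id << 1) & 0xff            = 0xce
 *   rx unpacking: (0x09 << 21) | (((0x1e & 0xe0) >> 5) << 18) |
 *                 ((0x1e & 0x07) << 15) | (0x8a << 7) | (0xce >> 1)
 *                 = 0x01234567, i.e. the shifts are exact inverses
 */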
+
+static void hi3110_hw_sleep(struct spi_device *spi)
+{
+ hi3110_write(spi, HI3110_WRITE_CTRL0, HI3110_CTRL0_SLEEP_MODE);
+}
+
+static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+
+ if (priv->tx_skb || priv->tx_len) {
+ dev_err(&spi->dev, "hard_xmit called while tx busy\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (can_dropped_invalid_skb(net, skb))
+ return NETDEV_TX_OK;
+
+ netif_stop_queue(net);
+ priv->tx_skb = skb;
+ queue_work(priv->wq, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static int hi3110_do_set_mode(struct net_device *net, enum can_mode mode)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ hi3110_clean(net);
+ /* We have to delay work since SPI I/O may sleep */
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ priv->restart_tx = 1;
+ if (priv->can.restart_ms == 0)
+ priv->after_suspend = HI3110_AFTER_SUSPEND_RESTART;
+ queue_work(priv->wq, &priv->restart_work);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hi3110_get_berr_counter(const struct net_device *net,
+ struct can_berr_counter *bec)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+
+ bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
+ bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
+
+ return 0;
+}
+
+static int hi3110_set_normal_mode(struct spi_device *spi)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+ u8 reg = 0;
+
+ hi3110_write(spi, HI3110_WRITE_INTE, HI3110_INT_BUSERR |
+ HI3110_INT_RXFIFO | HI3110_INT_TXCPLT);
+
+ /* Enable TX */
+ hi3110_write(spi, HI3110_WRITE_CTRL1, HI3110_CTRL1_TXEN);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ reg = HI3110_CTRL0_LOOPBACK_MODE;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg = HI3110_CTRL0_MONITOR_MODE;
+ else
+ reg = HI3110_CTRL0_NORMAL_MODE;
+
+ hi3110_write(spi, HI3110_WRITE_CTRL0, reg);
+
+ /* Wait for the device to enter the requested mode */
+ mdelay(HI3110_OST_DELAY_MS);
+ if ((hi3110_read(spi, HI3110_READ_CTRL0) &
+ HI3110_CTRL0_MODE_MASK) != reg)
+ return -EBUSY;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+}
+
+static int hi3110_do_set_bittiming(struct net_device *net)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct spi_device *spi = priv->spi;
+
+ hi3110_write(spi, HI3110_WRITE_BTR0,
+ ((bt->sjw - 1) << HI3110_BTR0_SJW_SHIFT) |
+ ((bt->brp - 1) << HI3110_BTR0_BRP_SHIFT));
+
+ hi3110_write(spi, HI3110_WRITE_BTR1,
+ (priv->can.ctrlmode &
+ CAN_CTRLMODE_3_SAMPLES ?
+ HI3110_BTR1_SAMP_3PERBIT : HI3110_BTR1_SAMP_1PERBIT) |
+ ((bt->phase_seg1 + bt->prop_seg - 1)
+ << HI3110_BTR1_TSEG1_SHIFT) |
+ ((bt->phase_seg2 - 1) << HI3110_BTR1_TSEG2_SHIFT));
+
+ dev_dbg(&spi->dev, "BT: 0x%02x 0x%02x\n",
+ hi3110_read(spi, HI3110_READ_BTR0),
+ hi3110_read(spi, HI3110_READ_BTR1));
+
+ return 0;
+}
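+
+/* Illustration (assuming a 16 MHz oscillator, i.e. an 8 MHz CAN
+ * clock): for 500 kbit/s the bittiming core may compute brp = 1,
+ * prop_seg + phase_seg1 = 13 and phase_seg2 = 2, giving 16 time
+ * quanta per bit and a sample point of 14/16 = 87.5%.
+ */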
+
+static int hi3110_setup(struct net_device *net)
+{
+ hi3110_do_set_bittiming(net);
+ return 0;
+}
+
+static int hi3110_hw_reset(struct spi_device *spi)
+{
+ u8 reg;
+ int ret;
+
+ /* Wait for oscillator startup timer after power up */
+ mdelay(HI3110_OST_DELAY_MS);
+
+ ret = hi3110_cmd(spi, HI3110_MASTER_RESET);
+ if (ret)
+ return ret;
+
+ /* Wait for oscillator startup timer after reset */
+ mdelay(HI3110_OST_DELAY_MS);
+
+ reg = hi3110_read(spi, HI3110_READ_CTRL0);
+ if ((reg & HI3110_CTRL0_MODE_MASK) != HI3110_CTRL0_INIT_MODE)
+ return -ENODEV;
+
+ /* Per the datasheet the error flags are not cleared on reset,
+ * so clear them explicitly by performing a read
+ */
+ hi3110_read(spi, HI3110_READ_ERR);
+
+ return 0;
+}
+
+static int hi3110_hw_probe(struct spi_device *spi)
+{
+ u8 statf;
+ int ret;
+
+ ret = hi3110_hw_reset(spi);
+ if (ret)
+ return ret;
+
+ /* Confirm correct operation by checking against reset values
+ * in datasheet
+ */
+ statf = hi3110_read(spi, HI3110_READ_STATF);
+
+ dev_dbg(&spi->dev, "statf: %02X\n", statf);
+
+ if (statf != 0x82)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int hi3110_power_enable(struct regulator *reg, int enable)
+{
+ if (IS_ERR_OR_NULL(reg))
+ return 0;
+
+ if (enable)
+ return regulator_enable(reg);
+ else
+ return regulator_disable(reg);
+}
+
+static int hi3110_stop(struct net_device *net)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+
+ close_candev(net);
+
+ priv->force_quit = 1;
+ free_irq(spi->irq, priv);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
+ mutex_lock(&priv->hi3110_lock);
+
+ /* Disable transmit, interrupts and clear flags */
+ hi3110_write(spi, HI3110_WRITE_CTRL1, 0x0);
+ hi3110_write(spi, HI3110_WRITE_INTE, 0x0);
+ hi3110_read(spi, HI3110_READ_INTF);
+
+ hi3110_clean(net);
+
+ hi3110_hw_sleep(spi);
+
+ hi3110_power_enable(priv->transceiver, 0);
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ mutex_unlock(&priv->hi3110_lock);
+
+ can_led_event(net, CAN_LED_EVENT_STOP);
+
+ return 0;
+}
+
+static void hi3110_tx_work_handler(struct work_struct *ws)
+{
+ struct hi3110_priv *priv = container_of(ws, struct hi3110_priv,
+ tx_work);
+ struct spi_device *spi = priv->spi;
+ struct net_device *net = priv->net;
+ struct can_frame *frame;
+
+ mutex_lock(&priv->hi3110_lock);
+ if (priv->tx_skb) {
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ hi3110_clean(net);
+ } else {
+ frame = (struct can_frame *)priv->tx_skb->data;
+ hi3110_hw_tx(spi, frame);
+ priv->tx_len = 1 + frame->can_dlc;
+ can_put_echo_skb(priv->tx_skb, net, 0);
+ priv->tx_skb = NULL;
+ }
+ }
+ mutex_unlock(&priv->hi3110_lock);
+}
+
+static void hi3110_restart_work_handler(struct work_struct *ws)
+{
+ struct hi3110_priv *priv = container_of(ws, struct hi3110_priv,
+ restart_work);
+ struct spi_device *spi = priv->spi;
+ struct net_device *net = priv->net;
+
+ mutex_lock(&priv->hi3110_lock);
+ if (priv->after_suspend) {
+ hi3110_hw_reset(spi);
+ hi3110_setup(net);
+ if (priv->after_suspend & HI3110_AFTER_SUSPEND_RESTART) {
+ hi3110_set_normal_mode(spi);
+ } else if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP) {
+ netif_device_attach(net);
+ hi3110_clean(net);
+ hi3110_set_normal_mode(spi);
+ netif_wake_queue(net);
+ } else {
+ hi3110_hw_sleep(spi);
+ }
+ priv->after_suspend = 0;
+ priv->force_quit = 0;
+ }
+
+ if (priv->restart_tx) {
+ priv->restart_tx = 0;
+ hi3110_hw_reset(spi);
+ hi3110_setup(net);
+ hi3110_clean(net);
+ hi3110_set_normal_mode(spi);
+ netif_wake_queue(net);
+ }
+ mutex_unlock(&priv->hi3110_lock);
+}
+
+static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+{
+ struct hi3110_priv *priv = dev_id;
+ struct spi_device *spi = priv->spi;
+ struct net_device *net = priv->net;
+
+ mutex_lock(&priv->hi3110_lock);
+
+ while (!priv->force_quit) {
+ enum can_state new_state;
+ u8 intf, eflag, statf;
+
+ /* Drain the RX FIFO until the empty flag is set */
+ for (;;) {
+ statf = hi3110_read(spi, HI3110_READ_STATF);
+ if (statf & HI3110_STAT_RXFMTY)
+ break;
+ hi3110_hw_rx(spi);
+ }
+
+ intf = hi3110_read(spi, HI3110_READ_INTF);
+ eflag = hi3110_read(spi, HI3110_READ_ERR);
+ /* Update can state */
+ if (eflag & HI3110_ERR_BUSOFF)
+ new_state = CAN_STATE_BUS_OFF;
+ else if (eflag & HI3110_ERR_PASSIVE_MASK)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (statf & HI3110_STAT_ERRW)
+ new_state = CAN_STATE_ERROR_WARNING;
+ else
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (new_state != priv->can.state) {
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state rx_state, tx_state;
+ u8 rxerr, txerr;
+
+ skb = alloc_can_err_skb(net, &cf);
+ if (!skb)
+ break;
+
+ txerr = hi3110_read(spi, HI3110_READ_TEC);
+ rxerr = hi3110_read(spi, HI3110_READ_REC);
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ tx_state = txerr >= rxerr ? new_state : 0;
+ rx_state = txerr <= rxerr ? new_state : 0;
+ can_change_state(net, cf, tx_state, rx_state);
+ netif_rx_ni(skb);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(net);
+ if (priv->can.restart_ms == 0) {
+ priv->force_quit = 1;
+ hi3110_hw_sleep(spi);
+ break;
+ }
+ }
+ }
+
+ /* Update bus errors */
+ if ((intf & HI3110_INT_BUSERR) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* Check for protocol errors */
+ if (eflag & HI3110_ERR_PROTOCOL_MASK) {
+ skb = alloc_can_err_skb(net, &cf);
+ if (!skb)
+ break;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ priv->can.can_stats.bus_error++;
+ priv->net->stats.rx_errors++;
+ if (eflag & HI3110_ERR_BITERR)
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ else if (eflag & HI3110_ERR_FRMERR)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ else if (eflag & HI3110_ERR_STUFERR)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ else if (eflag & HI3110_ERR_CRCERR)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (eflag & HI3110_ERR_ACKERR)
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+
+ cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
+ cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ netdev_dbg(priv->net, "Bus Error\n");
+ netif_rx_ni(skb);
+ }
+ }
+
+ if (intf == 0)
+ break;
+
+ if (intf & HI3110_INT_TXCPLT) {
+ /* Only account a frame if one was actually in
+ * flight, so a spurious TXCPLT cannot underflow
+ * the tx_bytes counter
+ */
+ if (priv->tx_len) {
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += priv->tx_len - 1;
+ can_get_echo_skb(net, 0);
+ priv->tx_len = 0;
+ }
+ can_led_event(net, CAN_LED_EVENT_TX);
+ netif_wake_queue(net);
+ }
+ }
+ mutex_unlock(&priv->hi3110_lock);
+ return IRQ_HANDLED;
+}
+
+static int hi3110_open(struct net_device *net)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+ int ret;
+
+ ret = open_candev(net);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->hi3110_lock);
+ hi3110_power_enable(priv->transceiver, 1);
+
+ priv->force_quit = 0;
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+
+ ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist,
+ flags, DEVICE_NAME, priv);
+ if (ret) {
+ dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+ goto out_close;
+ }
+
+ priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+ INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+ INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
+
+ ret = hi3110_hw_reset(spi);
+ if (ret)
+ goto out_free_wq;
+
+ ret = hi3110_setup(net);
+ if (ret)
+ goto out_free_wq;
+
+ ret = hi3110_set_normal_mode(spi);
+ if (ret)
+ goto out_free_wq;
+
+ can_led_event(net, CAN_LED_EVENT_OPEN);
+ netif_wake_queue(net);
+ mutex_unlock(&priv->hi3110_lock);
+
+ return 0;
+
+ out_free_wq:
+ destroy_workqueue(priv->wq);
+ out_free_irq:
+ free_irq(spi->irq, priv);
+ hi3110_hw_sleep(spi);
+ out_close:
+ hi3110_power_enable(priv->transceiver, 0);
+ close_candev(net);
+ mutex_unlock(&priv->hi3110_lock);
+ return ret;
+}
+
+static const struct net_device_ops hi3110_netdev_ops = {
+ .ndo_open = hi3110_open,
+ .ndo_stop = hi3110_stop,
+ .ndo_start_xmit = hi3110_hard_start_xmit,
+};
+
+static const struct of_device_id hi3110_of_match[] = {
+ {
+ .compatible = "holt,hi3110",
+ .data = (void *)CAN_HI3110_HI3110,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hi3110_of_match);
+
+static const struct spi_device_id hi3110_id_table[] = {
+ {
+ .name = "hi3110",
+ .driver_data = (kernel_ulong_t)CAN_HI3110_HI3110,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, hi3110_id_table);
+
+static int hi3110_can_probe(struct spi_device *spi)
+{
+ const struct of_device_id *of_id = of_match_device(hi3110_of_match,
+ &spi->dev);
+ struct net_device *net;
+ struct hi3110_priv *priv;
+ struct clk *clk;
+ int freq, ret;
+
+ clk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&spi->dev, "no CAN clock source defined\n");
+ return PTR_ERR(clk);
+ }
+ freq = clk_get_rate(clk);
+
+ /* Sanity check */
+ if (freq > 40000000)
+ return -ERANGE;
+
+ /* Allocate can/net device */
+ net = alloc_candev(sizeof(struct hi3110_priv), HI3110_TX_ECHO_SKB_MAX);
+ if (!net)
+ return -ENOMEM;
+
+ if (!IS_ERR(clk)) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free;
+ }
+
+ net->netdev_ops = &hi3110_netdev_ops;
+ net->flags |= IFF_ECHO;
+
+ priv = netdev_priv(net);
+ priv->can.bittiming_const = &hi3110_bittiming_const;
+ priv->can.do_set_mode = hi3110_do_set_mode;
+ priv->can.do_get_berr_counter = hi3110_get_berr_counter;
+ priv->can.clock.freq = freq / 2;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING;
+
+ if (of_id)
+ priv->model = (enum hi3110_model)of_id->data;
+ else
+ priv->model = spi_get_device_id(spi)->driver_data;
+ priv->net = net;
+ priv->clk = clk;
+
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret)
+ goto out_clk;
+
+ priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
+ priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
+ if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
+ (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
+
+ ret = hi3110_power_enable(priv->power, 1);
+ if (ret)
+ goto out_clk;
+
+ priv->spi = spi;
+ mutex_init(&priv->hi3110_lock);
+
+ /* If requested, allocate DMA buffers */
+ if (hi3110_enable_dma) {
+ spi->dev.coherent_dma_mask = ~0;
+
+ /* Minimum coherent DMA allocation is PAGE_SIZE, so allocate
+ * that much and share it between Tx and Rx DMA buffers.
+ */
+ priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
+ PAGE_SIZE,
+ &priv->spi_tx_dma,
+ GFP_DMA);
+
+ if (priv->spi_tx_buf) {
+ priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
+ priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
+ (PAGE_SIZE / 2));
+ } else {
+ /* Fall back to non-DMA */
+ hi3110_enable_dma = 0;
+ }
+ }
+
+ /* Allocate non-DMA buffers */
+ if (!hi3110_enable_dma) {
+ priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
+ GFP_KERNEL);
+ if (!priv->spi_tx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
+ }
+ priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
+ GFP_KERNEL);
+
+ if (!priv->spi_rx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
+ }
+ }
+
+ SET_NETDEV_DEV(net, &spi->dev);
+
+ ret = hi3110_hw_probe(spi);
+ if (ret) {
+ if (ret == -ENODEV)
+ dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n",
+ priv->model);
+ goto error_probe;
+ }
+ hi3110_hw_sleep(spi);
+
+ ret = register_candev(net);
+ if (ret)
+ goto error_probe;
+
+ devm_can_led_init(net);
+ netdev_info(net, "%x successfully initialized.\n", priv->model);
+
+ return 0;
+
+ error_probe:
+ hi3110_power_enable(priv->power, 0);
+
+ out_clk:
+ if (!IS_ERR(clk))
+ clk_disable_unprepare(clk);
+
+ out_free:
+ free_candev(net);
+
+ dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
+ return ret;
+}
+
+static int hi3110_can_remove(struct spi_device *spi)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+ struct net_device *net = priv->net;
+
+ unregister_candev(net);
+
+ hi3110_power_enable(priv->power, 0);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ free_candev(net);
+
+ return 0;
+}
+
+static int __maybe_unused hi3110_can_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+ struct net_device *net = priv->net;
+
+ priv->force_quit = 1;
+ disable_irq(spi->irq);
+
+ /* Note: at this point neither the IST nor the workqueues are
+ * running, and open/stop cannot be called anyway, so no locking
+ * is needed
+ */
+ if (netif_running(net)) {
+ netif_device_detach(net);
+
+ hi3110_hw_sleep(spi);
+ hi3110_power_enable(priv->transceiver, 0);
+ priv->after_suspend = HI3110_AFTER_SUSPEND_UP;
+ } else {
+ priv->after_suspend = HI3110_AFTER_SUSPEND_DOWN;
+ }
+
+ if (!IS_ERR_OR_NULL(priv->power)) {
+ regulator_disable(priv->power);
+ priv->after_suspend |= HI3110_AFTER_SUSPEND_POWER;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused hi3110_can_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+ if (priv->after_suspend & HI3110_AFTER_SUSPEND_POWER)
+ hi3110_power_enable(priv->power, 1);
+
+ if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP) {
+ hi3110_power_enable(priv->transceiver, 1);
+ queue_work(priv->wq, &priv->restart_work);
+ } else {
+ priv->after_suspend = 0;
+ }
+
+ priv->force_quit = 0;
+ enable_irq(spi->irq);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(hi3110_can_pm_ops, hi3110_can_suspend, hi3110_can_resume);
+
+static struct spi_driver hi3110_can_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = hi3110_of_match,
+ .pm = &hi3110_can_pm_ops,
+ },
+ .id_table = hi3110_id_table,
+ .probe = hi3110_can_probe,
+ .remove = hi3110_can_remove,
+};
+
+module_spi_driver(hi3110_can_driver);
+
+MODULE_AUTHOR("Akshay Bhat <akshay.bhat@timesys.com>");
+MODULE_AUTHOR("Casey Fitzpatrick <casey.fitzpatrick@timesys.com>");
+MODULE_DESCRIPTION("Holt HI-3110 CAN driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 6749b1829469..4d4941469cfc 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -17,25 +17,6 @@
*
*/
-/*
- * Your platform definitions should specify module ram offsets and interrupt
- * number to use as follows:
- *
- * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
- * .scc_hecc_offset = 0,
- * .scc_ram_offset = 0x3000,
- * .hecc_ram_offset = 0x3000,
- * .mbx_offset = 0x2000,
- * .int_line = 0,
- * .revision = 1,
- * .transceiver_switch = hecc_phy_control,
- * };
- *
- * Please see include/linux/can/platform/ti_hecc.h for description of
- * above fields.
- *
- */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -46,11 +27,13 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
-#include <linux/can/platform/ti_hecc.h>
#define DRV_NAME "ti_hecc"
#define HECC_MODULE_VERSION "0.7"
@@ -214,15 +197,14 @@ struct ti_hecc_priv {
struct net_device *ndev;
struct clk *clk;
void __iomem *base;
- u32 scc_ram_offset;
- u32 hecc_ram_offset;
- u32 mbx_offset;
- u32 int_line;
+ void __iomem *hecc_ram;
+ void __iomem *mbx;
+ bool use_hecc1int;
spinlock_t mbx_lock; /* CANME register needs protection */
u32 tx_head;
u32 tx_tail;
u32 rx_next;
- void (*transceiver_switch)(int);
+ struct regulator *reg_xceiver;
};
static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
@@ -242,20 +224,18 @@ static inline int get_tx_head_prio(struct ti_hecc_priv *priv)
static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
{
- __raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4);
+ __raw_writel(val, priv->hecc_ram + mbxno * 4);
}
static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
u32 reg, u32 val)
{
- __raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 +
- reg);
+ __raw_writel(val, priv->mbx + mbxno * 0x10 + reg);
}
static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg)
{
- return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 +
- reg);
+ return __raw_readl(priv->mbx + mbxno * 0x10 + reg);
}
static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val)
@@ -311,11 +291,16 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
return 0;
}
-static void ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
- int on)
+static int ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
+ int on)
{
- if (priv->transceiver_switch)
- priv->transceiver_switch(on);
+ if (!priv->reg_xceiver)
+ return 0;
+
+ if (on)
+ return regulator_enable(priv->reg_xceiver);
+ else
+ return regulator_disable(priv->reg_xceiver);
}
static void ti_hecc_reset(struct net_device *ndev)
@@ -409,7 +394,7 @@ static void ti_hecc_start(struct net_device *ndev)
/* Prevent message over-write & Enable interrupts */
hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
- if (priv->int_line) {
+ if (priv->use_hecc1int) {
hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
HECC_CANGIM_I1EN | HECC_CANGIM_SIL);
@@ -760,7 +745,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
unsigned long ack, flags;
int_status = hecc_read(priv,
- (priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0);
+ (priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0);
if (!int_status)
return IRQ_NONE;
@@ -806,7 +791,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
}
/* clear all interrupt conditions - read back to avoid spurious ints */
- if (priv->int_line) {
+ if (priv->use_hecc1int) {
hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
int_status = hecc_read(priv, HECC_CANGIF1);
} else {
@@ -872,58 +857,87 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct of_device_id ti_hecc_dt_ids[] = {
+ {
+ .compatible = "ti,am3517-hecc",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_hecc_dt_ids);
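+
+/* Example device tree node (illustrative; the register addresses are
+ * those of the AM3517 and may differ on other SoCs):
+ *
+ *	hecc: can@5c050000 {
+ *		compatible = "ti,am3517-hecc";
+ *		reg = <0x5c050000 0x80>,
+ *		      <0x5c053000 0x180>,
+ *		      <0x5c052000 0x200>;
+ *		reg-names = "hecc", "hecc-ram", "mbx";
+ *		interrupts = <24>;
+ *	};
+ *
+ * The optional "ti,use-hecc1int" boolean selects the HECC1 interrupt
+ * line and an optional "xceiver" regulator switches the transceiver.
+ */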
+
static int ti_hecc_probe(struct platform_device *pdev)
{
struct net_device *ndev = (struct net_device *)0;
struct ti_hecc_priv *priv;
- struct ti_hecc_platform_data *pdata;
- struct resource *mem, *irq;
- void __iomem *addr;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res, *irq;
+ struct regulator *reg_xceiver;
int err = -ENODEV;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data\n");
- goto probe_exit;
+ if (!IS_ENABLED(CONFIG_OF) || !np)
+ return -EINVAL;
+
+ reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+ if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(reg_xceiver))
+ reg_xceiver = NULL;
+
+ ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
+ if (!ndev) {
+ dev_err(&pdev->dev, "alloc_candev failed\n");
+ return -ENOMEM;
}
+ priv = netdev_priv(ndev);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "No mem resources\n");
- goto probe_exit;
+ /* handle hecc memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc");
+ if (!res) {
+ dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc\n");
+ return -EINVAL;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "No irq resource\n");
- goto probe_exit;
+
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ dev_err(&pdev->dev, "hecc ioremap failed\n");
+ return PTR_ERR(priv->base);
}
- if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
- dev_err(&pdev->dev, "HECC region already claimed\n");
- err = -EBUSY;
- goto probe_exit;
+
+ /* handle hecc-ram memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc-ram");
+ if (!res) {
+ dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc-ram\n");
+ return -EINVAL;
}
- addr = ioremap(mem->start, resource_size(mem));
- if (!addr) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -ENOMEM;
- goto probe_exit_free_region;
+
+ priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->hecc_ram)) {
+ dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
+ return PTR_ERR(priv->hecc_ram);
}
- ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
- if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev failed\n");
- err = -ENOMEM;
- goto probe_exit_iounmap;
+ /* handle mbx memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx");
+ if (!res) {
+ dev_err(&pdev->dev, "can't get IORESOURCE_MEM mbx\n");
+ return -EINVAL;
+ }
+
+ priv->mbx = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->mbx)) {
+ dev_err(&pdev->dev, "mbx ioremap failed\n");
+ return PTR_ERR(priv->mbx);
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "No irq resource\n");
+ goto probe_exit;
}
- priv = netdev_priv(ndev);
priv->ndev = ndev;
- priv->base = addr;
- priv->scc_ram_offset = pdata->scc_ram_offset;
- priv->hecc_ram_offset = pdata->hecc_ram_offset;
- priv->mbx_offset = pdata->mbx_offset;
- priv->int_line = pdata->int_line;
- priv->transceiver_switch = pdata->transceiver_switch;
+ priv->reg_xceiver = reg_xceiver;
+ priv->use_hecc1int = of_property_read_bool(np, "ti,use-hecc1int");
priv->can.bittiming_const = &ti_hecc_bittiming_const;
priv->can.do_set_mode = ti_hecc_do_set_mode;
@@ -971,32 +985,23 @@ probe_exit_clk:
clk_put(priv->clk);
probe_exit_candev:
free_candev(ndev);
-probe_exit_iounmap:
- iounmap(addr);
-probe_exit_free_region:
- release_mem_region(mem->start, resource_size(mem));
probe_exit:
return err;
}
static int ti_hecc_remove(struct platform_device *pdev)
{
- struct resource *res;
struct net_device *ndev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap(priv->base);
- release_mem_region(res->start, resource_size(res));
free_candev(ndev);
return 0;
}
-
#ifdef CONFIG_PM
static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
{
@@ -1045,6 +1050,7 @@ static int ti_hecc_resume(struct platform_device *pdev)
static struct platform_driver ti_hecc_driver = {
.driver = {
.name = DRV_NAME,
+ .of_match_table = ti_hecc_dt_ids,
},
.probe = ti_hecc_probe,
.remove = ti_hecc_remove,
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 5f9e0e6301d0..c36f4bdcbf4f 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -83,4 +83,10 @@ config CAN_8DEV_USB
This driver supports the USB2CAN interface
from 8 devices (http://www.8devices.com).
+config CAN_MCBA_USB
+ tristate "Microchip CAN BUS Analyzer interface"
+ ---help---
+ This driver supports the CAN BUS Analyzer interface
+ from Microchip (http://www.microchip.com/development-tools/).
+
endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index a64cf983fb87..164453fd55d0 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
+obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
new file mode 100644
index 000000000000..7f0272558bef
--- /dev/null
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -0,0 +1,904 @@
+/* SocketCAN driver for Microchip CAN BUS Analyzer Tool
+ *
+ * Copyright (C) 2017 Mobica Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.
+ *
+ * This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c
+ */
+
+#include <asm/unaligned.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+/* vendor and product id */
+#define MCBA_MODULE_NAME "mcba_usb"
+#define MCBA_VENDOR_ID 0x04d8
+#define MCBA_PRODUCT_ID 0x0a30
+
+/* driver constants */
+#define MCBA_MAX_RX_URBS 20
+#define MCBA_MAX_TX_URBS 20
+#define MCBA_CTX_FREE MCBA_MAX_TX_URBS
+
+/* The RX buffer must be bigger than a single message since several
+ * USB messages may arrive stacked in one transfer at the beginning.
+ */
+#define MCBA_USB_RX_BUFF_SIZE 64
+#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
+
+/* MCBA endpoint numbers */
+#define MCBA_USB_EP_IN 1
+#define MCBA_USB_EP_OUT 1
+
+/* Microchip command id */
+#define MBCA_CMD_RECEIVE_MESSAGE 0xE3
+#define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5
+#define MBCA_CMD_I_AM_ALIVE_FROM_USB 0xF7
+#define MBCA_CMD_CHANGE_BIT_RATE 0xA1
+#define MBCA_CMD_TRANSMIT_MESSAGE_EV 0xA3
+#define MBCA_CMD_SETUP_TERMINATION_RESISTANCE 0xA8
+#define MBCA_CMD_READ_FW_VERSION 0xA9
+#define MBCA_CMD_NOTHING_TO_SEND 0xFF
+#define MBCA_CMD_TRANSMIT_MESSAGE_RSP 0xE2
+
+#define MCBA_VER_REQ_USB 1
+#define MCBA_VER_REQ_CAN 2
+
+#define MCBA_SIDL_EXID_MASK 0x8
+#define MCBA_DLC_MASK 0xf
+#define MCBA_DLC_RTR_MASK 0x40
+
+#define MCBA_CAN_STATE_WRN_TH 95
+#define MCBA_CAN_STATE_ERR_PSV_TH 127
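+/* These thresholds approximate the classic CAN error counter limits
+ * (warning at 96, error-passive at 128); the device reports its error
+ * counters only in keep-alive messages, so the CAN state is derived
+ * from those in mcba_usb_process_ka_can().
+ */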
+
+#define MCBA_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
+#define MCBA_TERMINATION_ENABLED 120
+
+struct mcba_usb_ctx {
+ struct mcba_priv *priv;
+ u32 ndx;
+ u8 dlc;
+ bool can;
+};
+
+/* Structure to hold all of our device specific stuff */
+struct mcba_priv {
+ struct can_priv can; /* must be the first member */
+ struct sk_buff *echo_skb[MCBA_MAX_TX_URBS];
+ struct mcba_usb_ctx tx_context[MCBA_MAX_TX_URBS];
+ struct usb_device *udev;
+ struct net_device *netdev;
+ struct usb_anchor tx_submitted;
+ struct usb_anchor rx_submitted;
+ struct can_berr_counter bec;
+ bool usb_ka_first_pass;
+ bool can_ka_first_pass;
+ bool can_speed_check;
+ atomic_t free_ctx_cnt;
+};
+
+/* CAN frame */
+struct __packed mcba_usb_msg_can {
+ u8 cmd_id;
+ __be16 eid;
+ __be16 sid;
+ u8 dlc;
+ u8 data[8];
+ u8 timestamp[4];
+ u8 checksum;
+};
+
+/* command frame */
+struct __packed mcba_usb_msg {
+ u8 cmd_id;
+ u8 unused[18];
+};
+
+struct __packed mcba_usb_msg_ka_usb {
+ u8 cmd_id;
+ u8 termination_state;
+ u8 soft_ver_major;
+ u8 soft_ver_minor;
+ u8 unused[15];
+};
+
+struct __packed mcba_usb_msg_ka_can {
+ u8 cmd_id;
+ u8 tx_err_cnt;
+ u8 rx_err_cnt;
+ u8 rx_buff_ovfl;
+ u8 tx_bus_off;
+ __be16 can_bitrate;
+ __le16 rx_lost;
+ u8 can_stat;
+ u8 soft_ver_major;
+ u8 soft_ver_minor;
+ u8 debug_mode;
+ u8 test_complete;
+ u8 test_result;
+ u8 unused[4];
+};
+
+struct __packed mcba_usb_msg_change_bitrate {
+ u8 cmd_id;
+ __be16 bitrate;
+ u8 unused[16];
+};
+
+struct __packed mcba_usb_msg_termination {
+ u8 cmd_id;
+ u8 termination;
+ u8 unused[17];
+};
+
+struct __packed mcba_usb_msg_fw_ver {
+ u8 cmd_id;
+ u8 pic;
+ u8 unused[17];
+};
+
+static const struct usb_device_id mcba_usb_table[] = {
+ { USB_DEVICE(MCBA_VENDOR_ID, MCBA_PRODUCT_ID) },
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, mcba_usb_table);
+
+static const u16 mcba_termination[] = { MCBA_TERMINATION_DISABLED,
+ MCBA_TERMINATION_ENABLED };
+
+static const u32 mcba_bitrate[] = { 20000, 33333, 50000, 80000, 83333,
+ 100000, 125000, 150000, 175000, 200000,
+ 225000, 250000, 275000, 300000, 500000,
+ 625000, 800000, 1000000 };
+
+static inline void mcba_init_ctx(struct mcba_priv *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < MCBA_MAX_TX_URBS; i++) {
+ priv->tx_context[i].ndx = MCBA_CTX_FREE;
+ priv->tx_context[i].priv = priv;
+ }
+
+ atomic_set(&priv->free_ctx_cnt, ARRAY_SIZE(priv->tx_context));
+}
+
+static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv,
+ struct can_frame *cf)
+{
+ int i = 0;
+ struct mcba_usb_ctx *ctx = NULL;
+
+ for (i = 0; i < MCBA_MAX_TX_URBS; i++) {
+ if (priv->tx_context[i].ndx == MCBA_CTX_FREE) {
+ ctx = &priv->tx_context[i];
+ ctx->ndx = i;
+
+ if (cf) {
+ ctx->can = true;
+ ctx->dlc = cf->can_dlc;
+ } else {
+ ctx->can = false;
+ ctx->dlc = 0;
+ }
+
+ atomic_dec(&priv->free_ctx_cnt);
+ break;
+ }
+ }
+
+ if (!atomic_read(&priv->free_ctx_cnt))
+ /* That was the last free ctx. Slow down tx path */
+ netif_stop_queue(priv->netdev);
+
+ return ctx;
+}
+
+/* mcba_usb_free_ctx and mcba_usb_get_free_ctx are executed by different
+ * threads. The order of execution in the function below is important.
+ */
+static inline void mcba_usb_free_ctx(struct mcba_usb_ctx *ctx)
+{
+ /* Increase number of free ctxs before freeing ctx */
+ atomic_inc(&ctx->priv->free_ctx_cnt);
+
+ ctx->ndx = MCBA_CTX_FREE;
+
+ /* Wake up the queue once ctx is marked free */
+ netif_wake_queue(ctx->priv->netdev);
+}
+
+static void mcba_usb_write_bulk_callback(struct urb *urb)
+{
+ struct mcba_usb_ctx *ctx = urb->context;
+ struct net_device *netdev;
+
+ WARN_ON(!ctx);
+
+ netdev = ctx->priv->netdev;
+
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+
+ if (ctx->can) {
+ if (!netif_device_present(netdev))
+ return;
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += ctx->dlc;
+
+ can_led_event(netdev, CAN_LED_EVENT_TX);
+ can_get_echo_skb(netdev, ctx->ndx);
+ }
+
+ if (urb->status)
+ netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+
+ /* Release the context */
+ mcba_usb_free_ctx(ctx);
+}
+
+/* Send a raw USB message to the device */
+static int mcba_usb_xmit(struct mcba_priv *priv,
+ struct mcba_usb_msg *usb_msg,
+ struct mcba_usb_ctx *ctx)
+{
+ struct urb *urb;
+ u8 *buf;
+ int err;
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ buf = usb_alloc_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!buf) {
+ err = -ENOMEM;
+ goto nomembuf;
+ }
+
+ memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE);
+
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf,
+ MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback,
+ ctx);
+
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err))
+ goto failed;
+
+ /* Release our reference to this URB, the USB core will eventually free
+ * it entirely.
+ */
+ usb_free_urb(urb);
+
+ return 0;
+
+failed:
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, buf,
+ urb->transfer_dma);
+
+ if (err == -ENODEV)
+ netif_device_detach(priv->netdev);
+ else
+ netdev_warn(priv->netdev, "failed tx_urb %d\n", err);
+
+nomembuf:
+ usb_free_urb(urb);
+
+ return err;
+}
+
+/* Send data to device */
+static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct mcba_priv *priv = netdev_priv(netdev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct mcba_usb_ctx *ctx = NULL;
+ struct net_device_stats *stats = &priv->netdev->stats;
+ u16 sid;
+ int err;
+ struct mcba_usb_msg_can usb_msg = {
+ .cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV
+ };
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ ctx = mcba_usb_get_free_ctx(priv, cf);
+ if (!ctx)
+ return NETDEV_TX_BUSY;
+
+ can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ /* SIDH | SIDL | EIDH | EIDL
+ * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
+ */
+ sid = MCBA_SIDL_EXID_MASK;
+ /* store 28-18 bits */
+ sid |= (cf->can_id & 0x1ffc0000) >> 13;
+ /* store 17-16 bits */
+ sid |= (cf->can_id & 0x30000) >> 16;
+ put_unaligned_be16(sid, &usb_msg.sid);
+
+ /* store 15-0 bits */
+ put_unaligned_be16(cf->can_id & 0xffff, &usb_msg.eid);
+ } else {
+ /* SIDH | SIDL
+ * 10 - 3 | 2 1 0 x x x x x
+ */
+ put_unaligned_be16((cf->can_id & CAN_SFF_MASK) << 5,
+ &usb_msg.sid);
+ usb_msg.eid = 0;
+ }
+
+ usb_msg.dlc = cf->can_dlc;
+
+ memcpy(usb_msg.data, cf->data, usb_msg.dlc);
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ usb_msg.dlc |= MCBA_DLC_RTR_MASK;
+
+ err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
+ if (err)
+ goto xmit_failed;
+
+ return NETDEV_TX_OK;
+
+xmit_failed:
+ /* can_put_echo_skb() took ownership of the skb, so dropping the
+ * echo copy is all that is needed; freeing skb here again would
+ * be a double free
+ */
+ can_free_echo_skb(priv->netdev, ctx->ndx);
+ mcba_usb_free_ctx(ctx);
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
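+
+/* Worked example (illustrative): for an extended frame with
+ * can_id = 0x1abcdef0 the mapping above gives sid = 0xd5e8 (EXID set,
+ * ID bits 28..18 in bits 15..5, ID bits 17..16 in bits 1..0) and
+ * eid = 0xdef0 (ID bits 15..0).
+ */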
+
+/* Send cmd to device */
+static void mcba_usb_xmit_cmd(struct mcba_priv *priv,
+ struct mcba_usb_msg *usb_msg)
+{
+ struct mcba_usb_ctx *ctx = NULL;
+ int err;
+
+ ctx = mcba_usb_get_free_ctx(priv, NULL);
+ if (!ctx) {
+ netdev_err(priv->netdev,
+ "Lack of free ctx. Sending (%d) cmd aborted",
+ usb_msg->cmd_id);
+
+ return;
+ }
+
+ err = mcba_usb_xmit(priv, usb_msg, ctx);
+ if (err)
+ netdev_err(priv->netdev, "Failed to send cmd (%d)",
+ usb_msg->cmd_id);
+}
+
+static void mcba_usb_xmit_change_bitrate(struct mcba_priv *priv, u16 bitrate)
+{
+ struct mcba_usb_msg_change_bitrate usb_msg = {
+ .cmd_id = MBCA_CMD_CHANGE_BIT_RATE
+ };
+
+ put_unaligned_be16(bitrate, &usb_msg.bitrate);
+
+ mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
+}
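+
+/* e.g. mcba_usb_xmit_change_bitrate(priv, 500) asks the device to
+ * switch to 500 kbit/s; see mcba_net_set_bittiming() below.
+ */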
+
+static void mcba_usb_xmit_read_fw_ver(struct mcba_priv *priv, u8 pic)
+{
+ struct mcba_usb_msg_fw_ver usb_msg = {
+ .cmd_id = MBCA_CMD_READ_FW_VERSION,
+ .pic = pic
+ };
+
+ mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
+}
+
+static void mcba_usb_process_can(struct mcba_priv *priv,
+ struct mcba_usb_msg_can *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &priv->netdev->stats;
+ u16 sid;
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb)
+ return;
+
+ sid = get_unaligned_be16(&msg->sid);
+
+ if (sid & MCBA_SIDL_EXID_MASK) {
+ /* SIDH | SIDL | EIDH | EIDL
+ * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
+ */
+ cf->can_id = CAN_EFF_FLAG;
+
+ /* store 28-18 bits */
+ cf->can_id |= (sid & 0xffe0) << 13;
+ /* store 17-16 bits */
+ cf->can_id |= (sid & 3) << 16;
+ /* store 15-0 bits */
+ cf->can_id |= get_unaligned_be16(&msg->eid);
+ } else {
+ /* SIDH | SIDL
+ * 10 - 3 | 2 1 0 x x x x x
+ */
+ cf->can_id = (sid & 0xffe0) >> 5;
+ }
+
+ if (msg->dlc & MCBA_DLC_RTR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+
+ cf->can_dlc = get_can_dlc(msg->dlc & MCBA_DLC_MASK);
+
+ memcpy(cf->data, msg->data, cf->can_dlc);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ can_led_event(priv->netdev, CAN_LED_EVENT_RX);
+ netif_rx(skb);
+}
+
+static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
+ struct mcba_usb_msg_ka_usb *msg)
+{
+ if (unlikely(priv->usb_ka_first_pass)) {
+ netdev_info(priv->netdev, "PIC USB version %hhu.%hhu\n",
+ msg->soft_ver_major, msg->soft_ver_minor);
+
+ priv->usb_ka_first_pass = false;
+ }
+
+ if (msg->termination_state)
+ priv->can.termination = MCBA_TERMINATION_ENABLED;
+ else
+ priv->can.termination = MCBA_TERMINATION_DISABLED;
+}
+
+static u32 convert_can2host_bitrate(struct mcba_usb_msg_ka_can *msg)
+{
+ const u32 bitrate = get_unaligned_be16(&msg->can_bitrate);
+
+ if ((bitrate == 33) || (bitrate == 83))
+ return bitrate * 1000 + 333;
+ else
+ return bitrate * 1000;
+}
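+
+/* e.g. a device-reported value of 33 maps to 33333 bps and 83 to
+ * 83333 bps; every other value is a plain multiple of 1000
+ * (500 -> 500000 bps).
+ */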
+
+static void mcba_usb_process_ka_can(struct mcba_priv *priv,
+ struct mcba_usb_msg_ka_can *msg)
+{
+ if (unlikely(priv->can_ka_first_pass)) {
+ netdev_info(priv->netdev, "PIC CAN version %hhu.%hhu\n",
+ msg->soft_ver_major, msg->soft_ver_minor);
+
+ priv->can_ka_first_pass = false;
+ }
+
+ if (unlikely(priv->can_speed_check)) {
+ const u32 bitrate = convert_can2host_bitrate(msg);
+
+ priv->can_speed_check = false;
+
+ if (bitrate != priv->can.bittiming.bitrate)
+ netdev_err(
+ priv->netdev,
+ "Wrong bitrate reported by the device (%u). Expected %u",
+ bitrate, priv->can.bittiming.bitrate);
+ }
+
+ priv->bec.txerr = msg->tx_err_cnt;
+ priv->bec.rxerr = msg->rx_err_cnt;
+
+ if (msg->tx_bus_off)
+ priv->can.state = CAN_STATE_BUS_OFF;
+
+ else if ((priv->bec.txerr > MCBA_CAN_STATE_ERR_PSV_TH) ||
+ (priv->bec.rxerr > MCBA_CAN_STATE_ERR_PSV_TH))
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+
+ else if ((priv->bec.txerr > MCBA_CAN_STATE_WRN_TH) ||
+ (priv->bec.rxerr > MCBA_CAN_STATE_WRN_TH))
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+}
+
+static void mcba_usb_process_rx(struct mcba_priv *priv,
+ struct mcba_usb_msg *msg)
+{
+ switch (msg->cmd_id) {
+ case MBCA_CMD_I_AM_ALIVE_FROM_CAN:
+ mcba_usb_process_ka_can(priv,
+ (struct mcba_usb_msg_ka_can *)msg);
+ break;
+
+ case MBCA_CMD_I_AM_ALIVE_FROM_USB:
+ mcba_usb_process_ka_usb(priv,
+ (struct mcba_usb_msg_ka_usb *)msg);
+ break;
+
+ case MBCA_CMD_RECEIVE_MESSAGE:
+ mcba_usb_process_can(priv, (struct mcba_usb_msg_can *)msg);
+ break;
+
+ case MBCA_CMD_NOTHING_TO_SEND:
+ /* Side effect of communication between PIC_USB and PIC_CAN.
+ * PIC_CAN is telling us that it has nothing to send
+ */
+ break;
+
+ case MBCA_CMD_TRANSMIT_MESSAGE_RSP:
+ /* Transmission response from the device containing timestamp */
+ break;
+
+ default:
+ netdev_warn(priv->netdev, "Unsupported msg (0x%hhX)",
+ msg->cmd_id);
+ break;
+ }
+}
+
+/* Callback for reading data from device
+ *
+ * Check urb status, call read function and resubmit urb read operation.
+ */
+static void mcba_usb_read_bulk_callback(struct urb *urb)
+{
+ struct mcba_priv *priv = urb->context;
+ struct net_device *netdev;
+ int retval;
+ int pos = 0;
+
+ netdev = priv->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status);
+
+ goto resubmit_urb;
+ }
+
+ while (pos < urb->actual_length) {
+ struct mcba_usb_msg *msg;
+
+ if (pos + sizeof(struct mcba_usb_msg) > urb->actual_length) {
+ netdev_err(priv->netdev, "format error\n");
+ break;
+ }
+
+ msg = (struct mcba_usb_msg *)(urb->transfer_buffer + pos);
+ mcba_usb_process_rx(priv, msg);
+
+ pos += sizeof(struct mcba_usb_msg);
+ }
+
+resubmit_urb:
+
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
+ urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
+ mcba_usb_read_bulk_callback, priv);
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (retval == -ENODEV)
+ netif_device_detach(netdev);
+ else if (retval)
+ netdev_err(netdev, "failed resubmitting read bulk urb: %d\n",
+ retval);
+}
+
+/* Start USB device */
+static int mcba_usb_start(struct mcba_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ int err, i;
+
+ mcba_init_ctx(priv);
+
+ for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+ GFP_KERNEL, &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
+ buf, MCBA_USB_RX_BUFF_SIZE,
+ mcba_usb_read_bulk_callback, priv);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &priv->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+ buf, urb->transfer_dma);
+ usb_free_urb(urb);
+ break;
+ }
+
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(urb);
+ }
+
+ /* Did we submit any URBs? */
+ if (i == 0) {
+ netdev_warn(netdev, "couldn't setup read URBs\n");
+ return err;
+ }
+
+ /* Warn if we couldn't submit all the URBs */
+ if (i < MCBA_MAX_RX_URBS)
+ netdev_warn(netdev, "rx performance may be slow\n");
+
+ mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_USB);
+ mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_CAN);
+
+ return err;
+}
+
+/* Open USB device */
+static int mcba_usb_open(struct net_device *netdev)
+{
+ struct mcba_priv *priv = netdev_priv(netdev);
+ int err;
+
+ /* common open */
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ priv->can_speed_check = true;
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ can_led_event(netdev, CAN_LED_EVENT_OPEN);
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static void mcba_urb_unlink(struct mcba_priv *priv)
+{
+ usb_kill_anchored_urbs(&priv->rx_submitted);
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+}
+
+/* Close USB device */
+static int mcba_usb_close(struct net_device *netdev)
+{
+ struct mcba_priv *priv = netdev_priv(netdev);
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ netif_stop_queue(netdev);
+
+ /* Stop polling */
+ mcba_urb_unlink(priv);
+
+ close_candev(netdev);
+ can_led_event(netdev, CAN_LED_EVENT_STOP);
+
+ return 0;
+}
+
+/* Set network device mode
+ *
+ * This function is left empty because the device sets its mode
+ * itself when the open command is issued.
+ */
+static int mcba_net_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ return 0;
+}
+
+static int mcba_net_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct mcba_priv *priv = netdev_priv(netdev);
+
+ bec->txerr = priv->bec.txerr;
+ bec->rxerr = priv->bec.rxerr;
+
+ return 0;
+}
+
+static const struct net_device_ops mcba_netdev_ops = {
+ .ndo_open = mcba_usb_open,
+ .ndo_stop = mcba_usb_close,
+ .ndo_start_xmit = mcba_usb_start_xmit,
+};
+
+/* The Microchip CAN BUS Analyzer has hardcoded bittiming values by
+ * default. This function sends a request via USB to change the speed
+ * and aligns the bittiming values for presentation purposes only.
+ */
+static int mcba_net_set_bittiming(struct net_device *netdev)
+{
+ struct mcba_priv *priv = netdev_priv(netdev);
+ const u16 bitrate_kbps = priv->can.bittiming.bitrate / 1000;
+
+ mcba_usb_xmit_change_bitrate(priv, bitrate_kbps);
+
+ return 0;
+}
+
+static int mcba_set_termination(struct net_device *netdev, u16 term)
+{
+ struct mcba_priv *priv = netdev_priv(netdev);
+ struct mcba_usb_msg_termination usb_msg = {
+ .cmd_id = MBCA_CMD_SETUP_TERMINATION_RESISTANCE
+ };
+
+ if (term == MCBA_TERMINATION_ENABLED)
+ usb_msg.termination = 1;
+ else
+ usb_msg.termination = 0;
+
+ mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
+
+ return 0;
+}
+
+static int mcba_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct net_device *netdev;
+ struct mcba_priv *priv;
+ int err = -ENOMEM;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+
+ netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "Couldn't alloc candev\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(netdev);
+
+ priv->udev = usbdev;
+ priv->netdev = netdev;
+ priv->usb_ka_first_pass = true;
+ priv->can_ka_first_pass = true;
+ priv->can_speed_check = false;
+
+ init_usb_anchor(&priv->rx_submitted);
+ init_usb_anchor(&priv->tx_submitted);
+
+ usb_set_intfdata(intf, priv);
+
+ /* Init CAN device */
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.termination_const = mcba_termination;
+ priv->can.termination_const_cnt = ARRAY_SIZE(mcba_termination);
+ priv->can.bitrate_const = mcba_bitrate;
+ priv->can.bitrate_const_cnt = ARRAY_SIZE(mcba_bitrate);
+
+ priv->can.do_set_termination = mcba_set_termination;
+ priv->can.do_set_mode = mcba_net_set_mode;
+ priv->can.do_get_berr_counter = mcba_net_get_berr_counter;
+ priv->can.do_set_bittiming = mcba_net_set_bittiming;
+
+ netdev->netdev_ops = &mcba_netdev_ops;
+
+ netdev->flags |= IFF_ECHO; /* we support local echo */
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ err = register_candev(netdev);
+ if (err) {
+ netdev_err(netdev, "couldn't register CAN device: %d\n", err);
+
+ goto cleanup_free_candev;
+ }
+
+ devm_can_led_init(netdev);
+
+ /* Start USB dev only if we have successfully registered CAN device */
+ err = mcba_usb_start(priv);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(priv->netdev);
+
+ netdev_warn(netdev, "couldn't start device: %d\n", err);
+
+ goto cleanup_unregister_candev;
+ }
+
+ dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");
+
+ return 0;
+
+cleanup_unregister_candev:
+ unregister_candev(priv->netdev);
+
+cleanup_free_candev:
+ free_candev(netdev);
+
+ return err;
+}
+
+/* Called by the usb core when driver is unloaded or device is removed */
+static void mcba_usb_disconnect(struct usb_interface *intf)
+{
+ struct mcba_priv *priv = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ netdev_info(priv->netdev, "device disconnected\n");
+
+ unregister_candev(priv->netdev);
+
+ /* Kill outstanding URBs before priv is freed along with the netdev */
+ mcba_urb_unlink(priv);
+ free_candev(priv->netdev);
+}
+
+static struct usb_driver mcba_usb_driver = {
+ .name = MCBA_MODULE_NAME,
+ .probe = mcba_usb_probe,
+ .disconnect = mcba_usb_disconnect,
+ .id_table = mcba_usb_table,
+};
+
+module_usb_driver(mcba_usb_driver);
+
+MODULE_AUTHOR("Remigiusz Kołłątaj <remigiusz.kollataj@mobica.com>");
+MODULE_DESCRIPTION("SocketCAN driver for Microchip CAN BUS Analyzer Tool");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
deleted file mode 100644
index 2147678f0225..000000000000
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * CAN driver for PEAK System micro-CAN based adapters
- *
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#ifndef PUCAN_H
-#define PUCAN_H
-
-/* uCAN commands opcodes list (low-order 10 bits) */
-#define PUCAN_CMD_NOP 0x000
-#define PUCAN_CMD_RESET_MODE 0x001
-#define PUCAN_CMD_NORMAL_MODE 0x002
-#define PUCAN_CMD_LISTEN_ONLY_MODE 0x003
-#define PUCAN_CMD_TIMING_SLOW 0x004
-#define PUCAN_CMD_TIMING_FAST 0x005
-#define PUCAN_CMD_FILTER_STD 0x008
-#define PUCAN_CMD_TX_ABORT 0x009
-#define PUCAN_CMD_WR_ERR_CNT 0x00a
-#define PUCAN_CMD_SET_EN_OPTION 0x00b
-#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
-#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
-
-/* uCAN received messages list */
-#define PUCAN_MSG_CAN_RX 0x0001
-#define PUCAN_MSG_ERROR 0x0002
-#define PUCAN_MSG_STATUS 0x0003
-#define PUCAN_MSG_BUSLOAD 0x0004
-#define PUCAN_MSG_CAN_TX 0x1000
-
-/* uCAN command common header */
-struct __packed pucan_command {
- __le16 opcode_channel;
- u16 args[3];
-};
-
-#define PUCAN_TSLOW_BRP_BITS 10
-#define PUCAN_TSLOW_TSGEG1_BITS 8
-#define PUCAN_TSLOW_TSGEG2_BITS 7
-#define PUCAN_TSLOW_SJW_BITS 7
-
-#define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1)
-#define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1)
-#define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1)
-#define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1)
-
-/* uCAN TIMING_SLOW command fields */
-#define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \
- ((!!(t)) << 7))
-#define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK)
-#define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK)
-#define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK)
-
-struct __packed pucan_timing_slow {
- __le16 opcode_channel;
-
- u8 ewl; /* Error Warning limit */
- u8 sjw_t; /* Sync Jump Width + Triple sampling */
- u8 tseg2; /* Timing SEGment 2 */
- u8 tseg1; /* Timing SEGment 1 */
-
- __le16 brp; /* BaudRate Prescaler */
-};
-
-#define PUCAN_TFAST_BRP_BITS 10
-#define PUCAN_TFAST_TSGEG1_BITS 5
-#define PUCAN_TFAST_TSGEG2_BITS 4
-#define PUCAN_TFAST_SJW_BITS 4
-
-#define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1)
-#define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1)
-#define PUCAN_TFAST_TSEG2_MASK ((1 << PUCAN_TFAST_TSGEG2_BITS) - 1)
-#define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1)
-
-/* uCAN TIMING_FAST command fields */
-#define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK)
-#define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK)
-#define PUCAN_TFAST_TSEG1(t) ((t) & PUCAN_TFAST_TSEG1_MASK)
-#define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK)
-
-struct __packed pucan_timing_fast {
- __le16 opcode_channel;
-
- u8 unused;
- u8 sjw; /* Sync Jump Width */
- u8 tseg2; /* Timing SEGment 2 */
- u8 tseg1; /* Timing SEGment 1 */
-
- __le16 brp; /* BaudRate Prescaler */
-};
-
-/* uCAN FILTER_STD command fields */
-#define PUCAN_FLTSTD_ROW_IDX_BITS 6
-
-struct __packed pucan_filter_std {
- __le16 opcode_channel;
-
- __le16 idx;
- __le32 mask; /* CAN-ID bitmask in idx range */
-};
-
-/* uCAN WR_ERR_CNT command fields */
-#define PUCAN_WRERRCNT_TE 0x4000 /* Tx error cntr write Enable */
-#define PUCAN_WRERRCNT_RE 0x8000 /* Rx error cntr write Enable */
-
-struct __packed pucan_wr_err_cnt {
- __le16 opcode_channel;
-
- __le16 sel_mask;
- u8 tx_counter; /* Tx error counter new value */
- u8 rx_counter; /* Rx error counter new value */
-
- u16 unused;
-};
-
-/* uCAN SET_EN/CLR_DIS _OPTION command fields */
-#define PUCAN_OPTION_ERROR 0x0001
-#define PUCAN_OPTION_BUSLOAD 0x0002
-#define PUCAN_OPTION_CANDFDISO 0x0004
-
-struct __packed pucan_options {
- __le16 opcode_channel;
-
- __le16 options;
- u32 unused;
-};
-
-/* uCAN received messages global format */
-struct __packed pucan_msg {
- __le16 size;
- __le16 type;
- __le32 ts_low;
- __le32 ts_high;
-};
-
-/* uCAN flags for CAN/CANFD messages */
-#define PUCAN_MSG_SELF_RECEIVE 0x80
-#define PUCAN_MSG_ERROR_STATE_IND 0x40 /* error state indicator */
-#define PUCAN_MSG_BITRATE_SWITCH 0x20 /* bitrate switch */
-#define PUCAN_MSG_EXT_DATA_LEN 0x10 /* extended data length */
-#define PUCAN_MSG_SINGLE_SHOT 0x08
-#define PUCAN_MSG_LOOPED_BACK 0x04
-#define PUCAN_MSG_EXT_ID 0x02
-#define PUCAN_MSG_RTR 0x01
-
-struct __packed pucan_rx_msg {
- __le16 size;
- __le16 type;
- __le32 ts_low;
- __le32 ts_high;
- __le32 tag_low;
- __le32 tag_high;
- u8 channel_dlc;
- u8 client;
- __le16 flags;
- __le32 can_id;
- u8 d[0];
-};
-
-/* uCAN error types */
-#define PUCAN_ERMSG_BIT_ERROR 0
-#define PUCAN_ERMSG_FORM_ERROR 1
-#define PUCAN_ERMSG_STUFF_ERROR 2
-#define PUCAN_ERMSG_OTHER_ERROR 3
-#define PUCAN_ERMSG_ERR_CNT_DEC 4
-
-struct __packed pucan_error_msg {
- __le16 size;
- __le16 type;
- __le32 ts_low;
- __le32 ts_high;
- u8 channel_type_d;
- u8 code_g;
- u8 tx_err_cnt;
- u8 rx_err_cnt;
-};
-
-#define PUCAN_BUS_PASSIVE 0x20
-#define PUCAN_BUS_WARNING 0x40
-#define PUCAN_BUS_BUSOFF 0x80
-
-struct __packed pucan_status_msg {
- __le16 size;
- __le16 type;
- __le32 ts_low;
- __le32 ts_high;
- u8 channel_p_w_b;
- u8 unused[3];
-};
-
-/* uCAN transmitted message format */
-#define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4))
-
-struct __packed pucan_tx_msg {
- __le16 size;
- __le16 type;
- __le32 tag_low;
- __le32 tag_high;
- u8 channel_dlc;
- u8 client;
- __le16 flags;
- __le32 can_id;
- u8 d[0];
-};
-
-/* build the cmd opcode_channel field with respect to the correct endianness */
-static inline __le16 pucan_cmd_opcode_channel(struct peak_usb_device *dev,
- int opcode)
-{
- return cpu_to_le16(((dev->ctrl_idx) << 12) | ((opcode) & 0x3ff));
-}
-
-/* return the channel number part from any received message channel_dlc field */
-static inline int pucan_msg_get_channel(struct pucan_rx_msg *rm)
-{
- return rm->channel_dlc & 0xf;
-}
-
-/* return the dlc value from any received message channel_dlc field */
-static inline int pucan_msg_get_dlc(struct pucan_rx_msg *rm)
-{
- return rm->channel_dlc >> 4;
-}
-
-static inline int pucan_ermsg_get_channel(struct pucan_error_msg *em)
-{
- return em->channel_type_d & 0x0f;
-}
-
-static inline int pucan_stmsg_get_channel(struct pucan_status_msg *sm)
-{
- return sm->channel_p_w_b & 0x0f;
-}
-
-#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 528d3bb4917f..7ccdc3e30c98 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -19,10 +19,10 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/dev/peak_canfd.h>
#include "pcan_usb_core.h"
#include "pcan_usb_pro.h"
-#include "pcan_ucan.h"
MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter");
MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter");
@@ -238,7 +238,7 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
/* 1st, reset error counters: */
prc = (struct pucan_wr_err_cnt *)pc;
- prc->opcode_channel = pucan_cmd_opcode_channel(dev,
+ prc->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
PUCAN_CMD_WR_ERR_CNT);
/* select both counters */
@@ -257,9 +257,10 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
puo->opcode_channel =
(dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
- pucan_cmd_opcode_channel(dev,
+ pucan_cmd_opcode_channel(dev->ctrl_idx,
PUCAN_CMD_CLR_DIS_OPTION) :
- pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
+ pucan_cmd_opcode_channel(dev->ctrl_idx,
+ PUCAN_CMD_SET_EN_OPTION);
puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
@@ -274,7 +275,7 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
/* next, go back to operational mode */
cmd = (struct pucan_command *)pc;
- cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ?
PUCAN_CMD_LISTEN_ONLY_MODE :
PUCAN_CMD_NORMAL_MODE);
@@ -296,7 +297,7 @@ static int pcan_usb_fd_set_bus(struct peak_usb_device *dev, u8 onoff)
struct pucan_command *cmd = (struct pucan_command *)pc;
/* build cmd to go back to reset mode */
- cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
PUCAN_CMD_RESET_MODE);
l = sizeof(struct pucan_command);
}
@@ -332,7 +333,7 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
}
for (i = idx; i < n; i++, cmd++) {
- cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
PUCAN_CMD_FILTER_STD);
cmd->idx = cpu_to_le16(i);
cmd->mask = cpu_to_le32(mask);
@@ -352,7 +353,7 @@ static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
{
struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
- cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
(onoff) ? PUCAN_CMD_SET_EN_OPTION :
PUCAN_CMD_CLR_DIS_OPTION);
@@ -368,7 +369,7 @@ static int pcan_usb_fd_set_can_led(struct peak_usb_device *dev, u8 led_mode)
{
struct pcan_ufd_led *cmd = pcan_usb_fd_cmd_buffer(dev);
- cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
PCAN_UFD_CMD_LED_SET);
cmd->mode = led_mode;
@@ -382,7 +383,7 @@ static int pcan_usb_fd_set_clock_domain(struct peak_usb_device *dev,
{
struct pcan_ufd_clock *cmd = pcan_usb_fd_cmd_buffer(dev);
- cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
PCAN_UFD_CMD_CLK_SET);
cmd->mode = clk_mode;
@@ -396,7 +397,7 @@ static int pcan_usb_fd_set_bittiming_slow(struct peak_usb_device *dev,
{
struct pucan_timing_slow *cmd = pcan_usb_fd_cmd_buffer(dev);
- cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
PUCAN_CMD_TIMING_SLOW);
cmd->sjw_t = PUCAN_TSLOW_SJW_T(bt->sjw - 1,
dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);
@@ -417,7 +418,7 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
{
struct pucan_timing_fast *cmd = pcan_usb_fd_cmd_buffer(dev);
- cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
PUCAN_CMD_TIMING_FAST);
cmd->sjw = PUCAN_TFAST_SJW(bt->sjw - 1);
cmd->tseg2 = PUCAN_TFAST_TSEG2(bt->phase_seg2 - 1);
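
All hunks in this file make the same mechanical change: pucan_cmd_opcode_channel() now takes the channel index (dev->ctrl_idx) instead of the whole struct peak_usb_device. This matches the helper's move into the shared <linux/can/dev/peak_canfd.h> header included above, which presumably lets non-USB users of the uCAN command set reuse it without depending on the USB device structure.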
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 674f367087c5..facca33d53e9 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -1,7 +1,7 @@
/*
* vcan.c - Virtual CAN interface
*
- * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,9 +50,12 @@
#include <linux/slab.h>
#include <net/rtnetlink.h>
+#define DRV_NAME "vcan"
+
MODULE_DESCRIPTION("virtual CAN interface");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
/*
@@ -164,7 +167,7 @@ static void vcan_setup(struct net_device *dev)
}
static struct rtnl_link_ops vcan_link_ops __read_mostly = {
- .kind = "vcan",
+ .kind = DRV_NAME,
.setup = vcan_setup,
};
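
With MODULE_ALIAS_RTNL_LINK(DRV_NAME) in place, the rtnetlink core can load the module on demand, so e.g. "ip link add dev vcan0 type vcan" now works without a prior modprobe.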
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
new file mode 100644
index 000000000000..7fbb24795681
--- /dev/null
+++ b/drivers/net/can/vxcan.c
@@ -0,0 +1,316 @@
+/*
+ * vxcan.c - Virtual CAN Tunnel for cross namespace communication
+ *
+ * This code is derived from drivers/net/can/vcan.c for the virtual CAN
+ * specific parts and from drivers/net/veth.c to implement the netlink API
+ * for network interface pairs in a common and established way.
+ *
+ * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+#include <linux/can/vxcan.h>
+#include <linux/slab.h>
+#include <net/rtnetlink.h>
+
+#define DRV_NAME "vxcan"
+
+MODULE_DESCRIPTION("Virtual CAN Tunnel");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+
+struct vxcan_priv {
+ struct net_device __rcu *peer;
+};
+
+static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vxcan_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct net_device_stats *peerstats, *srcstats = &dev->stats;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ if (unlikely(!peer)) {
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ goto out_unlock;
+ }
+
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ goto out_unlock;
+
+ /* reset CAN GW hop counter */
+ skb->csum_start = 0;
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->dev = peer;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+ srcstats->tx_packets++;
+ srcstats->tx_bytes += cfd->len;
+ peerstats = &peer->stats;
+ peerstats->rx_packets++;
+ peerstats->rx_bytes += cfd->len;
+ }
+
+out_unlock:
+ rcu_read_unlock();
+ return NETDEV_TX_OK;
+}
+
+
+static int vxcan_open(struct net_device *dev)
+{
+ struct vxcan_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
+
+ if (!peer)
+ return -ENOTCONN;
+
+ if (peer->flags & IFF_UP) {
+ netif_carrier_on(dev);
+ netif_carrier_on(peer);
+ }
+ return 0;
+}
+
+static int vxcan_close(struct net_device *dev)
+{
+ struct vxcan_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
+
+ netif_carrier_off(dev);
+ if (peer)
+ netif_carrier_off(peer);
+
+ return 0;
+}
+
+static int vxcan_get_iflink(const struct net_device *dev)
+{
+ struct vxcan_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+ int iflink;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ iflink = peer ? peer->ifindex : 0;
+ rcu_read_unlock();
+
+ return iflink;
+}
+
+static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static const struct net_device_ops vxcan_netdev_ops = {
+ .ndo_open = vxcan_open,
+ .ndo_stop = vxcan_close,
+ .ndo_start_xmit = vxcan_xmit,
+ .ndo_get_iflink = vxcan_get_iflink,
+ .ndo_change_mtu = vxcan_change_mtu,
+};
+
+static void vxcan_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_CAN;
+ dev->mtu = CAN_MTU;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 0;
+ dev->flags = (IFF_NOARP|IFF_ECHO);
+ dev->netdev_ops = &vxcan_netdev_ops;
+ dev->destructor = free_netdev;
+}
+
+/* forward declaration for rtnl_create_link() */
+static struct rtnl_link_ops vxcan_link_ops;
+
+static int vxcan_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct vxcan_priv *priv;
+ struct net_device *peer;
+ struct net *peer_net;
+
+ struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
+ char ifname[IFNAMSIZ];
+ unsigned char name_assign_type;
+ struct ifinfomsg *ifmp = NULL;
+ int err;
+
+ /* register peer device */
+ if (data && data[VXCAN_INFO_PEER]) {
+ struct nlattr *nla_peer;
+
+ nla_peer = data[VXCAN_INFO_PEER];
+ ifmp = nla_data(nla_peer);
+ err = rtnl_nla_parse_ifla(peer_tb,
+ nla_data(nla_peer) +
+ sizeof(struct ifinfomsg),
+ nla_len(nla_peer) -
+ sizeof(struct ifinfomsg),
+ NULL);
+ if (err < 0)
+ return err;
+
+ tbp = peer_tb;
+ }
+
+ if (tbp[IFLA_IFNAME]) {
+ nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
+ name_assign_type = NET_NAME_USER;
+ } else {
+ snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
+ name_assign_type = NET_NAME_ENUM;
+ }
+
+ peer_net = rtnl_link_get_net(net, tbp);
+ if (IS_ERR(peer_net))
+ return PTR_ERR(peer_net);
+
+ peer = rtnl_create_link(peer_net, ifname, name_assign_type,
+ &vxcan_link_ops, tbp);
+ if (IS_ERR(peer)) {
+ put_net(peer_net);
+ return PTR_ERR(peer);
+ }
+
+ if (ifmp && dev->ifindex)
+ peer->ifindex = ifmp->ifi_index;
+
+ err = register_netdevice(peer);
+ put_net(peer_net);
+ peer_net = NULL;
+ if (err < 0) {
+ free_netdev(peer);
+ return err;
+ }
+
+ netif_carrier_off(peer);
+
+ err = rtnl_configure_link(peer, ifmp);
+ if (err < 0) {
+ unregister_netdevice(peer);
+ return err;
+ }
+
+ /* register first device */
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
+ else
+ snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
+
+ err = register_netdevice(dev);
+ if (err < 0) {
+ unregister_netdevice(peer);
+ return err;
+ }
+
+ netif_carrier_off(dev);
+
+ /* cross link the device pair */
+ priv = netdev_priv(dev);
+ rcu_assign_pointer(priv->peer, peer);
+
+ priv = netdev_priv(peer);
+ rcu_assign_pointer(priv->peer, dev);
+
+ return 0;
+}
+
+static void vxcan_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct vxcan_priv *priv;
+ struct net_device *peer;
+
+ priv = netdev_priv(dev);
+ peer = rtnl_dereference(priv->peer);
+
+ /* Note: dellink() is called from default_device_exit_batch(),
+ * before an rcu_synchronize() point. The devices are guaranteed
+ * not to be freed before one RCU grace period.
+ */
+ RCU_INIT_POINTER(priv->peer, NULL);
+ unregister_netdevice_queue(dev, head);
+
+ if (peer) {
+ priv = netdev_priv(peer);
+ RCU_INIT_POINTER(priv->peer, NULL);
+ unregister_netdevice_queue(peer, head);
+ }
+}
+
+static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
+ [VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
+};
+
+static struct net *vxcan_get_link_net(const struct net_device *dev)
+{
+ struct vxcan_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
+
+ return peer ? dev_net(peer) : dev_net(dev);
+}
+
+static struct rtnl_link_ops vxcan_link_ops = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct vxcan_priv),
+ .setup = vxcan_setup,
+ .newlink = vxcan_newlink,
+ .dellink = vxcan_dellink,
+ .policy = vxcan_policy,
+ .maxtype = VXCAN_INFO_MAX,
+ .get_link_net = vxcan_get_link_net,
+};
+
+static __init int vxcan_init(void)
+{
+ pr_info("vxcan: Virtual CAN Tunnel driver\n");
+
+ return rtnl_link_register(&vxcan_link_ops);
+}
+
+static __exit void vxcan_exit(void)
+{
+ rtnl_link_unregister(&vxcan_link_ops);
+}
+
+module_init(vxcan_init);
+module_exit(vxcan_exit);
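
Like veth, the driver creates its interfaces as a pair via the VXCAN_INFO_PEER netlink attribute, e.g. "ip link add vxcan0 type vxcan peer name vxcan1" (the peer may be placed in another namespace with an additional "netns <name>"). Frames transmitted on one end are delivered on the other, with the peer pointer read under RCU in vxcan_xmit() so a vanishing peer merely drops the frame.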
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 91c876a0a647..da020418a652 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1412,31 +1412,39 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return rc;
}
-static int e100_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int e100_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct net_local *np = netdev_priv(dev);
+ u32 supported;
int err;
spin_lock_irq(&np->lock);
- err = mii_ethtool_gset(&np->mii_if, cmd);
+ err = mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
spin_unlock_irq(&np->lock);
/* The PHY may support 1000baseT, but the Etrax100 does not. */
- cmd->supported &= ~(SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full);
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+
+ supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+
return err;
}
-static int e100_set_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int e100_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ecmd->base.autoneg == AUTONEG_ENABLE) {
e100_set_duplex(dev, autoneg);
e100_set_speed(dev, 0);
} else {
- e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full);
- e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10: 100);
+ e100_set_duplex(dev, ecmd->base.duplex == DUPLEX_HALF ?
+ half : full);
+ e100_set_speed(dev, ecmd->base.speed == SPEED_10 ? 10 : 100);
}
return 0;
@@ -1459,11 +1467,11 @@ static int e100_nway_reset(struct net_device *dev)
}
static const struct ethtool_ops e100_ethtool_ops = {
- .get_settings = e100_get_settings,
- .set_settings = e100_set_settings,
.get_drvinfo = e100_get_drvinfo,
.nway_reset = e100_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = e100_get_link_ksettings,
+ .set_link_ksettings = e100_set_link_ksettings,
};
static int
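
The get path above has to round-trip through the legacy u32 representation because ethtool_link_ksettings stores supported modes as a bitmap: convert to u32, mask out the gigabit modes the Etrax100 cannot do, convert back. A standalone sketch of the mask manipulation, reusing the legacy bit values from <linux/ethtool.h>:

#include <stdio.h>
#include <stdint.h>

/* legacy ethtool bit values, as defined in <linux/ethtool.h> */
#define SUPPORTED_1000baseT_Half	(1 << 4)
#define SUPPORTED_1000baseT_Full	(1 << 5)

int main(void)
{
	uint32_t supported = 0xffffffff; /* pretend the PHY claims everything */

	/* the MAC cannot do gigabit, whatever the PHY advertises */
	supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
	printf("supported mask: 0x%08x\n", supported);
	return 0;
}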
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 065984670ff1..862ee22303c2 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -11,7 +11,7 @@ config NET_DSA_MV88E6060
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
- depends on HAS_IOMEM && NET_DSA
+ depends on HAS_IOMEM && NET_DSA && OF_MDIO
select NET_DSA_TAG_BRCM
select FIXED_PHY
select BCM7XXX_PHY
@@ -34,4 +34,44 @@ config NET_DSA_QCA8K
This enables support for the Qualcomm Atheros QCA8K Ethernet
switch chips.
+config NET_DSA_LOOP
+ tristate "DSA mock-up Ethernet switch chip support"
+ depends on NET_DSA
+ select FIXED_PHY
+ ---help---
+ This enables support for a fake mock-up switch chip which
+ exercises the DSA APIs.
+
+config NET_DSA_MT7530
+ tristate "Mediatek MT7530 Ethernet switch support"
+ depends on NET_DSA
+ select NET_DSA_TAG_MTK
+ ---help---
+ This enables support for the Mediatek MT7530 Ethernet switch
+ chip.
+
+config NET_DSA_SMSC_LAN9303
+ tristate
+ select NET_DSA_TAG_LAN9303
+ ---help---
+	  This enables support for the SMSC/Microchip LAN9303 3-port Ethernet
+	  switch chips.
+
+config NET_DSA_SMSC_LAN9303_I2C
+ tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
+ depends on NET_DSA && I2C
+ select NET_DSA_SMSC_LAN9303
+ select REGMAP_I2C
+ ---help---
+ Enable access functions if the SMSC/Microchip LAN9303 is configured
+ for I2C managed mode.
+
+config NET_DSA_SMSC_LAN9303_MDIO
+ tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
+ depends on NET_DSA
+ select NET_DSA_SMSC_LAN9303
+ ---help---
+ Enable access functions if the SMSC/Microchip LAN9303 is configured
+ for MDIO managed mode.
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index a3c941632217..edd630361736 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -2,6 +2,10 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
-
+obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
obj-y += b53/
obj-y += mv88e6xxx/
+obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 346dd9a1232d..2fb32d67065f 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -10,10 +10,11 @@
*/
#include <linux/list.h>
-#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <net/dsa.h>
#include <linux/bitmap.h>
#include "bcm_sf2.h"
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
new file mode 100644
index 000000000000..f0fc4de4fc9a
--- /dev/null
+++ b/drivers/net/dsa/dsa_loop.c
@@ -0,0 +1,328 @@
+/*
+ * Distributed Switch Architecture loopback driver
+ *
+ * Copyright (C) 2016, Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/export.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+#include <net/dsa.h>
+
+#include "dsa_loop.h"
+
+struct dsa_loop_vlan {
+ u16 members;
+ u16 untagged;
+};
+
+#define DSA_LOOP_VLANS 5
+
+struct dsa_loop_priv {
+ struct mii_bus *bus;
+ unsigned int port_base;
+ struct dsa_loop_vlan vlans[DSA_LOOP_VLANS];
+ struct net_device *netdev;
+ u16 pvid;
+};
+
+static struct phy_device *phydevs[PHY_MAX_ADDR];
+
+static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds)
+{
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int dsa_loop_setup(struct dsa_switch *ds)
+{
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+static int dsa_loop_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ return mdiobus_read_nested(bus, ps->port_base + port, regnum);
+}
+
+static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
+ int regnum, u16 value)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ return mdiobus_write_nested(bus, ps->port_base + port, regnum, value);
+}
+
+static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ dev_dbg(ds->dev, "%s\n", __func__);
+}
+
+static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ dev_dbg(ds->dev, "%s\n", __func__);
+}
+
+static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ /* Just do a sleeping operation to make lockdep checks effective */
+ mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+ if (vlan->vid_end > DSA_LOOP_VLANS)
+ return -ERANGE;
+
+ return 0;
+}
+
+static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+ struct dsa_loop_vlan *vl;
+ u16 vid;
+
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ /* Just do a sleeping operation to make lockdep checks effective */
+ mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &ps->vlans[vid];
+
+ vl->members |= BIT(port);
+ if (untagged)
+ vl->untagged |= BIT(port);
+ else
+ vl->untagged &= ~BIT(port);
+ }
+
+ if (pvid)
+ ps->pvid = vid;
+}
+
+static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+ struct dsa_loop_vlan *vl;
+ u16 vid, pvid = ps->pvid;
+
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ /* Just do a sleeping operation to make lockdep checks effective */
+ mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &ps->vlans[vid];
+
+ vl->members &= ~BIT(port);
+ if (untagged)
+ vl->untagged &= ~BIT(port);
+
+ if (pvid == vid)
+ pvid = 1;
+ }
+ ps->pvid = pvid;
+
+ return 0;
+}
+
+static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+ struct dsa_loop_vlan *vl;
+ u16 vid, vid_start = 0;
+ int err = 0;
+
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ /* Just do a sleeping operation to make lockdep checks effective */
+ mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+ for (vid = vid_start; vid < DSA_LOOP_VLANS; vid++) {
+ vl = &ps->vlans[vid];
+
+ if (!(vl->members & BIT(port)))
+ continue;
+
+ vlan->vid_begin = vlan->vid_end = vid;
+ vlan->flags = 0;
+
+ if (vl->untagged & BIT(port))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+ if (ps->pvid == vid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static struct dsa_switch_ops dsa_loop_driver = {
+ .get_tag_protocol = dsa_loop_get_protocol,
+ .setup = dsa_loop_setup,
+ .set_addr = dsa_loop_set_addr,
+ .phy_read = dsa_loop_phy_read,
+ .phy_write = dsa_loop_phy_write,
+ .port_bridge_join = dsa_loop_port_bridge_join,
+ .port_bridge_leave = dsa_loop_port_bridge_leave,
+ .port_stp_state_set = dsa_loop_port_stp_state_set,
+ .port_vlan_filtering = dsa_loop_port_vlan_filtering,
+ .port_vlan_prepare = dsa_loop_port_vlan_prepare,
+ .port_vlan_add = dsa_loop_port_vlan_add,
+ .port_vlan_del = dsa_loop_port_vlan_del,
+ .port_vlan_dump = dsa_loop_port_vlan_dump,
+};
+
+static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
+{
+ struct dsa_loop_pdata *pdata = mdiodev->dev.platform_data;
+ struct dsa_loop_priv *ps;
+ struct dsa_switch *ds;
+
+ if (!pdata)
+ return -ENODEV;
+
+	dev_info(&mdiodev->dev, "%s: 0x%x\n",
+ pdata->name, pdata->enabled_ports);
+
+ ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ if (!ds)
+ return -ENOMEM;
+
+	ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
+	if (!ps)
+		return -ENOMEM;
+
+	ps->netdev = dev_get_by_name(&init_net, pdata->netdev);
+ if (!ps->netdev)
+ return -EPROBE_DEFER;
+
+ pdata->cd.netdev[DSA_LOOP_CPU_PORT] = &ps->netdev->dev;
+
+ ds->dev = &mdiodev->dev;
+ ds->ops = &dsa_loop_driver;
+ ds->priv = ps;
+ ps->bus = mdiodev->bus;
+
+ dev_set_drvdata(&mdiodev->dev, ds);
+
+ return dsa_register_switch(ds, ds->dev);
+}
+
+static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ struct dsa_loop_priv *ps = ds->priv;
+
+ dsa_unregister_switch(ds);
+ dev_put(ps->netdev);
+}
+
+static struct mdio_driver dsa_loop_drv = {
+ .mdiodrv.driver = {
+ .name = "dsa-loop",
+ },
+ .probe = dsa_loop_drv_probe,
+ .remove = dsa_loop_drv_remove,
+};
+
+#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
+static void unregister_fixed_phys(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ if (phydevs[i])
+ fixed_phy_unregister(phydevs[i]);
+}
+
+static int __init dsa_loop_init(void)
+{
+ struct fixed_phy_status status = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ };
+ unsigned int i;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ phydevs[i] = fixed_phy_register(PHY_POLL, &status, -1, NULL);
+
+ return mdio_driver_register(&dsa_loop_drv);
+}
+module_init(dsa_loop_init);
+
+static void __exit dsa_loop_exit(void)
+{
+ mdio_driver_unregister(&dsa_loop_drv);
+ unregister_fixed_phys();
+}
+module_exit(dsa_loop_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Fainelli");
+MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
new file mode 100644
index 000000000000..dc396877fc95
--- /dev/null
+++ b/drivers/net/dsa/dsa_loop.h
@@ -0,0 +1,19 @@
+#ifndef __DSA_LOOP_H
+#define __DSA_LOOP_H
+
+struct dsa_chip_data;
+
+struct dsa_loop_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *name;
+ unsigned int enabled_ports;
+ const char *netdev;
+};
+
+#define DSA_LOOP_NUM_PORTS 6
+#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
+
+#endif /* __DSA_LOOP_H */
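
The "must be first" comment relies on the C rule that a pointer to a structure, suitably converted, points to its initial member; the DSA core can therefore use the platform data as a struct dsa_chip_data directly. A standalone illustration (both demo structs are hypothetical):

#include <stdio.h>

struct demo_inner {
	int x;
};

struct demo_outer {
	struct demo_inner first;	/* must be first for the cast below */
	const char *name;
};

int main(void)
{
	struct demo_outer o = { { 42 }, "demo" };
	/* valid because 'first' sits at offset 0 */
	struct demo_inner *i = (struct demo_inner *)&o;

	printf("%d\n", i->x);
	return 0;
}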
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
new file mode 100644
index 000000000000..fb8d5dc71013
--- /dev/null
+++ b/drivers/net/dsa/dsa_loop_bdinfo.c
@@ -0,0 +1,34 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#include "dsa_loop.h"
+
+static struct dsa_loop_pdata dsa_loop_pdata = {
+ .cd = {
+ .port_names[0] = "lan1",
+ .port_names[1] = "lan2",
+ .port_names[2] = "lan3",
+ .port_names[3] = "lan4",
+ .port_names[DSA_LOOP_CPU_PORT] = "cpu",
+ },
+ .name = "DSA mockup driver",
+ .enabled_ports = 0x1f,
+ .netdev = "eth0",
+};
+
+static const struct mdio_board_info bdinfo = {
+ .bus_id = "fixed-0",
+ .modalias = "dsa-loop",
+ .mdio_addr = 31,
+ .platform_data = &dsa_loop_pdata,
+};
+
+static int __init dsa_loop_bdinfo_init(void)
+{
+ return mdiobus_register_board_info(&bdinfo, 1);
+}
+arch_initcall(dsa_loop_bdinfo_init)
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
new file mode 100644
index 000000000000..c8b2423c8ef7
--- /dev/null
+++ b/drivers/net/dsa/lan9303-core.c
@@ -0,0 +1,879 @@
+/*
+ * Copyright (C) 2017 Pengutronix, Juergen Borleis <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/mutex.h>
+#include <linux/mii.h>
+
+#include "lan9303.h"
+
+#define LAN9303_CHIP_REV 0x14
+# define LAN9303_CHIP_ID 0x9303
+#define LAN9303_IRQ_CFG 0x15
+# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
+# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
+# define LAN9303_IRQ_CFG_IRQ_TYPE BIT(0)
+#define LAN9303_INT_STS 0x16
+# define LAN9303_INT_STS_PHY_INT2 BIT(27)
+# define LAN9303_INT_STS_PHY_INT1 BIT(26)
+#define LAN9303_INT_EN 0x17
+# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
+# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_HW_CFG 0x1D
+# define LAN9303_HW_CFG_READY BIT(27)
+# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
+# define LAN9303_HW_CFG_AMDX_EN_PORT1 BIT(25)
+#define LAN9303_PMI_DATA 0x29
+#define LAN9303_PMI_ACCESS 0x2A
+# define LAN9303_PMI_ACCESS_PHY_ADDR(x) (((x) & 0x1f) << 11)
+# define LAN9303_PMI_ACCESS_MIIRINDA(x) (((x) & 0x1f) << 6)
+# define LAN9303_PMI_ACCESS_MII_BUSY BIT(0)
+# define LAN9303_PMI_ACCESS_MII_WRITE BIT(1)
+#define LAN9303_MANUAL_FC_1 0x68
+#define LAN9303_MANUAL_FC_2 0x69
+#define LAN9303_MANUAL_FC_0 0x6a
+#define LAN9303_SWITCH_CSR_DATA 0x6b
+#define LAN9303_SWITCH_CSR_CMD 0x6c
+#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31)
+#define LAN9303_SWITCH_CSR_CMD_RW BIT(30)
+#define LAN9303_SWITCH_CSR_CMD_LANES (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define LAN9303_VIRT_PHY_BASE 0x70
+#define LAN9303_VIRT_SPECIAL_CTRL 0x77
+
+#define LAN9303_SW_DEV_ID 0x0000
+#define LAN9303_SW_RESET 0x0001
+#define LAN9303_SW_RESET_RESET BIT(0)
+#define LAN9303_SW_IMR 0x0004
+#define LAN9303_SW_IPR 0x0005
+#define LAN9303_MAC_VER_ID_0 0x0400
+#define LAN9303_MAC_RX_CFG_0 0x0401
+# define LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES BIT(1)
+# define LAN9303_MAC_RX_CFG_X_RX_ENABLE BIT(0)
+#define LAN9303_MAC_RX_UNDSZE_CNT_0 0x0410
+#define LAN9303_MAC_RX_64_CNT_0 0x0411
+#define LAN9303_MAC_RX_127_CNT_0 0x0412
+#define LAN9303_MAC_RX_255_CNT_0 0x0413
+#define LAN9303_MAC_RX_511_CNT_0 0x0414
+#define LAN9303_MAC_RX_1023_CNT_0 0x0415
+#define LAN9303_MAC_RX_MAX_CNT_0 0x0416
+#define LAN9303_MAC_RX_OVRSZE_CNT_0 0x0417
+#define LAN9303_MAC_RX_PKTOK_CNT_0 0x0418
+#define LAN9303_MAC_RX_CRCERR_CNT_0 0x0419
+#define LAN9303_MAC_RX_MULCST_CNT_0 0x041a
+#define LAN9303_MAC_RX_BRDCST_CNT_0 0x041b
+#define LAN9303_MAC_RX_PAUSE_CNT_0 0x041c
+#define LAN9303_MAC_RX_FRAG_CNT_0 0x041d
+#define LAN9303_MAC_RX_JABB_CNT_0 0x041e
+#define LAN9303_MAC_RX_ALIGN_CNT_0 0x041f
+#define LAN9303_MAC_RX_PKTLEN_CNT_0 0x0420
+#define LAN9303_MAC_RX_GOODPKTLEN_CNT_0 0x0421
+#define LAN9303_MAC_RX_SYMBL_CNT_0 0x0422
+#define LAN9303_MAC_RX_CTLFRM_CNT_0 0x0423
+
+#define LAN9303_MAC_TX_CFG_0 0x0440
+# define LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT (21 << 2)
+# define LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE BIT(1)
+# define LAN9303_MAC_TX_CFG_X_TX_ENABLE BIT(0)
+#define LAN9303_MAC_TX_DEFER_CNT_0 0x0451
+#define LAN9303_MAC_TX_PAUSE_CNT_0 0x0452
+#define LAN9303_MAC_TX_PKTOK_CNT_0 0x0453
+#define LAN9303_MAC_TX_64_CNT_0 0x0454
+#define LAN9303_MAC_TX_127_CNT_0 0x0455
+#define LAN9303_MAC_TX_255_CNT_0 0x0456
+#define LAN9303_MAC_TX_511_CNT_0 0x0457
+#define LAN9303_MAC_TX_1023_CNT_0 0x0458
+#define LAN9303_MAC_TX_MAX_CNT_0 0x0459
+#define LAN9303_MAC_TX_UNDSZE_CNT_0 0x045a
+#define LAN9303_MAC_TX_PKTLEN_CNT_0 0x045c
+#define LAN9303_MAC_TX_BRDCST_CNT_0 0x045d
+#define LAN9303_MAC_TX_MULCST_CNT_0 0x045e
+#define LAN9303_MAC_TX_LATECOL_0 0x045f
+#define LAN9303_MAC_TX_EXCOL_CNT_0 0x0460
+#define LAN9303_MAC_TX_SNGLECOL_CNT_0 0x0461
+#define LAN9303_MAC_TX_MULTICOL_CNT_0 0x0462
+#define LAN9303_MAC_TX_TOTALCOL_CNT_0 0x0463
+
+#define LAN9303_MAC_VER_ID_1 0x0800
+#define LAN9303_MAC_RX_CFG_1 0x0801
+#define LAN9303_MAC_TX_CFG_1 0x0840
+#define LAN9303_MAC_VER_ID_2 0x0c00
+#define LAN9303_MAC_RX_CFG_2 0x0c01
+#define LAN9303_MAC_TX_CFG_2 0x0c40
+#define LAN9303_SWE_ALR_CMD 0x1800
+#define LAN9303_SWE_VLAN_CMD 0x180b
+# define LAN9303_SWE_VLAN_CMD_RNW BIT(5)
+# define LAN9303_SWE_VLAN_CMD_PVIDNVLAN BIT(4)
+#define LAN9303_SWE_VLAN_WR_DATA 0x180c
+#define LAN9303_SWE_VLAN_RD_DATA 0x180e
+# define LAN9303_SWE_VLAN_MEMBER_PORT2 BIT(17)
+# define LAN9303_SWE_VLAN_UNTAG_PORT2 BIT(16)
+# define LAN9303_SWE_VLAN_MEMBER_PORT1 BIT(15)
+# define LAN9303_SWE_VLAN_UNTAG_PORT1 BIT(14)
+# define LAN9303_SWE_VLAN_MEMBER_PORT0 BIT(13)
+# define LAN9303_SWE_VLAN_UNTAG_PORT0 BIT(12)
+#define LAN9303_SWE_VLAN_CMD_STS 0x1810
+#define LAN9303_SWE_GLB_INGRESS_CFG 0x1840
+#define LAN9303_SWE_PORT_STATE 0x1843
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT2 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT2 BIT(5)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT2 BIT(4)
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT1 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT1 BIT(3)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 BIT(2)
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT0 BIT(1)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT0 BIT(0)
+#define LAN9303_SWE_PORT_MIRROR 0x1846
+# define LAN9303_SWE_PORT_MIRROR_SNIFF_ALL BIT(8)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT2 BIT(7)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT1 BIT(6)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 BIT(5)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 BIT(4)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 BIT(3)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT0 BIT(2)
+# define LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING BIT(1)
+# define LAN9303_SWE_PORT_MIRROR_ENABLE_TX_MIRRORING BIT(0)
+#define LAN9303_SWE_INGRESS_PORT_TYPE 0x1847
+#define LAN9303_BM_CFG 0x1c00
+#define LAN9303_BM_EGRSS_PORT_TYPE 0x1c0c
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT2 (BIT(17) | BIT(16))
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT1 (BIT(9) | BIT(8))
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0 (BIT(1) | BIT(0))
+
+#define LAN9303_PORT_0_OFFSET 0x400
+#define LAN9303_PORT_1_OFFSET 0x800
+#define LAN9303_PORT_2_OFFSET 0xc00
+
+/* the built-in PHYs are of type LAN911X */
+#define MII_LAN911X_SPECIAL_MODES 0x12
+#define MII_LAN911X_SPECIAL_CONTROL_STATUS 0x1f
+
+static const struct regmap_range lan9303_valid_regs[] = {
+ regmap_reg_range(0x14, 0x17), /* misc, interrupt */
+ regmap_reg_range(0x19, 0x19), /* endian test */
+ regmap_reg_range(0x1d, 0x1d), /* hardware config */
+ regmap_reg_range(0x23, 0x24), /* general purpose timer */
+ regmap_reg_range(0x27, 0x27), /* counter */
+ regmap_reg_range(0x29, 0x2a), /* PMI index regs */
+ regmap_reg_range(0x68, 0x6a), /* flow control */
+ regmap_reg_range(0x6b, 0x6c), /* switch fabric indirect regs */
+ regmap_reg_range(0x6d, 0x6f), /* misc */
+ regmap_reg_range(0x70, 0x77), /* virtual phy */
+ regmap_reg_range(0x78, 0x7a), /* GPIO */
+ regmap_reg_range(0x7c, 0x7e), /* MAC & reset */
+ regmap_reg_range(0x80, 0xb7), /* switch fabric direct regs (wr only) */
+};
+
+static const struct regmap_range lan9303_reserved_ranges[] = {
+ regmap_reg_range(0x00, 0x13),
+ regmap_reg_range(0x18, 0x18),
+ regmap_reg_range(0x1a, 0x1c),
+ regmap_reg_range(0x1e, 0x22),
+ regmap_reg_range(0x25, 0x26),
+ regmap_reg_range(0x28, 0x28),
+ regmap_reg_range(0x2b, 0x67),
+ regmap_reg_range(0x7b, 0x7b),
+ regmap_reg_range(0x7f, 0x7f),
+ regmap_reg_range(0xb8, 0xff),
+};
+
+const struct regmap_access_table lan9303_register_set = {
+ .yes_ranges = lan9303_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(lan9303_valid_regs),
+ .no_ranges = lan9303_reserved_ranges,
+ .n_no_ranges = ARRAY_SIZE(lan9303_reserved_ranges),
+};
+EXPORT_SYMBOL(lan9303_register_set);
+
+static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
+{
+ int ret, i;
+
+	/* We can lose bus arbitration in the I2C case: after reset, the device
+	 * tries to detect and read an external EEPROM, acting as a master on
+	 * the shared I2C bus itself. This conflicts with our own attempts to
+	 * access the device as a slave at the same moment.
+	 */
+ for (i = 0; i < 5; i++) {
+ ret = regmap_read(regmap, offset, reg);
+ if (!ret)
+ return 0;
+ if (ret != -EAGAIN)
+ break;
+ msleep(500);
+ }
+
+ return -EIO;
+}
+
+static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum)
+{
+ int ret;
+ u32 val;
+
+ if (regnum > MII_EXPANSION)
+ return -EINVAL;
+
+ ret = lan9303_read(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, &val);
+ if (ret)
+ return ret;
+
+ return val & 0xffff;
+}
+
+static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val)
+{
+ if (regnum > MII_EXPANSION)
+ return -EINVAL;
+
+ return regmap_write(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, val);
+}
+
+static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip)
+{
+ int ret, i;
+ u32 reg;
+
+ for (i = 0; i < 25; i++) {
+ ret = lan9303_read(chip->regmap, LAN9303_PMI_ACCESS, &reg);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to read pmi access status: %d\n", ret);
+ return ret;
+ }
+ if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY))
+ return 0;
+ msleep(1);
+ }
+
+ return -EIO;
+}
+
+static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum)
+{
+ int ret;
+ u32 val;
+
+ val = LAN9303_PMI_ACCESS_PHY_ADDR(addr);
+ val |= LAN9303_PMI_ACCESS_MIIRINDA(regnum);
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_port_phy_reg_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* start the MII read cycle */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, val);
+ if (ret)
+ goto on_error;
+
+ ret = lan9303_port_phy_reg_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* read the result of this operation */
+ ret = lan9303_read(chip->regmap, LAN9303_PMI_DATA, &val);
+ if (ret)
+ goto on_error;
+
+ mutex_unlock(&chip->indirect_mutex);
+
+ return val & 0xffff;
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum,
+ unsigned int val)
+{
+ int ret;
+ u32 reg;
+
+ reg = LAN9303_PMI_ACCESS_PHY_ADDR(addr);
+ reg |= LAN9303_PMI_ACCESS_MIIRINDA(regnum);
+ reg |= LAN9303_PMI_ACCESS_MII_WRITE;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_port_phy_reg_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* write the data first... */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_DATA, val);
+ if (ret)
+ goto on_error;
+
+ /* ...then start the MII write cycle */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, reg);
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
+{
+ int ret, i;
+ u32 reg;
+
+ for (i = 0; i < 25; i++) {
+ ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_CMD, &reg);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to read csr command status: %d\n", ret);
+ return ret;
+ }
+ if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY))
+ return 0;
+ msleep(1);
+ }
+
+ return -EIO;
+}
+
+static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val)
+{
+ u32 reg;
+ int ret;
+
+ reg = regnum;
+ reg |= LAN9303_SWITCH_CSR_CMD_LANES;
+ reg |= LAN9303_SWITCH_CSR_CMD_BUSY;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_DATA, val);
+ if (ret) {
+ dev_err(chip->dev, "Failed to write csr data reg: %d\n", ret);
+ goto on_error;
+ }
+
+ /* trigger write */
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg);
+ if (ret)
+ dev_err(chip->dev, "Failed to write csr command reg: %d\n",
+ ret);
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_read_switch_reg(struct lan9303 *chip, u16 regnum, u32 *val)
+{
+ u32 reg;
+ int ret;
+
+ reg = regnum;
+ reg |= LAN9303_SWITCH_CSR_CMD_LANES;
+ reg |= LAN9303_SWITCH_CSR_CMD_RW;
+ reg |= LAN9303_SWITCH_CSR_CMD_BUSY;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* trigger read */
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg);
+ if (ret) {
+ dev_err(chip->dev, "Failed to write csr command reg: %d\n",
+ ret);
+ goto on_error;
+ }
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_DATA, val);
+ if (ret)
+ dev_err(chip->dev, "Failed to read csr data reg: %d\n", ret);
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_detect_phy_setup(struct lan9303 *chip)
+{
+ int reg;
+
+	/* Depending on the 'phy_addr_sel_strap' setting, the three PHYs use
+	 * either IDs 0-1-2 or IDs 1-2-3. The 'phy_addr_sel_strap' setting
+	 * cannot be read back directly, so we have to test which configuration
+	 * is active:
+	 * special reg 18 of PHY 3 reads as 0x0000 if 'phy_addr_sel_strap' is 0
+	 * and the IDs are 0-1-2; otherwise it contains something different from
+	 * 0x0000, meaning 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3.
+ */
+ reg = lan9303_port_phy_reg_read(chip, 3, MII_LAN911X_SPECIAL_MODES);
+ if (reg < 0) {
+ dev_err(chip->dev, "Failed to detect phy config: %d\n", reg);
+ return reg;
+ }
+
+ if (reg != 0)
+ chip->phy_addr_sel_strap = 1;
+ else
+ chip->phy_addr_sel_strap = 0;
+
+ dev_dbg(chip->dev, "Phy setup '%s' detected\n",
+ chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2");
+
+ return 0;
+}
+
+#define LAN9303_MAC_RX_CFG_OFFS (LAN9303_MAC_RX_CFG_0 - LAN9303_PORT_0_OFFSET)
+#define LAN9303_MAC_TX_CFG_OFFS (LAN9303_MAC_TX_CFG_0 - LAN9303_PORT_0_OFFSET)
+
+static int lan9303_disable_packet_processing(struct lan9303 *chip,
+ unsigned int port)
+{
+ int ret;
+
+	/* disable RX, but otherwise keep the register reset defaults */
+ ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port,
+ LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES);
+ if (ret)
+ return ret;
+
+	/* disable TX, but otherwise keep the register reset defaults */
+ return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port,
+ LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT |
+ LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE);
+}
+
+static int lan9303_enable_packet_processing(struct lan9303 *chip,
+ unsigned int port)
+{
+ int ret;
+
+	/* enable RX, but otherwise keep the register reset defaults */
+ ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port,
+ LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES |
+ LAN9303_MAC_RX_CFG_X_RX_ENABLE);
+ if (ret)
+ return ret;
+
+	/* enable TX, but otherwise keep the register reset defaults */
+ return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port,
+ LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT |
+ LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE |
+ LAN9303_MAC_TX_CFG_X_TX_ENABLE);
+}
+
+/* We want the switch to behave as follows:
+ * - do not forward packets between port 1 and 2
+ * - forward everything from port 1 to port 0
+ * - forward everything from port 2 to port 0
+ * - forward special tagged packets from port 0 to port 1 *or* port 2
+ */
+static int lan9303_separate_ports(struct lan9303 *chip)
+{
+ int ret;
+
+ ret = lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR,
+ LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 |
+ LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 |
+ LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 |
+ LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING |
+ LAN9303_SWE_PORT_MIRROR_SNIFF_ALL);
+ if (ret)
+ return ret;
+
+ /* enable defining the destination port via special VLAN tagging
+ * for port 0
+ */
+ ret = lan9303_write_switch_reg(chip, LAN9303_SWE_INGRESS_PORT_TYPE,
+ 0x03);
+ if (ret)
+ return ret;
+
+ /* tag incoming packets at port 1 and 2 on their way to port 0 to be
+ * able to discover their source port
+ */
+ ret = lan9303_write_switch_reg(chip, LAN9303_BM_EGRSS_PORT_TYPE,
+ LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0);
+ if (ret)
+ return ret;
+
+	/* prevent ports 1 and 2 from forwarding packets on their own */
+ return lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
+ LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 |
+ LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 |
+ LAN9303_SWE_PORT_STATE_BLOCKING_PORT2);
+}
+
+static int lan9303_handle_reset(struct lan9303 *chip)
+{
+ if (!chip->reset_gpio)
+ return 0;
+
+ if (chip->reset_duration != 0)
+ msleep(chip->reset_duration);
+
+ /* release (deassert) reset and activate the device */
+ gpiod_set_value_cansleep(chip->reset_gpio, 0);
+
+ return 0;
+}
+
+/* stop processing packets for all ports */
+static int lan9303_disable_processing(struct lan9303 *chip)
+{
+ int ret;
+
+ ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_0_OFFSET);
+ if (ret)
+ return ret;
+ ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET);
+ if (ret)
+ return ret;
+ return lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET);
+}
+
+static int lan9303_check_device(struct lan9303 *chip)
+{
+ int ret;
+ u32 reg;
+
+ ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to read chip revision register: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
+	if ((reg >> 16) != LAN9303_CHIP_ID) {
+		dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+			reg >> 16);
+		return -ENODEV;
+ }
+
+ /* The default state of the LAN9303 device is to forward packets between
+ * all ports (if not configured differently by an external EEPROM).
+ * The initial state of a DSA device must be forwarding packets only
+ * between the external and the internal ports and no forwarding
+	 * between the external ports. In preparation, we stop packet handling
+	 * altogether for now, until the LAN9303 device is re-programmed accordingly.
+ */
+ ret = lan9303_disable_processing(chip);
+ if (ret)
+ dev_warn(chip->dev, "failed to disable switching %d\n", ret);
+
+ dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+
+ ret = lan9303_detect_phy_setup(chip);
+ if (ret) {
+ dev_err(chip->dev,
+ "failed to discover phy bootstrap setup: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* ---------------------------- DSA -----------------------------------*/
+
+static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds)
+{
+ return DSA_TAG_PROTO_LAN9303;
+}
+
+static int lan9303_setup(struct dsa_switch *ds)
+{
+ struct lan9303 *chip = ds->priv;
+ int ret;
+
+ /* Make sure that port 0 is the cpu port */
+ if (!dsa_is_cpu_port(ds, 0)) {
+ dev_err(chip->dev, "port 0 is not the CPU port\n");
+ return -EINVAL;
+ }
+
+ ret = lan9303_separate_ports(chip);
+ if (ret)
+ dev_err(chip->dev, "failed to separate ports %d\n", ret);
+
+ ret = lan9303_enable_packet_processing(chip, LAN9303_PORT_0_OFFSET);
+ if (ret)
+ dev_err(chip->dev, "failed to re-enable switching %d\n", ret);
+
+ return 0;
+}
+
+struct lan9303_mib_desc {
+ unsigned int offset; /* offset of first MAC */
+ const char *name;
+};
+
+static const struct lan9303_mib_desc lan9303_mib[] = {
+ { .offset = LAN9303_MAC_RX_BRDCST_CNT_0, .name = "RxBroad", },
+ { .offset = LAN9303_MAC_RX_PAUSE_CNT_0, .name = "RxPause", },
+ { .offset = LAN9303_MAC_RX_MULCST_CNT_0, .name = "RxMulti", },
+ { .offset = LAN9303_MAC_RX_PKTOK_CNT_0, .name = "RxOk", },
+ { .offset = LAN9303_MAC_RX_CRCERR_CNT_0, .name = "RxCrcErr", },
+ { .offset = LAN9303_MAC_RX_ALIGN_CNT_0, .name = "RxAlignErr", },
+ { .offset = LAN9303_MAC_RX_JABB_CNT_0, .name = "RxJabber", },
+ { .offset = LAN9303_MAC_RX_FRAG_CNT_0, .name = "RxFragment", },
+ { .offset = LAN9303_MAC_RX_64_CNT_0, .name = "Rx64Byte", },
+ { .offset = LAN9303_MAC_RX_127_CNT_0, .name = "Rx128Byte", },
+ { .offset = LAN9303_MAC_RX_255_CNT_0, .name = "Rx256Byte", },
+ { .offset = LAN9303_MAC_RX_511_CNT_0, .name = "Rx512Byte", },
+ { .offset = LAN9303_MAC_RX_1023_CNT_0, .name = "Rx1024Byte", },
+ { .offset = LAN9303_MAC_RX_MAX_CNT_0, .name = "RxMaxByte", },
+ { .offset = LAN9303_MAC_RX_PKTLEN_CNT_0, .name = "RxByteCnt", },
+ { .offset = LAN9303_MAC_RX_SYMBL_CNT_0, .name = "RxSymbolCnt", },
+ { .offset = LAN9303_MAC_RX_CTLFRM_CNT_0, .name = "RxCfs", },
+ { .offset = LAN9303_MAC_RX_OVRSZE_CNT_0, .name = "RxOverFlow", },
+ { .offset = LAN9303_MAC_TX_UNDSZE_CNT_0, .name = "TxShort", },
+ { .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
+ { .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
+ { .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
+ { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", },
+ { .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
+ { .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
+ { .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
+ { .offset = LAN9303_MAC_TX_511_CNT_0, .name = "Tx512Byte", },
+ { .offset = LAN9303_MAC_TX_1023_CNT_0, .name = "Tx1024Byte", },
+ { .offset = LAN9303_MAC_TX_MAX_CNT_0, .name = "TxMaxByte", },
+ { .offset = LAN9303_MAC_TX_PKTLEN_CNT_0, .name = "TxByteCnt", },
+ { .offset = LAN9303_MAC_TX_PKTOK_CNT_0, .name = "TxOk", },
+ { .offset = LAN9303_MAC_TX_TOTALCOL_CNT_0, .name = "TxCollision", },
+ { .offset = LAN9303_MAC_TX_MULTICOL_CNT_0, .name = "TxMultiCol", },
+ { .offset = LAN9303_MAC_TX_SNGLECOL_CNT_0, .name = "TxSingleCol", },
+ { .offset = LAN9303_MAC_TX_EXCOL_CNT_0, .name = "TxExcCol", },
+ { .offset = LAN9303_MAC_TX_DEFER_CNT_0, .name = "TxDefer", },
+ { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", },
+};
+
+static void lan9303_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
+ strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct lan9303 *chip = ds->priv;
+ u32 reg;
+ unsigned int u, poff;
+ int ret;
+
+ poff = port * 0x400;
+
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
+ ret = lan9303_read_switch_reg(chip,
+ lan9303_mib[u].offset + poff,
+ &reg);
+ if (ret)
+ dev_warn(chip->dev, "Reading status reg %u failed\n",
+ lan9303_mib[u].offset + poff);
+ data[u] = reg;
+ }
+}
+
+static int lan9303_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(lan9303_mib);
+}
+
+static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ struct lan9303 *chip = ds->priv;
+ int phy_base = chip->phy_addr_sel_strap;
+
+ if (phy == phy_base)
+ return lan9303_virt_phy_reg_read(chip, regnum);
+ if (phy > phy_base + 2)
+ return -ENODEV;
+
+ return lan9303_port_phy_reg_read(chip, phy, regnum);
+}
+
+static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ struct lan9303 *chip = ds->priv;
+ int phy_base = chip->phy_addr_sel_strap;
+
+ if (phy == phy_base)
+ return lan9303_virt_phy_reg_write(chip, regnum, val);
+ if (phy > phy_base + 2)
+ return -ENODEV;
+
+ return lan9303_phy_reg_write(chip, phy, regnum, val);
+}
+
+static int lan9303_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct lan9303 *chip = ds->priv;
+
+ /* enable internal packet processing */
+ switch (port) {
+ case 1:
+ return lan9303_enable_packet_processing(chip,
+ LAN9303_PORT_1_OFFSET);
+ case 2:
+ return lan9303_enable_packet_processing(chip,
+ LAN9303_PORT_2_OFFSET);
+ default:
+ dev_dbg(chip->dev,
+ "Error: request to power up invalid port %d\n", port);
+ }
+
+ return -ENODEV;
+}
+
+static void lan9303_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct lan9303 *chip = ds->priv;
+
+ /* disable internal packet processing */
+ switch (port) {
+ case 1:
+ lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET);
+ lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 1,
+ MII_BMCR, BMCR_PDOWN);
+ break;
+ case 2:
+ lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET);
+ lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 2,
+ MII_BMCR, BMCR_PDOWN);
+ break;
+ default:
+ dev_dbg(chip->dev,
+ "Error: request to power down invalid port %d\n", port);
+ }
+}
+
+static struct dsa_switch_ops lan9303_switch_ops = {
+ .get_tag_protocol = lan9303_get_tag_protocol,
+ .setup = lan9303_setup,
+ .get_strings = lan9303_get_strings,
+ .phy_read = lan9303_phy_read,
+ .phy_write = lan9303_phy_write,
+ .get_ethtool_stats = lan9303_get_ethtool_stats,
+ .get_sset_count = lan9303_get_sset_count,
+ .port_enable = lan9303_port_enable,
+ .port_disable = lan9303_port_disable,
+};
+
+static int lan9303_register_switch(struct lan9303 *chip)
+{
+ chip->ds = dsa_switch_alloc(chip->dev, DSA_MAX_PORTS);
+ if (!chip->ds)
+ return -ENOMEM;
+
+ chip->ds->priv = chip;
+ chip->ds->ops = &lan9303_switch_ops;
+ chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7;
+
+ return dsa_register_switch(chip->ds, chip->dev);
+}
+
+static void lan9303_probe_reset_gpio(struct lan9303 *chip,
+ struct device_node *np)
+{
+ chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
+ GPIOD_OUT_LOW);
+
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev, "No reset GPIO defined\n");
+ return;
+ }
+
+ chip->reset_duration = 200;
+
+ if (np) {
+ of_property_read_u32(np, "reset-duration",
+ &chip->reset_duration);
+ } else {
+ dev_dbg(chip->dev, "reset duration defaults to 200 ms\n");
+ }
+
+ /* A sane reset duration should not be longer than 1s */
+ if (chip->reset_duration > 1000)
+ chip->reset_duration = 1000;
+}
+
+int lan9303_probe(struct lan9303 *chip, struct device_node *np)
+{
+ int ret;
+
+ mutex_init(&chip->indirect_mutex);
+
+ lan9303_probe_reset_gpio(chip, np);
+
+ ret = lan9303_handle_reset(chip);
+ if (ret)
+ return ret;
+
+ ret = lan9303_check_device(chip);
+ if (ret)
+ return ret;
+
+ ret = lan9303_register_switch(chip);
+ if (ret) {
+ dev_dbg(chip->dev, "Failed to register switch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(lan9303_probe);
+
+int lan9303_remove(struct lan9303 *chip)
+{
+ int rc;
+
+ rc = lan9303_disable_processing(chip);
+ if (rc != 0)
+ dev_warn(chip->dev, "shutting down failed\n");
+
+ dsa_unregister_switch(chip->ds);
+
+ /* assert reset to the whole device to prevent it from doing anything */
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+ gpiod_unexport(chip->reset_gpio);
+
+ return 0;
+}
+EXPORT_SYMBOL(lan9303_remove);
+
+MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h
new file mode 100644
index 000000000000..d1512dad2d90
--- /dev/null
+++ b/drivers/net/dsa/lan9303.h
@@ -0,0 +1,19 @@
+#include <linux/regmap.h>
+#include <linux/device.h>
+#include <net/dsa.h>
+
+struct lan9303 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ struct gpio_desc *reset_gpio;
+ u32 reset_duration; /* in [ms] */
+ bool phy_addr_sel_strap;
+ struct dsa_switch *ds;
+ struct mutex indirect_mutex; /* protect indexed register access */
+};
+
+extern const struct regmap_access_table lan9303_register_set;
+
+int lan9303_probe(struct lan9303 *chip, struct device_node *np);
+int lan9303_remove(struct lan9303 *chip);
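
The indirect_mutex declared here serializes the two-step indirect accesses used by the core driver (write an index or command register, then transfer data), so concurrent PHY and switch-fabric CSR operations cannot interleave on the shared hardware registers.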
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
new file mode 100644
index 000000000000..ab3ce0da5071
--- /dev/null
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2017 Pengutronix, Juergen Borleis <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+
+#include "lan9303.h"
+
+struct lan9303_i2c {
+ struct i2c_client *device;
+ struct lan9303 chip;
+};
+
+static const struct regmap_config lan9303_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 1,
+ .can_multi_write = true,
+ .max_register = 0x0ff, /* address bits 0..1 are not used */
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+
+ .volatile_table = &lan9303_register_set,
+ .wr_table = &lan9303_register_set,
+ .rd_table = &lan9303_register_set,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int lan9303_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lan9303_i2c *sw_dev;
+ int ret;
+
+ sw_dev = devm_kzalloc(&client->dev, sizeof(struct lan9303_i2c),
+ GFP_KERNEL);
+ if (!sw_dev)
+ return -ENOMEM;
+
+ sw_dev->chip.regmap = devm_regmap_init_i2c(client,
+ &lan9303_i2c_regmap_config);
+ if (IS_ERR(sw_dev->chip.regmap)) {
+ ret = PTR_ERR(sw_dev->chip.regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* link forward and backward */
+ sw_dev->device = client;
+ i2c_set_clientdata(client, sw_dev);
+ sw_dev->chip.dev = &client->dev;
+
+ ret = lan9303_probe(&sw_dev->chip, client->dev.of_node);
+ if (ret != 0)
+ return ret;
+
+ dev_info(&client->dev, "LAN9303 I2C driver loaded successfully\n");
+
+ return 0;
+}
+
+static int lan9303_i2c_remove(struct i2c_client *client)
+{
+ struct lan9303_i2c *sw_dev;
+
+ sw_dev = i2c_get_clientdata(client);
+ if (!sw_dev)
+ return -ENODEV;
+
+ return lan9303_remove(&sw_dev->chip);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct i2c_device_id lan9303_i2c_id[] = {
+ { "lan9303", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id);
+
+static const struct of_device_id lan9303_i2c_of_match[] = {
+ { .compatible = "smsc,lan9303-i2c", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lan9303_i2c_of_match);
+
+static struct i2c_driver lan9303_i2c_driver = {
+ .driver = {
+ .name = "LAN9303_I2C",
+ .of_match_table = of_match_ptr(lan9303_i2c_of_match),
+ },
+ .probe = lan9303_i2c_probe,
+ .remove = lan9303_i2c_remove,
+ .id_table = lan9303_i2c_id,
+};
+module_i2c_driver(lan9303_i2c_driver);
+
+MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in I2C managed mode");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
new file mode 100644
index 000000000000..93c36c0541cf
--- /dev/null
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2017 Pengutronix, Juergen Borleis <kernel@pengutronix.de>
+ *
+ * Partially based on a patch from
+ * Copyright (c) 2014 Stefan Roese <sr@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#include "lan9303.h"
+
+/* Generate phy-addr and -reg from the input address */
+#define PHY_ADDR(x) ((((x) >> 6) + 0x10) & 0x1f)
+#define PHY_REG(x) (((x) >> 1) & 0x1f)
+
+struct lan9303_mdio {
+ struct mdio_device *device;
+ struct lan9303 chip;
+};
+
+static void lan9303_mdio_real_write(struct mdio_device *mdio, int reg, u16 val)
+{
+ mdio->bus->write(mdio->bus, PHY_ADDR(reg), PHY_REG(reg), val);
+}
+
+static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
+{
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ mutex_lock(&sw_dev->device->bus->mdio_lock);
+ lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
+ lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+
+ return 0;
+}
+
+static u16 lan9303_mdio_real_read(struct mdio_device *mdio, int reg)
+{
+ return mdio->bus->read(mdio->bus, PHY_ADDR(reg), PHY_REG(reg));
+}
+
+static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ mutex_lock(&sw_dev->device->bus->mdio_lock);
+ *val = lan9303_mdio_real_read(sw_dev->device, reg);
+ *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+
+ return 0;
+}
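
The two callbacks above map each 32-bit LAN9303 register onto a pair of 16-bit MDIO transactions: PHY_ADDR()/PHY_REG() derive the phy address and register number from the chip offset, and the low and high half-words sit at consecutive even offsets. A minimal standalone sketch of that mapping (userspace C with a made-up example offset and value; not part of the patch):

#include <stdio.h>
#include <stdint.h>

#define PHY_ADDR(x) ((((x) >> 6) + 0x10) & 0x1f)
#define PHY_REG(x)  (((x) >> 1) & 0x1f)

int main(void)
{
	uint32_t reg = 0x50;		/* hypothetical chip register offset */
	uint32_t val = 0x12345678;	/* hypothetical 32-bit value */

	/* low half-word goes to <PHY_ADDR(reg), PHY_REG(reg)> */
	printf("lo: phy=0x%02x reg=0x%02x val=0x%04x\n",
	       PHY_ADDR(reg), PHY_REG(reg), val & 0xffff);
	/* high half-word lives two bytes further on, at reg + 2 */
	printf("hi: phy=0x%02x reg=0x%02x val=0x%04x\n",
	       PHY_ADDR(reg + 2), PHY_REG(reg + 2), val >> 16);
	return 0;
}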
+
+static const struct regmap_config lan9303_mdio_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 1,
+ .can_multi_write = true,
+ .max_register = 0x0ff, /* address bits 0..1 are not used */
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+
+ .volatile_table = &lan9303_register_set,
+ .wr_table = &lan9303_register_set,
+ .rd_table = &lan9303_register_set,
+
+ .reg_read = lan9303_mdio_read,
+ .reg_write = lan9303_mdio_write,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int lan9303_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev;
+ int ret;
+
+ sw_dev = devm_kzalloc(&mdiodev->dev, sizeof(struct lan9303_mdio),
+ GFP_KERNEL);
+ if (!sw_dev)
+ return -ENOMEM;
+
+ sw_dev->chip.regmap = devm_regmap_init(&mdiodev->dev, NULL, sw_dev,
+ &lan9303_mdio_regmap_config);
+ if (IS_ERR(sw_dev->chip.regmap)) {
+ ret = PTR_ERR(sw_dev->chip.regmap);
+ dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* link forward and backward */
+ sw_dev->device = mdiodev;
+ dev_set_drvdata(&mdiodev->dev, sw_dev);
+ sw_dev->chip.dev = &mdiodev->dev;
+
+ ret = lan9303_probe(&sw_dev->chip, mdiodev->dev.of_node);
+ if (ret != 0)
+ return ret;
+
+ dev_info(&mdiodev->dev, "LAN9303 MDIO driver loaded successfully\n");
+
+ return 0;
+}
+
+static void lan9303_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_remove(&sw_dev->chip);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct of_device_id lan9303_mdio_of_match[] = {
+ { .compatible = "smsc,lan9303-mdio" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
+
+static struct mdio_driver lan9303_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "LAN9303_MDIO",
+ .of_match_table = of_match_ptr(lan9303_mdio_of_match),
+ },
+ .probe = lan9303_mdio_probe,
+ .remove = lan9303_mdio_remove,
+};
+mdio_module_driver(lan9303_mdio_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>, Juergen Borleis <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in MDIO managed mode");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
new file mode 100644
index 000000000000..b070c167e70f
--- /dev/null
+++ b/drivers/net/dsa/mt7530.c
@@ -0,0 +1,1126 @@
+/*
+ * Mediatek MT7530 DSA Switch driver
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/gpio/consumer.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "mt7530.h"
+
+/* String, offset, and register size in bytes if different from 4 bytes */
+static const struct mt7530_mib_desc mt7530_mib[] = {
+ MIB_DESC(1, 0x00, "TxDrop"),
+ MIB_DESC(1, 0x04, "TxCrcErr"),
+ MIB_DESC(1, 0x08, "TxUnicast"),
+ MIB_DESC(1, 0x0c, "TxMulticast"),
+ MIB_DESC(1, 0x10, "TxBroadcast"),
+ MIB_DESC(1, 0x14, "TxCollision"),
+ MIB_DESC(1, 0x18, "TxSingleCollision"),
+ MIB_DESC(1, 0x1c, "TxMultipleCollision"),
+ MIB_DESC(1, 0x20, "TxDeferred"),
+ MIB_DESC(1, 0x24, "TxLateCollision"),
+ MIB_DESC(1, 0x28, "TxExcessiveCollision"),
+ MIB_DESC(1, 0x2c, "TxPause"),
+ MIB_DESC(1, 0x30, "TxPktSz64"),
+ MIB_DESC(1, 0x34, "TxPktSz65To127"),
+ MIB_DESC(1, 0x38, "TxPktSz128To255"),
+ MIB_DESC(1, 0x3c, "TxPktSz256To511"),
+ MIB_DESC(1, 0x40, "TxPktSz512To1023"),
+ MIB_DESC(1, 0x44, "Tx1024ToMax"),
+ MIB_DESC(2, 0x48, "TxBytes"),
+ MIB_DESC(1, 0x60, "RxDrop"),
+ MIB_DESC(1, 0x64, "RxFiltering"),
+ MIB_DESC(1, 0x6c, "RxMulticast"),
+ MIB_DESC(1, 0x70, "RxBroadcast"),
+ MIB_DESC(1, 0x74, "RxAlignErr"),
+ MIB_DESC(1, 0x78, "RxCrcErr"),
+ MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
+ MIB_DESC(1, 0x80, "RxFragErr"),
+ MIB_DESC(1, 0x84, "RxOverSzErr"),
+ MIB_DESC(1, 0x88, "RxJabberErr"),
+ MIB_DESC(1, 0x8c, "RxPause"),
+ MIB_DESC(1, 0x90, "RxPktSz64"),
+ MIB_DESC(1, 0x94, "RxPktSz65To127"),
+ MIB_DESC(1, 0x98, "RxPktSz128To255"),
+ MIB_DESC(1, 0x9c, "RxPktSz256To511"),
+ MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
+ MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
+ MIB_DESC(2, 0xa8, "RxBytes"),
+ MIB_DESC(1, 0xb0, "RxCtrlDrop"),
+ MIB_DESC(1, 0xb4, "RxIngressDrop"),
+ MIB_DESC(1, 0xb8, "RxArlDrop"),
+};
+
+static int
+mt7623_trgmii_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ int ret;
+
+ ret = regmap_write(priv->ethernet, TRGMII_BASE(reg), val);
+ if (ret < 0)
+ dev_err(priv->dev,
+ "failed to priv write register\n");
+ return ret;
+}
+
+static u32
+mt7623_trgmii_read(struct mt7530_priv *priv, u32 reg)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->ethernet, TRGMII_BASE(reg), &val);
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "failed to priv read register\n");
+ return ret;
+ }
+
+ return val;
+}
+
+static void
+mt7623_trgmii_rmw(struct mt7530_priv *priv, u32 reg,
+ u32 mask, u32 set)
+{
+ u32 val;
+
+ val = mt7623_trgmii_read(priv, reg);
+ val &= ~mask;
+ val |= set;
+ mt7623_trgmii_write(priv, reg, val);
+}
+
+static void
+mt7623_trgmii_set(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ mt7623_trgmii_rmw(priv, reg, 0, val);
+}
+
+static void
+mt7623_trgmii_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ mt7623_trgmii_rmw(priv, reg, val, 0);
+}
+
+static int
+core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
+{
+ struct mii_bus *bus = priv->bus;
+ int value, ret;
+
+ /* Write the desired MMD Devad */
+ ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ if (ret < 0)
+ goto err;
+
+ /* Write the desired MMD register address */
+ ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ if (ret < 0)
+ goto err;
+
+ /* Select the Function : DATA with no post increment */
+ ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ if (ret < 0)
+ goto err;
+
+ /* Read the content of the MMD's selected register */
+ value = bus->read(bus, 0, MII_MMD_DATA);
+
+ return value;
+err:
+ dev_err(&bus->dev, "failed to read mmd register\n");
+
+ return ret;
+}
+
+static int
+core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
+ int devad, u32 data)
+{
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ /* Write the desired MMD Devad */
+ ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ if (ret < 0)
+ goto err;
+
+ /* Write the desired MMD register address */
+ ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ if (ret < 0)
+ goto err;
+
+ /* Select the Function : DATA with no post increment */
+ ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ if (ret < 0)
+ goto err;
+
+ /* Write the data into MMD's selected register */
+ ret = bus->write(bus, 0, MII_MMD_DATA, data);
+err:
+ if (ret < 0)
+ dev_err(&bus->dev,
+ "failed to write mmd register\n");
+ return ret;
+}
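
The read/write helpers above implement the standard Clause 22 indirect MMD access sequence via registers 13 (MII_MMD_CTRL) and 14 (MII_MMD_DATA): select the devad, latch the register address, switch to data mode with no post increment, then transfer the data. A standalone trace of that sequence (hypothetical trace() helper and example values; not part of the patch):

#include <stdio.h>

#define MII_MMD_CTRL        0x0d	/* Clause 22 register 13 */
#define MII_MMD_DATA        0x0e	/* Clause 22 register 14 */
#define MII_MMD_CTRL_NOINCR 0x4000	/* data mode, no post increment */

static void trace(const char *op, int reg, int val)
{
	printf("%s phy=0 reg=0x%02x val=0x%04x\n", op, reg, val);
}

int main(void)
{
	int devad = 31;		/* MDIO_MMD_VEND2 */
	int prtad = 0x40d;	/* e.g. CORE_GSWPLL_GRP1 */

	trace("write", MII_MMD_CTRL, devad);	/* 1: select the MMD devad */
	trace("write", MII_MMD_DATA, prtad);	/* 2: latch the register address */
	trace("write", MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR); /* 3: data mode */
	trace("read ", MII_MMD_DATA, 0);	/* 4: data is transferred here */
	return 0;
}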
+
+static void
+core_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ struct mii_bus *bus = priv->bus;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
+{
+ struct mii_bus *bus = priv->bus;
+ u32 val;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
+ val &= ~mask;
+ val |= set;
+ core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+core_set(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ core_rmw(priv, reg, 0, val);
+}
+
+static void
+core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ core_rmw(priv, reg, val, 0);
+}
+
+static int
+mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 page, r, lo, hi;
+ int ret;
+
+ page = (reg >> 6) & 0x3ff;
+ r = (reg >> 2) & 0xf;
+ lo = val & 0xffff;
+ hi = val >> 16;
+
+ /* MT7530 uses 31 as the pseudo port */
+ ret = bus->write(bus, 0x1f, 0x1f, page);
+ if (ret < 0)
+ goto err;
+
+ ret = bus->write(bus, 0x1f, r, lo);
+ if (ret < 0)
+ goto err;
+
+ ret = bus->write(bus, 0x1f, 0x10, hi);
+err:
+ if (ret < 0)
+ dev_err(&bus->dev,
+ "failed to write mt7530 register\n");
+ return ret;
+}
+
+static u32
+mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 page, r, lo, hi;
+ int ret;
+
+ page = (reg >> 6) & 0x3ff;
+ r = (reg >> 2) & 0xf;
+
+ /* MT7530 uses 31 as the pseudo port */
+ ret = bus->write(bus, 0x1f, 0x1f, page);
+ if (ret < 0) {
+ dev_err(&bus->dev,
+ "failed to read mt7530 register\n");
+ return ret;
+ }
+
+ lo = bus->read(bus, 0x1f, r);
+ hi = bus->read(bus, 0x1f, 0x10);
+
+ return (hi << 16) | (lo & 0xffff);
+}
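
mt7530_mii_read()/mt7530_mii_write() tunnel 32-bit switch registers through MDIO pseudo port 0x1f: bits 15:6 of the offset select a page (written to register 0x1f), bits 5:2 select a register within the page, and the value travels as two 16-bit halves, with the high half in register 0x10. A small sketch of the address split, assuming the MT7530_ATC offset (0x80) as an example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0x80;	/* MT7530_ATC, as an example */
	uint32_t val = 0x8000;	/* e.g. ATC_BUSY */
	uint16_t page = (reg >> 6) & 0x3ff;
	uint16_t r = (reg >> 2) & 0xf;

	/* sequence on pseudo port 0x1f: page select, low half, high half */
	printf("write phy=0x1f reg=0x1f val=0x%04x (page)\n", page);
	printf("write phy=0x1f reg=0x%02x val=0x%04x (low 16 bits)\n",
	       r, val & 0xffff);
	printf("write phy=0x1f reg=0x10 val=0x%04x (high 16 bits)\n",
	       val >> 16);
	return 0;
}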
+
+static void
+mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ struct mii_bus *bus = priv->bus;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ mt7530_mii_write(priv, reg, val);
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static u32
+_mt7530_read(struct mt7530_dummy_poll *p)
+{
+ struct mii_bus *bus = p->priv->bus;
+ u32 val;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = mt7530_mii_read(p->priv, p->reg);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return val;
+}
+
+static u32
+mt7530_read(struct mt7530_priv *priv, u32 reg)
+{
+ struct mt7530_dummy_poll p;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, reg);
+ return _mt7530_read(&p);
+}
+
+static void
+mt7530_rmw(struct mt7530_priv *priv, u32 reg,
+ u32 mask, u32 set)
+{
+ struct mii_bus *bus = priv->bus;
+ u32 val;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = mt7530_mii_read(priv, reg);
+ val &= ~mask;
+ val |= set;
+ mt7530_mii_write(priv, reg, val);
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ mt7530_rmw(priv, reg, 0, val);
+}
+
+static void
+mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ mt7530_rmw(priv, reg, val, 0);
+}
+
+static int
+mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp)
+{
+ u32 val;
+ int ret;
+ struct mt7530_dummy_poll p;
+
+ /* Set the command operating upon the MAC address entries */
+ val = ATC_BUSY | ATC_MAT(0) | cmd;
+ mt7530_write(priv, MT7530_ATC, val);
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
+ ret = readx_poll_timeout(_mt7530_read, &p, val,
+ !(val & ATC_BUSY), 20, 20000);
+ if (ret < 0) {
+ dev_err(priv->dev, "reset timeout\n");
+ return ret;
+ }
+
+ /* Additional sanity check for the read command: bail out if
+ * the specified entry is invalid
+ */
+ val = mt7530_read(priv, MT7530_ATC);
+ if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID))
+ return -EINVAL;
+
+ if (rsp)
+ *rsp = val;
+
+ return 0;
+}
+
+static void
+mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb)
+{
+ u32 reg[3];
+ int i;
+
+ /* Read from ARL table into an array */
+ for (i = 0; i < 3; i++) {
+ reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4));
+
+ dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n",
+ __func__, __LINE__, i, reg[i]);
+ }
+
+ fdb->vid = (reg[1] >> CVID) & CVID_MASK;
+ fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK;
+ fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK;
+ fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK;
+ fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK;
+ fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK;
+ fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK;
+ fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK;
+ fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK;
+ fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT;
+}
+
+static void
+mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
+ u8 port_mask, const u8 *mac,
+ u8 aging, u8 type)
+{
+ u32 reg[3] = { 0 };
+ int i;
+
+ reg[1] |= vid & CVID_MASK;
+ reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
+ reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
+ /* STATIC_ENT indicates a static entry which won't be aged
+ * out, while STATIC_EMP marks an entry for erasure
+ */
+ reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS;
+ reg[1] |= mac[5] << MAC_BYTE_5;
+ reg[1] |= mac[4] << MAC_BYTE_4;
+ reg[0] |= mac[3] << MAC_BYTE_3;
+ reg[0] |= mac[2] << MAC_BYTE_2;
+ reg[0] |= mac[1] << MAC_BYTE_1;
+ reg[0] |= mac[0] << MAC_BYTE_0;
+
+ /* Write array into the ARL table */
+ for (i = 0; i < 3; i++)
+ mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
+}
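
mt7530_fdb_write() packs the 48-bit MAC address, CVID, aging value, port map and entry status into three 32-bit words that land in ATA1, ATA2 and ATWD, using the shift/mask constants from mt7530.h. A standalone sketch of the packing with a hypothetical entry (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t vid = 1;
	uint8_t port_mask = 0x41;	/* e.g. port 0 plus cpu port 6 */
	uint32_t reg[3] = { 0 };

	reg[0] = (uint32_t)mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	reg[1] = (uint32_t)mac[4] << 24 | mac[5] << 16 | (vid & 0xfff);
	reg[2] = (0xffu << 24)		/* AGE_TIMER: -1 truncated to 0xff */
	       | (port_mask << 4)	/* PORT_MAP */
	       | (3 << 2);		/* ENT_STATUS: STATIC_ENT */

	printf("ATA1=0x%08x ATA2=0x%08x ATWD=0x%08x\n",
	       reg[0], reg[1], reg[2]);
	return 0;
}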
+
+static int
+mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 ncpo1, ssc_delta, trgint, i;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ trgint = 0;
+ ncpo1 = 0x0c80;
+ ssc_delta = 0x87;
+ break;
+ case PHY_INTERFACE_MODE_TRGMII:
+ trgint = 1;
+ ncpo1 = 0x1400;
+ ssc_delta = 0x57;
+ break;
+ default:
+ dev_err(priv->dev, "xMII mode %d not supported\n", mode);
+ return -EINVAL;
+ }
+
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ P6_INTF_MODE(trgint));
+
+ /* Lower Tx Driving for TRGMII path */
+ for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
+ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+ TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+ /* Setup core clock for MT7530 */
+ if (!trgint) {
+ /* Disable MT7530 core clock */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+
+ /* Disable PLL. Since no phy_device has been created yet for
+ * phy_[read,write]_mmd_indirect to operate on, we provide our
+ * own core_write_mmd_indirect to get the job done.
+ */
+ core_write_mmd_indirect(priv,
+ CORE_GSWPLL_GRP1,
+ MDIO_MMD_VEND2,
+ 0);
+
+ /* Set core clock to 500MHz */
+ core_write(priv, CORE_GSWPLL_GRP2,
+ RG_GSWPLL_POSDIV_500M(1) |
+ RG_GSWPLL_FBKDIV_500M(25));
+
+ /* Enable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1,
+ RG_GSWPLL_EN_PRE |
+ RG_GSWPLL_POSDIV_200M(2) |
+ RG_GSWPLL_FBKDIV_200M(32));
+
+ /* Enable MT7530 core clock */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+ }
+
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4,
+ RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+ RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2,
+ RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+ RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7,
+ RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+ RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG,
+ REG_GSWCK_EN | REG_TRGMIICK_EN);
+
+ if (!trgint)
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
+ else
+ mt7623_trgmii_set(priv, GSW_INTF_MODE, INTF_MODE_TRGMII);
+
+ return 0;
+}
+
+static int
+mt7623_pad_clk_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int i;
+
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ mt7623_trgmii_write(priv, GSW_TRGMII_TD_ODT(i),
+ TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+ mt7623_trgmii_set(priv, GSW_TRGMII_RCK_CTRL, RX_RST | RXC_DQSISEL);
+ mt7623_trgmii_clear(priv, GSW_TRGMII_RCK_CTRL, RX_RST);
+
+ return 0;
+}
+
+static void
+mt7530_mib_reset(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH);
+ mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
+}
+
+static void
+mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
+{
+ u32 mask = PMCR_TX_EN | PMCR_RX_EN;
+
+ if (enable)
+ mt7530_set(priv, MT7530_PMCR_P(port), mask);
+ else
+ mt7530_clear(priv, MT7530_PMCR_P(port), mask);
+}
+
+static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return mdiobus_read_nested(priv->bus, port, regnum);
+}
+
+int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return mdiobus_write_nested(priv->bus, port, regnum, val);
+}
+
+static void
+mt7530_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
+ strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static void
+mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct mt7530_priv *priv = ds->priv;
+ const struct mt7530_mib_desc *mib;
+ u32 reg, i;
+ u64 hi;
+
+ for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
+ mib = &mt7530_mib[i];
+ reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
+
+ data[i] = mt7530_read(priv, reg);
+ if (mib->size == 2) {
+ hi = mt7530_read(priv, reg + 4);
+ data[i] |= hi << 32;
+ }
+ }
+}
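
For 64-bit counters (size == 2 in the MIB table), the low word sits at the descriptor offset and the high word four bytes later; the two 32-bit reads are combined into a single u64. In sketch form, with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lo = 0x89abcdef;	/* hypothetical read at 0x48 (TxBytes) */
	uint32_t hi = 0x01234567;	/* hypothetical read at 0x4c */
	uint64_t data = lo;

	data |= (uint64_t)hi << 32;	/* same combine as the driver's loop */
	printf("TxBytes = 0x%016llx\n", (unsigned long long)data);
	return 0;
}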
+
+static int
+mt7530_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mt7530_mib);
+}
+
+static void mt7530_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (phy_is_pseudo_fixed_link(phydev)) {
+ dev_dbg(priv->dev, "phy-mode for master device = %x\n",
+ phydev->interface);
+
+ /* Setup TX circuit including relevant PAD and driving */
+ mt7530_pad_clk_setup(ds, phydev->interface);
+
+ /* Setup the RX circuit, relevant PAD and driving on the host;
+ * this must happen after the setup on the device side is all
+ * finished.
+ */
+ mt7623_pad_clk_setup(ds);
+ }
+}
+
+static int
+mt7530_cpu_port_enable(struct mt7530_priv *priv,
+ int port)
+{
+ /* Enable Mediatek header mode on the cpu port */
+ mt7530_write(priv, MT7530_PVC_P(port),
+ PORT_SPEC_TAG);
+
+ /* Setup the MAC by default for the cpu port */
+ mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPUP_LINK);
+
+ /* Disable auto learning on the cpu port */
+ mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
+
+ /* Forward unknown unicast frames to the cpu port */
+ mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
+
+ /* CPU port gets connected to all user ports of
+ * the switch
+ */
+ mt7530_write(priv, MT7530_PCR_P(port),
+ PCR_MATRIX(priv->ds->enabled_port_mask));
+
+ return 0;
+}
+
+static int
+mt7530_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Setup the MAC for the user port */
+ mt7530_write(priv, MT7530_PMCR_P(port), PMCR_USERP_LINK);
+
+ /* Allow the user port to connect to the cpu port and also
+ * restore the port matrix if the port is a member of a certain
+ * bridge.
+ */
+ priv->ports[port].pm |= PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ priv->ports[port].enable = true;
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+ priv->ports[port].pm);
+ mt7530_port_set_status(priv, port, 1);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static void
+mt7530_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Clear the port matrix; it can be restored on the next
+ * enablement of the port.
+ */
+ priv->ports[port].enable = false;
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+ mt7530_port_set_status(priv, port, 0);
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static void
+mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = MT7530_STP_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = MT7530_STP_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = MT7530_STP_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = MT7530_STP_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = MT7530_STP_FORWARDING;
+ break;
+ }
+
+ mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK, stp_state);
+}
+
+static int
+mt7530_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 port_bitmap = BIT(MT7530_CPU_PORT);
+ int i;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Add this port to the port matrix of the other ports in the
+ * same bridge. If the port is disabled, the port matrix is
+ * kept and not applied until the port becomes enabled.
+ */
+ if (ds->enabled_port_mask & BIT(i) && i != port) {
+ if (ds->ports[i].bridge_dev != bridge)
+ continue;
+ if (priv->ports[i].enable)
+ mt7530_set(priv, MT7530_PCR_P(i),
+ PCR_MATRIX(BIT(port)));
+ priv->ports[i].pm |= PCR_MATRIX(BIT(port));
+
+ port_bitmap |= BIT(i);
+ }
+ }
+
+ /* Add all the other ports to this port's matrix. */
+ if (priv->ports[port].enable)
+ mt7530_rmw(priv, MT7530_PCR_P(port),
+ PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
+ priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
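
The PCR_MATRIX field is a per-port bitmap naming the ports a frame arriving on that port may egress; joining a bridge ORs the peers' bits in both directions while the cpu port bit stays set. A compact model of the resulting matrices (assumed example: user ports 0, 1 and 3 in one bridge, cpu port 6; not part of the patch):

#include <stdio.h>

#define CPU_PORT 6

int main(void)
{
	int members[] = { 0, 1, 3 };	/* hypothetical bridge members */
	unsigned int pm[7] = { 0 };
	int i, j;

	/* each member may egress the cpu port and every other member */
	for (i = 0; i < 3; i++) {
		pm[members[i]] = 1u << CPU_PORT;
		for (j = 0; j < 3; j++)
			if (j != i)
				pm[members[i]] |= 1u << members[j];
	}

	for (i = 0; i < 3; i++)
		printf("PCR_MATRIX(port %d) = 0x%02x\n",
		       members[i], pm[members[i]]);
	return 0;
}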
+
+static void
+mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int i;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Remove this port from the port matrix of the other ports
+ * in the same bridge. If the port is disabled, the port matrix
+ * is kept and not applied until the port becomes enabled.
+ */
+ if (ds->enabled_port_mask & BIT(i) && i != port) {
+ if (ds->ports[i].bridge_dev != bridge)
+ continue;
+ if (priv->ports[i].enable)
+ mt7530_clear(priv, MT7530_PCR_P(i),
+ PCR_MATRIX(BIT(port)));
+ priv->ports[i].pm &= ~PCR_MATRIX(BIT(port));
+ }
+ }
+
+ /* Set the cpu port to be the only one in the port matrix of
+ * this port.
+ */
+ if (priv->ports[port].enable)
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+ PCR_MATRIX(BIT(MT7530_CPU_PORT)));
+ priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int ret;
+
+ /* Because auto-learned entries share the same FDB table,
+ * an entry is reserved with no port_mask to make sure fdb_add
+ * is called while the entry is still available.
+ */
+ mutex_lock(&priv->reg_mutex);
+ mt7530_fdb_write(priv, fdb->vid, 0, fdb->addr, -1, STATIC_ENT);
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static void
+mt7530_port_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u8 port_mask = BIT(port);
+
+ mutex_lock(&priv->reg_mutex);
+ mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_ENT);
+ mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int ret;
+ u8 port_mask = BIT(port);
+
+ mutex_lock(&priv->reg_mutex);
+ mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_EMP);
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mt7530_fdb _fdb = { 0 };
+ int cnt = MT7530_NUM_FDB_RECORDS;
+ int ret = 0;
+ u32 rsp = 0;
+
+ mutex_lock(&priv->reg_mutex);
+
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp);
+ if (ret < 0)
+ goto err;
+
+ do {
+ if (rsp & ATC_SRCH_HIT) {
+ mt7530_fdb_read(priv, &_fdb);
+ if (_fdb.port_mask & BIT(port)) {
+ ether_addr_copy(fdb->addr, _fdb.mac);
+ fdb->vid = _fdb.vid;
+ fdb->ndm_state = _fdb.noarp ?
+ NUD_NOARP : NUD_REACHABLE;
+ ret = cb(&fdb->obj);
+ if (ret < 0)
+ break;
+ }
+ }
+ } while (--cnt &&
+ !(rsp & ATC_SRCH_END) &&
+ !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp));
+err:
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+mtk_get_tag_protocol(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (!dsa_is_cpu_port(ds, MT7530_CPU_PORT)) {
+ dev_warn(priv->dev,
+ "port not matched with tagging CPU port\n");
+ return DSA_TAG_PROTO_NONE;
+ } else {
+ return DSA_TAG_PROTO_MTK;
+ }
+}
+
+static int
+mt7530_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int ret, i;
+ u32 id, val;
+ struct device_node *dn;
+ struct mt7530_dummy_poll p;
+
+ /* The parent node of master_netdev holds the common system
+ * controller and also contains the two GMAC nodes, which are
+ * represented as two netdev instances.
+ */
+ dn = ds->master_netdev->dev.of_node->parent;
+ priv->ethernet = syscon_node_to_regmap(dn);
+ if (IS_ERR(priv->ethernet))
+ return PTR_ERR(priv->ethernet);
+
+ regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
+ ret = regulator_enable(priv->core_pwr);
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "Failed to enable core power: %d\n", ret);
+ return ret;
+ }
+
+ regulator_set_voltage(priv->io_pwr, 3300000, 3300000);
+ ret = regulator_enable(priv->io_pwr);
+ if (ret < 0) {
+ dev_err(priv->dev, "Failed to enable io pwr: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Reset the whole chip through a gpio pin or memory-mapped
+ * registers, depending on the type of hardware
+ */
+ if (priv->mcm) {
+ reset_control_assert(priv->rstc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rstc);
+ } else {
+ gpiod_set_value_cansleep(priv->reset, 0);
+ usleep_range(1000, 1100);
+ gpiod_set_value_cansleep(priv->reset, 1);
+ }
+
+ /* Wait until MT7530 becomes stable */
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
+ 20, 1000000);
+ if (ret < 0) {
+ dev_err(priv->dev, "reset timeout\n");
+ return ret;
+ }
+
+ id = mt7530_read(priv, MT7530_CREV);
+ id >>= CHIP_NAME_SHIFT;
+ if (id != MT7530_ID) {
+ dev_err(priv->dev, "chip %x can't be supported\n", id);
+ return -ENODEV;
+ }
+
+ /* Reset the switch through internal reset */
+ mt7530_write(priv, MT7530_SYS_CTRL,
+ SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ SYS_CTRL_REG_RST);
+
+ /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
+ val = mt7530_read(priv, MT7530_MHWTRAP);
+ val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+ val |= MHWTRAP_MANUAL;
+ mt7530_write(priv, MT7530_MHWTRAP, val);
+
+ /* Enable and reset MIB counters */
+ mt7530_mib_reset(ds);
+
+ mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+
+ if (dsa_is_cpu_port(ds, i))
+ mt7530_cpu_port_enable(priv, i);
+ else
+ mt7530_port_disable(ds, i, NULL);
+ }
+
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct dsa_switch_ops mt7530_switch_ops = {
+ .get_tag_protocol = mtk_get_tag_protocol,
+ .setup = mt7530_setup,
+ .get_strings = mt7530_get_strings,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .get_ethtool_stats = mt7530_get_ethtool_stats,
+ .get_sset_count = mt7530_get_sset_count,
+ .adjust_link = mt7530_adjust_link,
+ .port_enable = mt7530_port_enable,
+ .port_disable = mt7530_port_disable,
+ .port_stp_state_set = mt7530_stp_state_set,
+ .port_bridge_join = mt7530_port_bridge_join,
+ .port_bridge_leave = mt7530_port_bridge_leave,
+ .port_fdb_prepare = mt7530_port_fdb_prepare,
+ .port_fdb_add = mt7530_port_fdb_add,
+ .port_fdb_del = mt7530_port_fdb_del,
+ .port_fdb_dump = mt7530_port_fdb_dump,
+};
+
+static int
+mt7530_probe(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv;
+ struct device_node *dn;
+
+ dn = mdiodev->dev.of_node;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ /* Use the mediatek,mcm property to distinguish the hardware type,
+ * which causes slight differences in the power-on sequence.
+ */
+ priv->mcm = of_property_read_bool(dn, "mediatek,mcm");
+ if (priv->mcm) {
+ dev_info(&mdiodev->dev, "MT7530 adapts as multi-chip module\n");
+
+ priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm");
+ if (IS_ERR(priv->rstc)) {
+ dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->rstc);
+ }
+ }
+
+ priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
+ if (IS_ERR(priv->core_pwr))
+ return PTR_ERR(priv->core_pwr);
+
+ priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
+ if (IS_ERR(priv->io_pwr))
+ return PTR_ERR(priv->io_pwr);
+
+ /* If the switch is not an MCM, it works as a standalone
+ * integrated circuit and a GPIO pin is used to complete the
+ * reset; otherwise, in the MCM case, memory-mapped registers
+ * provided through syscon are used instead.
+ */
+ if (!priv->mcm) {
+ priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->reset);
+ }
+ }
+
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->ds->priv = priv;
+ priv->ds->ops = &mt7530_switch_ops;
+ mutex_init(&priv->reg_mutex);
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ return dsa_register_switch(priv->ds, &mdiodev->dev);
+}
+
+static void
+mt7530_remove(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int ret = 0;
+
+ ret = regulator_disable(priv->core_pwr);
+ if (ret < 0)
+ dev_err(priv->dev,
+ "Failed to disable core power: %d\n", ret);
+
+ ret = regulator_disable(priv->io_pwr);
+ if (ret < 0)
+ dev_err(priv->dev, "Failed to disable io pwr: %d\n",
+ ret);
+
+ dsa_unregister_switch(priv->ds);
+ mutex_destroy(&priv->reg_mutex);
+}
+
+static const struct of_device_id mt7530_of_match[] = {
+ { .compatible = "mediatek,mt7530" },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver mt7530_mdio_driver = {
+ .probe = mt7530_probe,
+ .remove = mt7530_remove,
+ .mdiodrv.driver = {
+ .name = "mt7530",
+ .of_match_table = mt7530_of_match,
+ },
+};
+
+mdio_module_driver(mt7530_mdio_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mediatek-mt7530");
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
new file mode 100644
index 000000000000..b83d76b99802
--- /dev/null
+++ b/drivers/net/dsa/mt7530.h
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7530_H
+#define __MT7530_H
+
+#define MT7530_NUM_PORTS 7
+#define MT7530_CPU_PORT 6
+#define MT7530_NUM_FDB_RECORDS 2048
+
+#define NUM_TRGMII_CTRL 5
+
+#define TRGMII_BASE(x) (0x10000 + (x))
+
+/* Registers to ethsys access */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+
+#define SYSC_REG_RSTCTRL 0x34
+#define RESET_MCM BIT(2)
+
+/* Registers to mac forward control for unknown frames */
+#define MT7530_MFC 0x10
+#define BC_FFP(x) (((x) & 0xff) << 24)
+#define UNM_FFP(x) (((x) & 0xff) << 16)
+#define UNU_FFP(x) (((x) & 0xff) << 8)
+#define UNU_FFP_MASK UNU_FFP(~0)
+
+/* Registers for address table access */
+#define MT7530_ATA1 0x74
+#define STATIC_EMP 0
+#define STATIC_ENT 3
+#define MT7530_ATA2 0x78
+
+/* Register for address table write data */
+#define MT7530_ATWD 0x7c
+
+/* Register for address table control */
+#define MT7530_ATC 0x80
+#define ATC_HASH(x) (((x) & 0xfff) << 16)
+#define ATC_BUSY BIT(15)
+#define ATC_SRCH_END BIT(14)
+#define ATC_SRCH_HIT BIT(13)
+#define ATC_INVALID BIT(12)
+#define ATC_MAT(x) (((x) & 0xf) << 8)
+#define ATC_MAT_MACTAB ATC_MAT(0)
+
+enum mt7530_fdb_cmd {
+ MT7530_FDB_READ = 0,
+ MT7530_FDB_WRITE = 1,
+ MT7530_FDB_FLUSH = 2,
+ MT7530_FDB_START = 4,
+ MT7530_FDB_NEXT = 5,
+};
+
+/* Registers for table search read address */
+#define MT7530_TSRA1 0x84
+#define MAC_BYTE_0 24
+#define MAC_BYTE_1 16
+#define MAC_BYTE_2 8
+#define MAC_BYTE_3 0
+#define MAC_BYTE_MASK 0xff
+
+#define MT7530_TSRA2 0x88
+#define MAC_BYTE_4 24
+#define MAC_BYTE_5 16
+#define CVID 0
+#define CVID_MASK 0xfff
+
+#define MT7530_ATRD 0x8C
+#define AGE_TIMER 24
+#define AGE_TIMER_MASK 0xff
+#define PORT_MAP 4
+#define PORT_MAP_MASK 0xff
+#define ENT_STATUS 2
+#define ENT_STATUS_MASK 0x3
+
+/* Register for vlan table control */
+#define MT7530_VTCR 0x90
+#define VTCR_BUSY BIT(31)
+#define VTCR_FUNC(x) (((x) & 0xf) << 12)
+#define VTCR_FUNC_RD_VID 0x1
+#define VTCR_FUNC_WR_VID 0x2
+#define VTCR_FUNC_INV_VID 0x3
+#define VTCR_FUNC_VAL_VID 0x4
+#define VTCR_VID(x) ((x) & 0xfff)
+
+/* Register for setting up vlan and acl write data */
+#define MT7530_VAWD1 0x94
+#define PORT_STAG BIT(31)
+#define IVL_MAC BIT(30)
+#define PORT_MEM(x) (((x) & 0xff) << 16)
+#define VALID BIT(1)
+
+#define MT7530_VAWD2 0x98
+
+/* Register for port STP state control */
+#define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100))
+#define FID_PST(x) ((x) & 0x3)
+#define FID_PST_MASK FID_PST(0x3)
+
+enum mt7530_stp_state {
+ MT7530_STP_DISABLED = 0,
+ MT7530_STP_BLOCKING = 1,
+ MT7530_STP_LISTENING = 1,
+ MT7530_STP_LEARNING = 2,
+ MT7530_STP_FORWARDING = 3
+};
+
+/* Register for port control */
+#define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100))
+#define PORT_VLAN(x) ((x) & 0x3)
+#define PCR_MATRIX(x) (((x) & 0xff) << 16)
+#define PORT_PRI(x) (((x) & 0x7) << 24)
+#define EG_TAG(x) (((x) & 0x3) << 28)
+#define PCR_MATRIX_MASK PCR_MATRIX(0xff)
+#define PCR_MATRIX_CLR PCR_MATRIX(0)
+
+/* Register for port security control */
+#define MT7530_PSC_P(x) (0x200c + ((x) * 0x100))
+#define SA_DIS BIT(4)
+
+/* Register for port vlan control */
+#define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100))
+#define PORT_SPEC_TAG BIT(5)
+#define VLAN_ATTR(x) (((x) & 0x3) << 6)
+#define STAG_VPID(x) (((x) & 0xffff) << 16)
+
+/* Register for port port-and-protocol based vlan 1 control */
+#define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100))
+
+/* Register for port MAC control */
+#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
+#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18)
+#define PMCR_MAC_MODE BIT(16)
+#define PMCR_FORCE_MODE BIT(15)
+#define PMCR_TX_EN BIT(14)
+#define PMCR_RX_EN BIT(13)
+#define PMCR_BACKOFF_EN BIT(9)
+#define PMCR_BACKPR_EN BIT(8)
+#define PMCR_TX_FC_EN BIT(5)
+#define PMCR_RX_FC_EN BIT(4)
+#define PMCR_FORCE_SPEED_1000 BIT(3)
+#define PMCR_FORCE_FDX BIT(1)
+#define PMCR_FORCE_LNK BIT(0)
+#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+ PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
+ PMCR_TX_EN | PMCR_RX_EN | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN)
+#define PMCR_CPUP_LINK (PMCR_COMMON_LINK | PMCR_FORCE_MODE | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_FDX | \
+ PMCR_FORCE_LNK)
+#define PMCR_USERP_LINK PMCR_COMMON_LINK
+#define PMCR_FIXED_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+ PMCR_FORCE_MODE | PMCR_TX_EN | \
+ PMCR_RX_EN | PMCR_BACKPR_EN | \
+ PMCR_BACKOFF_EN | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_FDX | \
+ PMCR_FORCE_LNK)
+#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN)
+
+#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
+
+/* Register for MIB */
+#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+#define MT7530_MIB_CCR 0x4fe0
+#define CCR_MIB_ENABLE BIT(31)
+#define CCR_RX_OCT_CNT_GOOD BIT(7)
+#define CCR_RX_OCT_CNT_BAD BIT(6)
+#define CCR_TX_OCT_CNT_GOOD BIT(5)
+#define CCR_TX_OCT_CNT_BAD BIT(4)
+#define CCR_MIB_FLUSH (CCR_RX_OCT_CNT_GOOD | \
+ CCR_RX_OCT_CNT_BAD | \
+ CCR_TX_OCT_CNT_GOOD | \
+ CCR_TX_OCT_CNT_BAD)
+#define CCR_MIB_ACTIVATE (CCR_MIB_ENABLE | \
+ CCR_RX_OCT_CNT_GOOD | \
+ CCR_RX_OCT_CNT_BAD | \
+ CCR_TX_OCT_CNT_GOOD | \
+ CCR_TX_OCT_CNT_BAD)
+/* Register for system reset */
+#define MT7530_SYS_CTRL 0x7000
+#define SYS_CTRL_PHY_RST BIT(2)
+#define SYS_CTRL_SW_RST BIT(1)
+#define SYS_CTRL_REG_RST BIT(0)
+
+/* Register for hw trap status */
+#define MT7530_HWTRAP 0x7800
+
+/* Register for hw trap modification */
+#define MT7530_MHWTRAP 0x7804
+#define MHWTRAP_MANUAL BIT(16)
+#define MHWTRAP_P5_MAC_SEL BIT(13)
+#define MHWTRAP_P6_DIS BIT(8)
+#define MHWTRAP_P5_RGMII_MODE BIT(7)
+#define MHWTRAP_P5_DIS BIT(6)
+#define MHWTRAP_PHY_ACCESS BIT(5)
+
+/* Register for TOP signal control */
+#define MT7530_TOP_SIG_CTRL 0x7808
+#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
+
+#define MT7530_IO_DRV_CR 0x7810
+#define P5_IO_CLK_DRV(x) ((x) & 0x3)
+#define P5_IO_DATA_DRV(x) (((x) & 0x3) << 4)
+
+#define MT7530_P6ECR 0x7830
+#define P6_INTF_MODE_MASK 0x3
+#define P6_INTF_MODE(x) ((x) & 0x3)
+
+/* Registers for TRGMII on both sides */
+#define MT7530_TRGMII_RCK_CTRL 0x7a00
+#define GSW_TRGMII_RCK_CTRL 0x300
+#define RX_RST BIT(31)
+#define RXC_DQSISEL BIT(30)
+#define DQSI1_TAP_MASK (0x7f << 8)
+#define DQSI0_TAP_MASK 0x7f
+#define DQSI1_TAP(x) (((x) & 0x7f) << 8)
+#define DQSI0_TAP(x) ((x) & 0x7f)
+
+#define MT7530_TRGMII_RCK_RTT 0x7a04
+#define GSW_TRGMII_RCK_RTT 0x304
+#define DQS1_GATE BIT(31)
+#define DQS0_GATE BIT(30)
+
+#define MT7530_TRGMII_RD(x) (0x7a10 + (x) * 8)
+#define GSW_TRGMII_RD(x) (0x310 + (x) * 8)
+#define BSLIP_EN BIT(31)
+#define EDGE_CHK BIT(30)
+#define RD_TAP_MASK 0x7f
+#define RD_TAP(x) ((x) & 0x7f)
+
+#define GSW_TRGMII_TXCTRL 0x340
+#define MT7530_TRGMII_TXCTRL 0x7a40
+#define TRAIN_TXEN BIT(31)
+#define TXC_INV BIT(30)
+#define TX_RST BIT(28)
+
+#define MT7530_TRGMII_TD_ODT(i) (0x7a54 + 8 * (i))
+#define GSW_TRGMII_TD_ODT(i) (0x354 + 8 * (i))
+#define TD_DM_DRVP(x) ((x) & 0xf)
+#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
+
+#define GSW_INTF_MODE 0x390
+#define INTF_MODE_TRGMII BIT(1)
+
+#define MT7530_TRGMII_TCK_CTRL 0x7a78
+#define TCK_TAP(x) (((x) & 0xf) << 8)
+
+#define MT7530_P5RGMIIRXCR 0x7b00
+#define CSR_RGMII_EDGE_ALIGN BIT(8)
+#define CSR_RGMII_RXC_0DEG_CFG(x) ((x) & 0xf)
+
+#define MT7530_P5RGMIITXCR 0x7b04
+#define CSR_RGMII_TXC_CFG(x) ((x) & 0x1f)
+
+#define MT7530_CREV 0x7ffc
+#define CHIP_NAME_SHIFT 16
+#define MT7530_ID 0x7530
+
+/* Registers for core PLL access through mmd indirect */
+#define CORE_PLL_GROUP2 0x401
+#define RG_SYSPLL_EN_NORMAL BIT(15)
+#define RG_SYSPLL_VODEN BIT(14)
+#define RG_SYSPLL_LF BIT(13)
+#define RG_SYSPLL_RST_DLY(x) (((x) & 0x3) << 12)
+#define RG_SYSPLL_LVROD_EN BIT(10)
+#define RG_SYSPLL_PREDIV(x) (((x) & 0x3) << 8)
+#define RG_SYSPLL_POSDIV(x) (((x) & 0x3) << 5)
+#define RG_SYSPLL_FBKSEL BIT(4)
+#define RT_SYSPLL_EN_AFE_OLT BIT(0)
+
+#define CORE_PLL_GROUP4 0x403
+#define RG_SYSPLL_DDSFBK_EN BIT(12)
+#define RG_SYSPLL_BIAS_EN BIT(11)
+#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+
+#define CORE_PLL_GROUP5 0x404
+#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
+
+#define CORE_PLL_GROUP6 0x405
+#define RG_LCDDS_PCW_NCPO0(x) ((x) & 0xffff)
+
+#define CORE_PLL_GROUP7 0x406
+#define RG_LCDDS_PWDB BIT(15)
+#define RG_LCDDS_ISO_EN BIT(13)
+#define RG_LCCDS_C(x) (((x) & 0x7) << 4)
+#define RG_LCDDS_PCW_NCPO_CHG BIT(3)
+
+#define CORE_PLL_GROUP10 0x409
+#define RG_LCDDS_SSC_DELTA(x) ((x) & 0xfff)
+
+#define CORE_PLL_GROUP11 0x40a
+#define RG_LCDDS_SSC_DELTA1(x) ((x) & 0xfff)
+
+#define CORE_GSWPLL_GRP1 0x40d
+#define RG_GSWPLL_PREDIV(x) (((x) & 0x3) << 14)
+#define RG_GSWPLL_POSDIV_200M(x) (((x) & 0x3) << 12)
+#define RG_GSWPLL_EN_PRE BIT(11)
+#define RG_GSWPLL_FBKSEL BIT(10)
+#define RG_GSWPLL_BP BIT(9)
+#define RG_GSWPLL_BR BIT(8)
+#define RG_GSWPLL_FBKDIV_200M(x) ((x) & 0xff)
+
+#define CORE_GSWPLL_GRP2 0x40e
+#define RG_GSWPLL_POSDIV_500M(x) (((x) & 0x3) << 8)
+#define RG_GSWPLL_FBKDIV_500M(x) ((x) & 0xff)
+
+#define CORE_TRGMII_GSW_CLK_CG 0x410
+#define REG_GSWCK_EN BIT(0)
+#define REG_TRGMIICK_EN BIT(1)
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+struct mt7530_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+struct mt7530_fdb {
+ u16 vid;
+ u8 port_mask;
+ u8 aging;
+ u8 mac[6];
+ bool noarp;
+};
+
+struct mt7530_port {
+ bool enable;
+ u32 pm;
+};
+
+/* struct mt7530_priv - This is the main data structure for holding the state
+ * of the driver
+ * @dev: The device pointer
+ * @ds: The pointer to the dsa core structure
+ * @bus: The bus used for the device and built-in PHY
+ * @rstc: The pointer to reset control used by MCM
+ * @ethernet: The regmap used to access TRGMII-based registers
+ * @core_pwr: The power supplied into the core
+ * @io_pwr: The power supplied into the I/O
+ * @reset: The descriptor for GPIO line tied to its reset pin
+ * @mcm: Flag distinguishing a standalone IC from a multi-chip
+ * module
+ * @ports: Holding the state among ports
+ * @reg_mutex: The lock protecting concurrent register accesses
+ */
+struct mt7530_priv {
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct mii_bus *bus;
+ struct reset_control *rstc;
+ struct regmap *ethernet;
+ struct regulator *core_pwr;
+ struct regulator *io_pwr;
+ struct gpio_desc *reset;
+ bool mcm;
+
+ struct mt7530_port ports[MT7530_NUM_PORTS];
+ /* protect register accesses among processes */
+ struct mutex reg_mutex;
+};
+
+struct mt7530_hw_stats {
+ const char *string;
+ u16 reg;
+ u8 sizeof_stat;
+};
+
+struct mt7530_dummy_poll {
+ struct mt7530_priv *priv;
+ u32 reg;
+};
+
+static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p,
+ struct mt7530_priv *priv, u32 reg)
+{
+ p->priv = priv;
+ p->reg = reg;
+}
+
+#endif /* __MT7530_H */
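
struct mt7530_dummy_poll exists so a register read can be handed to readx_poll_timeout(), which expects a read function taking a single argument: the struct carries both the device handle and the register of interest. A standalone model of that pattern (hypothetical fake_read() standing in for _mt7530_read(), polling loop without the sleeping; not part of the patch):

#include <stdint.h>
#include <stdio.h>

struct dummy_poll {
	int reg;	/* register the caller wants polled */
};

/* hypothetical stand-in for _mt7530_read(): BUSY clears after one read */
static uint32_t fake_read(struct dummy_poll *p)
{
	static int first = 1;
	(void)p;
	return first-- > 0 ? 0x8000 : 0;
}

int main(void)
{
	struct dummy_poll p = { .reg = 0x80 };	/* e.g. MT7530_ATC */
	uint32_t val;
	int tries;

	/* mirrors readx_poll_timeout(_mt7530_read, &p, val,
	 * !(val & ATC_BUSY), 20, 20000) */
	for (tries = 0; tries < 1000; tries++) {
		val = fake_read(&p);
		if (!(val & 0x8000))
			break;
	}
	printf("busy deasserted after %d read(s)\n", tries + 1);
	return 0;
}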
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index c36be318de1a..6edd869c8d6f 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
mv88e6xxx-objs += global1.o
+mv88e6xxx-objs += global1_atu.o
+mv88e6xxx-objs += global1_vtu.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
mv88e6xxx-objs += port.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 03dc886ed3d6..19581d783d8e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3,11 +3,11 @@
*
* Copyright (c) 2008 Marvell Semiconductor
*
- * Copyright (c) 2015 CMC Electronics, Inc.
- * Added support for VLAN Table Unit operations
- *
* Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
*
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -677,36 +677,6 @@ static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip,
return err;
}
-static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6097;
-}
-
-static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6165;
-}
-
-static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6320;
-}
-
-static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6341;
-}
-
-static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6351;
-}
-
-static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip)
-{
- return chip->info->family == MV88E6XXX_FAMILY_6352;
-}
-
static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
int link, int speed, int duplex,
phy_interface_t mode)
@@ -1066,11 +1036,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mutex_unlock(&chip->reg_lock);
}
-static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
-{
- return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
-}
-
static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
@@ -1130,143 +1095,42 @@ out:
return err;
}
-static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
-{
- u16 val;
- int err;
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_ATU_FID)) {
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid);
- if (err)
- return err;
- } else if (mv88e6xxx_num_databases(chip) == 256) {
- /* ATU DBNum[7:4] are located in ATU Control 15:12 */
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
- (val & 0xfff) | ((fid << 8) & 0xf000));
- if (err)
- return err;
-
- /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
- cmd |= fid & 0xf;
- }
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, cmd);
- if (err)
- return err;
-
- return _mv88e6xxx_atu_wait(chip);
-}
-
-static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_atu_entry *entry)
-{
- u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
-
- if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
- unsigned int mask, shift;
-
- if (entry->trunk) {
- data |= GLOBAL_ATU_DATA_TRUNK;
- mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
- shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
- } else {
- mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
- shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
- }
-
- data |= (entry->portv_trunkid << shift) & mask;
- }
-
- return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
-}
-
-static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_atu_entry *entry,
- bool static_too)
-{
- int op;
- int err;
-
- err = _mv88e6xxx_atu_wait(chip);
- if (err)
- return err;
-
- err = _mv88e6xxx_atu_data_write(chip, entry);
- if (err)
- return err;
-
- if (entry->fid) {
- op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
- GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
- } else {
- op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
- GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
- }
-
- return _mv88e6xxx_atu_cmd(chip, entry->fid, op);
-}
-
-static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip,
- u16 fid, bool static_too)
+static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
- struct mv88e6xxx_atu_entry entry = {
- .fid = fid,
- .state = 0, /* EntryState bits must be 0 */
- };
+ struct dsa_switch *ds = NULL;
+ struct net_device *br;
+ u16 pvlan;
+ int i;
- return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
-}
+ if (dev < DSA_MAX_SWITCHES)
+ ds = chip->ds->dst->ds[dev];
-static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
- int from_port, int to_port, bool static_too)
-{
- struct mv88e6xxx_atu_entry entry = {
- .trunk = false,
- .fid = fid,
- };
+ /* Prevent frames from unknown switch or port */
+ if (!ds || port >= ds->num_ports)
+ return 0;
- /* EntryState bits must be 0xF */
- entry.state = GLOBAL_ATU_DATA_STATE_MASK;
+ /* Frames from DSA links and CPU ports can egress any local port */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ return mv88e6xxx_port_mask(chip);
- /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
- entry.portv_trunkid = (to_port & 0x0f) << 4;
- entry.portv_trunkid |= from_port & 0x0f;
+ br = ds->ports[port].bridge_dev;
+ pvlan = 0;
- return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
-}
+ /* Frames from user ports can egress any local DSA link or CPU port,
+ * as well as any local member of their bridge group.
+ */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
+ if (dsa_is_cpu_port(chip->ds, i) ||
+ dsa_is_dsa_port(chip->ds, i) ||
+ (br && chip->ds->ports[i].bridge_dev == br))
+ pvlan |= BIT(i);
-static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
- int port, bool static_too)
-{
- /* Destination port 0xF means remove the entries */
- return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too);
+ return pvlan;
}
-static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_port_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
- struct dsa_switch *ds = chip->ds;
- struct net_device *bridge = ds->ports[port].bridge_dev;
- u16 output_ports = 0;
- int i;
-
- /* allow CPU port or DSA link(s) to send frames to every port */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
- output_ports = ~0;
- } else {
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- /* allow sending frames to every group member */
- if (bridge && ds->ports[i].bridge_dev == bridge)
- output_ports |= BIT(i);
-
- /* allow sending frames to CPU port and DSA link(s) */
- if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
- output_ports |= BIT(i);
- }
- }
+ u16 output_ports = mv88e6xxx_port_vlan(chip, chip->ds->index, port);
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
@@ -1306,182 +1170,98 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
netdev_err(ds->ports[port].netdev, "failed to update state\n");
}
-static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
{
- struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_atu_remove(chip, 0, port, false);
- mutex_unlock(&chip->reg_lock);
-
+ err = mv88e6xxx_g1_atu_flush(chip, 0, true);
if (err)
- netdev_err(ds->ports[port].netdev, "failed to flush ATU\n");
-}
-
-static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip)
-{
- return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY);
-}
-
-static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op)
-{
- int err;
+ return err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_OP, op);
+ err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
if (err)
return err;
- return _mv88e6xxx_vtu_wait(chip);
+ return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
}
-static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
{
- int ret;
+ u16 pvlan = 0;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ if (!mv88e6xxx_has_pvt(chip))
+ return -EOPNOTSUPP;
+
+ /* Skip the local source device, which uses in-chip port VLAN */
+ if (dev != chip->ds->index)
+ pvlan = mv88e6xxx_port_vlan(chip, dev, port);
- return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_FLUSH_ALL);
+ return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
}
-static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry,
- unsigned int nibble_offset)
+static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
{
- u16 regs[3];
- int i, err;
-
- for (i = 0; i < 3; ++i) {
- u16 *reg = &regs[i];
+ int dev, port;
+ int err;
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
- if (err)
- return err;
- }
+ if (!mv88e6xxx_has_pvt(chip))
+ return 0;
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- unsigned int shift = (i % 4) * 4 + nibble_offset;
- u16 reg = regs[i / 4];
+ /* Clear 5 Bit Port for usage with Marvell Link Street devices:
+ * use 4 bits for the Src_Port/Src_Trunk and 5 bits for the Src_Dev.
+ */
+ err = mv88e6xxx_g2_misc_4_bit_port(chip);
+ if (err)
+ return err;
- entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
+ for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; ++dev) {
+ for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; ++port) {
+ err = mv88e6xxx_pvt_map(chip, dev, port);
+ if (err)
+ return err;
+ }
}
return 0;
}
-static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0);
-}
-
-static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2);
-}
-
-static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry,
- unsigned int nibble_offset)
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
{
- u16 regs[3] = { 0 };
- int i, err;
-
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- unsigned int shift = (i % 4) * 4 + nibble_offset;
- u8 data = entry->data[i];
-
- regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
- }
-
- for (i = 0; i < 3; ++i) {
- u16 reg = regs[i];
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
- if (err)
- return err;
- }
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
+ mutex_unlock(&chip->reg_lock);
- return 0;
+ if (err)
+ netdev_err(ds->ports[port].netdev, "failed to flush ATU\n");
}
-static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
{
- return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0);
-}
+ if (!chip->info->max_vid)
+ return 0;
-static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2);
+ return mv88e6xxx_g1_vtu_flush(chip);
}
-static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid)
+static int mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
{
- return mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID,
- vid & GLOBAL_VTU_VID_MASK);
+ if (!chip->info->ops->vtu_getnext)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->vtu_getnext(chip, entry);
}
-static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
{
- struct mv88e6xxx_vtu_entry next = { 0 };
- u16 val;
- int err;
-
- err = _mv88e6xxx_vtu_wait(chip);
- if (err)
- return err;
-
- err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
- if (err)
- return err;
-
- next.vid = val & GLOBAL_VTU_VID_MASK;
- next.valid = !!(val & GLOBAL_VTU_VID_VALID);
-
- if (next.valid) {
- err = mv88e6xxx_vtu_data_read(chip, &next);
- if (err)
- return err;
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_FID, &val);
- if (err)
- return err;
-
- next.fid = val & GLOBAL_VTU_FID_MASK;
- } else if (mv88e6xxx_num_databases(chip) == 256) {
- /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
- * VTU DBNum[3:0] are located in VTU Operation 3:0
- */
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_OP, &val);
- if (err)
- return err;
-
- next.fid = (val & 0xf00) >> 4;
- next.fid |= val & 0xf;
- }
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
- if (err)
- return err;
-
- next.sid = val & GLOBAL_VTU_SID_MASK;
- }
- }
+ if (!chip->info->ops->vtu_loadpurge)
+ return -EOPNOTSUPP;
- *entry = next;
- return 0;
+ return chip->info->ops->vtu_loadpurge(chip, entry);
}
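
Both wrappers serve the walk idiom used throughout this file: seed the entry with the chip's maximum VID so the hardware GetNext operation wraps around to the lowest programmed VID. A minimal sketch assembled from the loops below:

	struct mv88e6xxx_vtu_entry entry = {
		.vid = chip->info->max_vid,	/* all ones: wraps to the first VID */
	};
	int err;

	do {
		err = mv88e6xxx_vtu_getnext(chip, &entry);
		if (err)
			return err;

		if (!entry.valid)
			break;		/* walked past the last programmed entry */

		/* ... consume entry.vid, entry.fid, entry.member[port] ... */
	} while (entry.vid < chip->info->max_vid);
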
static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
@@ -1489,11 +1269,13 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_vtu_entry next;
+ struct mv88e6xxx_vtu_entry next = {
+ .vid = chip->info->max_vid,
+ };
u16 pvid;
int err;
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ if (!chip->info->max_vid)
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
@@ -1502,19 +1284,15 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
if (err)
goto unlock;
- err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK);
- if (err)
- goto unlock;
-
do {
- err = _mv88e6xxx_vtu_getnext(chip, &next);
+ err = mv88e6xxx_vtu_getnext(chip, &next);
if (err)
break;
if (!next.valid)
break;
- if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ if (next.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
/* reinit and dump this VLAN obj */
@@ -1522,7 +1300,7 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
vlan->vid_end = next.vid;
vlan->flags = 0;
- if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+ if (next.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (next.vid == pvid)
@@ -1531,7 +1309,7 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
err = cb(&vlan->obj);
if (err)
break;
- } while (next.vid < GLOBAL_VTU_VID_MASK);
+ } while (next.vid < chip->info->max_vid);
unlock:
mutex_unlock(&chip->reg_lock);
@@ -1539,133 +1317,12 @@ unlock:
return err;
}
-static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
- u16 reg = 0;
- int err;
-
- err = _mv88e6xxx_vtu_wait(chip);
- if (err)
- return err;
-
- if (!entry->valid)
- goto loadpurge;
-
- /* Write port member tags */
- err = mv88e6xxx_vtu_data_write(chip, entry);
- if (err)
- return err;
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
- reg = entry->sid & GLOBAL_VTU_SID_MASK;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg);
- if (err)
- return err;
- }
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) {
- reg = entry->fid & GLOBAL_VTU_FID_MASK;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_FID, reg);
- if (err)
- return err;
- } else if (mv88e6xxx_num_databases(chip) == 256) {
- /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
- * VTU DBNum[3:0] are located in VTU Operation 3:0
- */
- op |= (entry->fid & 0xf0) << 8;
- op |= entry->fid & 0xf;
- }
-
- reg = GLOBAL_VTU_VID_VALID;
-loadpurge:
- reg |= entry->vid & GLOBAL_VTU_VID_MASK;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg);
- if (err)
- return err;
-
- return _mv88e6xxx_vtu_cmd(chip, op);
-}
-
-static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
- struct mv88e6xxx_vtu_entry *entry)
-{
- struct mv88e6xxx_vtu_entry next = { 0 };
- u16 val;
- int err;
-
- err = _mv88e6xxx_vtu_wait(chip);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID,
- sid & GLOBAL_VTU_SID_MASK);
- if (err)
- return err;
-
- err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
- if (err)
- return err;
-
- next.sid = val & GLOBAL_VTU_SID_MASK;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
- if (err)
- return err;
-
- next.valid = !!(val & GLOBAL_VTU_VID_VALID);
-
- if (next.valid) {
- err = mv88e6xxx_stu_data_read(chip, &next);
- if (err)
- return err;
- }
-
- *entry = next;
- return 0;
-}
-
-static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 reg = 0;
- int err;
-
- err = _mv88e6xxx_vtu_wait(chip);
- if (err)
- return err;
-
- if (!entry->valid)
- goto loadpurge;
-
- /* Write port states */
- err = mv88e6xxx_stu_data_write(chip, entry);
- if (err)
- return err;
-
- reg = GLOBAL_VTU_VID_VALID;
-loadpurge:
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg);
- if (err)
- return err;
-
- reg = entry->sid & GLOBAL_VTU_SID_MASK;
- err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg);
- if (err)
- return err;
-
- return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
-}
-
-static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
+static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
- struct mv88e6xxx_vtu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan = {
+ .vid = chip->info->max_vid,
+ };
int i, err;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
@@ -1680,12 +1337,8 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
}
/* Set every FID bit used by the VLAN entries */
- err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK);
- if (err)
- return err;
-
do {
- err = _mv88e6xxx_vtu_getnext(chip, &vlan);
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
return err;
@@ -1693,7 +1346,7 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
break;
set_bit(vlan.fid, fid_bitmap);
- } while (vlan.vid < GLOBAL_VTU_VID_MASK);
+ } while (vlan.vid < chip->info->max_vid);
/* The reset value 0x000 is used to indicate that multiple address
* databases are not needed. Return the next available positive FID.
@@ -1703,92 +1356,55 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
return -ENOSPC;
/* Clear the database */
- return _mv88e6xxx_atu_flush(chip, *fid, true);
+ return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
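
The tail of the allocator falls outside the hunk above; presumably it picks the first clear bit after the reserved value 0, along these lines (the find_next_zero_bit usage is an assumption):

	/* Pick the lowest unused FID; 0 means "no separate databases". */
	*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
	if (*fid >= mv88e6xxx_num_databases(chip))
		return -ENOSPC;
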
-static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_entry *entry)
-{
- struct dsa_switch *ds = chip->ds;
- struct mv88e6xxx_vtu_entry vlan = {
- .valid = true,
- .vid = vid,
- };
- int i, err;
-
- err = _mv88e6xxx_fid_new(chip, &vlan.fid);
- if (err)
- return err;
-
- /* exclude all ports except the CPU and DSA ports */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
- ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
- : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
-
- if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip) ||
- mv88e6xxx_6341_family(chip)) {
- struct mv88e6xxx_vtu_entry vstp;
-
- /* Adding a VTU entry requires a valid STU entry. As VSTP is not
- * implemented, only one STU entry is needed to cover all VTU
- * entries. Thus, validate the SID 0.
- */
- vlan.sid = 0;
- err = _mv88e6xxx_stu_getnext(chip, GLOBAL_VTU_SID_MASK, &vstp);
- if (err)
- return err;
-
- if (vstp.sid != vlan.sid || !vstp.valid) {
- memset(&vstp, 0, sizeof(vstp));
- vstp.valid = true;
- vstp.sid = vlan.sid;
-
- err = _mv88e6xxx_stu_loadpurge(chip, &vstp);
- if (err)
- return err;
- }
- }
-
- *entry = vlan;
- return 0;
-}
-
-static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_entry *entry, bool creat)
+static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
+ struct mv88e6xxx_vtu_entry *entry, bool new)
{
int err;
if (!vid)
return -EINVAL;
- err = _mv88e6xxx_vtu_vid_write(chip, vid - 1);
- if (err)
- return err;
+ entry->vid = vid - 1;
+ entry->valid = false;
- err = _mv88e6xxx_vtu_getnext(chip, entry);
+ err = mv88e6xxx_vtu_getnext(chip, entry);
if (err)
return err;
- if (entry->vid != vid || !entry->valid) {
- if (!creat)
- return -EOPNOTSUPP;
- /* -ENOENT would've been more appropriate, but switchdev expects
- * -EOPNOTSUPP to inform the bridge about a possible software VLAN.
- */
+ if (entry->vid == vid && entry->valid)
+ return 0;
- err = _mv88e6xxx_vtu_new(chip, vid, entry);
+ if (new) {
+ int i;
+
+ /* Initialize a fresh VLAN entry */
+ memset(entry, 0, sizeof(*entry));
+ entry->valid = true;
+ entry->vid = vid;
+
+ /* Include only CPU and DSA ports */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
+ entry->member[i] = dsa_is_normal_port(chip->ds, i) ?
+ GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER :
+ GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED;
+
+ return mv88e6xxx_atu_new(chip, &entry->fid);
}
- return err;
+ /* switchdev expects -EOPNOTSUPP to honor software VLANs */
+ return -EOPNOTSUPP;
}
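
The vid - 1 seed reuses the GetNext wrap-around as a point lookup. For example, probing for VID 100:

	struct mv88e6xxx_vtu_entry entry = {
		.vid = 99,		/* one below the VID of interest */
		.valid = false,
	};
	int err = mv88e6xxx_vtu_getnext(chip, &entry);

	/* entry.vid == 100 && entry.valid  ->  VID 100 is programmed;
	 * anything else                    ->  GetNext skipped past it.
	 */
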
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_vtu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan = {
+ .vid = vid_begin - 1,
+ };
int i, err;
if (!vid_begin)
@@ -1796,12 +1412,8 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
- err = _mv88e6xxx_vtu_vid_write(chip, vid_begin - 1);
- if (err)
- goto unlock;
-
do {
- err = _mv88e6xxx_vtu_getnext(chip, &vlan);
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
goto unlock;
@@ -1818,7 +1430,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (!ds->ports[i].netdev)
continue;
- if (vlan.data[i] ==
+ if (vlan.member[i] ==
GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
@@ -1852,7 +1464,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
PORT_CONTROL_2_8021Q_DISABLED;
int err;
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ if (!chip->info->max_vid)
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
@@ -1870,7 +1482,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ if (!chip->info->max_vid)
return -EOPNOTSUPP;
/* If the requested port doesn't belong to the same bridge as the VLAN
@@ -1893,15 +1505,15 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
struct mv88e6xxx_vtu_entry vlan;
int err;
- err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true);
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan, true);
if (err)
return err;
- vlan.data[port] = untagged ?
+ vlan.member[port] = untagged ?
GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
- return _mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ return mv88e6xxx_vtu_loadpurge(chip, &vlan);
}
static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
@@ -1913,7 +1525,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
u16 vid;
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ if (!chip->info->max_vid)
return;
mutex_lock(&chip->reg_lock);
@@ -1938,15 +1550,15 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry vlan;
int i, err;
- err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan, false);
if (err)
return err;
/* Tell switchdev if this VLAN is handled in software */
- if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ if (vlan.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
return -EOPNOTSUPP;
- vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+ vlan.member[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
@@ -1954,17 +1566,17 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
continue;
- if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+ if (vlan.member[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
vlan.valid = true;
break;
}
}
- err = _mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
if (err)
return err;
- return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false);
+ return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
}
static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
@@ -1974,7 +1586,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
u16 pvid, vid;
int err = 0;
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ if (!chip->info->max_vid)
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
@@ -2001,96 +1613,6 @@ unlock:
return err;
}
-static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
- const unsigned char *addr)
-{
- int i, err;
-
- for (i = 0; i < 3; i++) {
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i,
- (addr[i * 2] << 8) | addr[i * 2 + 1]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
- unsigned char *addr)
-{
- u16 val;
- int i, err;
-
- for (i = 0; i < 3; i++) {
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
- if (err)
- return err;
-
- addr[i * 2] = val >> 8;
- addr[i * 2 + 1] = val & 0xff;
- }
-
- return 0;
-}
-
-static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_atu_entry *entry)
-{
- int ret;
-
- ret = _mv88e6xxx_atu_wait(chip);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_atu_mac_write(chip, entry->mac);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_atu_data_write(chip, entry);
- if (ret < 0)
- return ret;
-
- return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
-}
-
-static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
- struct mv88e6xxx_atu_entry *entry);
-
-static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
- const u8 *addr, struct mv88e6xxx_atu_entry *entry)
-{
- struct mv88e6xxx_atu_entry next;
- int err;
-
- memcpy(next.mac, addr, ETH_ALEN);
- eth_addr_dec(next.mac);
-
- err = _mv88e6xxx_atu_mac_write(chip, next.mac);
- if (err)
- return err;
-
- do {
- err = _mv88e6xxx_atu_getnext(chip, fid, &next);
- if (err)
- return err;
-
- if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
- break;
-
- if (ether_addr_equal(next.mac, addr)) {
- *entry = next;
- return 0;
- }
- } while (ether_addr_greater(addr, next.mac));
-
- memset(entry, 0, sizeof(*entry));
- entry->fid = fid;
- ether_addr_copy(entry->mac, addr);
-
- return 0;
-}
-
static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
const unsigned char *addr, u16 vid,
u8 state)
@@ -2103,25 +1625,36 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (vid == 0)
err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid);
else
- err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan, false);
if (err)
return err;
- err = mv88e6xxx_atu_get(chip, vlan.fid, addr, &entry);
+ entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ ether_addr_copy(entry.mac, addr);
+ eth_addr_dec(entry.mac);
+
+ err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry);
if (err)
return err;
+ /* Initialize a fresh ATU entry if it isn't found */
+ if (entry.state == GLOBAL_ATU_DATA_STATE_UNUSED ||
+ !ether_addr_equal(entry.mac, addr)) {
+ memset(&entry, 0, sizeof(entry));
+ ether_addr_copy(entry.mac, addr);
+ }
+
/* Purge the ATU entry only if no port is using it anymore */
if (state == GLOBAL_ATU_DATA_STATE_UNUSED) {
- entry.portv_trunkid &= ~BIT(port);
- if (!entry.portv_trunkid)
+ entry.portvec &= ~BIT(port);
+ if (!entry.portvec)
entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
} else {
- entry.portv_trunkid |= BIT(port);
+ entry.portvec |= BIT(port);
entry.state = state;
}
- return _mv88e6xxx_atu_load(chip, &entry);
+ return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry);
}
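
The FDB add/del callbacks elided just below presumably funnel into this helper with the appropriate entry state, along the lines of:

	/* add: make the address static on this port */
	mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
				     GLOBAL_ATU_DATA_STATE_UC_STATIC);

	/* del: clear this port's bit, purging the entry once unused */
	mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
				     GLOBAL_ATU_DATA_STATE_UNUSED);
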
static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
@@ -2161,75 +1694,26 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
return err;
}
-static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
- struct mv88e6xxx_atu_entry *entry)
-{
- struct mv88e6xxx_atu_entry next = { 0 };
- u16 val;
- int err;
-
- next.fid = fid;
-
- err = _mv88e6xxx_atu_wait(chip);
- if (err)
- return err;
-
- err = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
- if (err)
- return err;
-
- err = _mv88e6xxx_atu_mac_read(chip, next.mac);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
- if (err)
- return err;
-
- next.state = val & GLOBAL_ATU_DATA_STATE_MASK;
- if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
- unsigned int mask, shift;
-
- if (val & GLOBAL_ATU_DATA_TRUNK) {
- next.trunk = true;
- mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
- shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
- } else {
- next.trunk = false;
- mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
- shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
- }
-
- next.portv_trunkid = (val & mask) >> shift;
- }
-
- *entry = next;
- return 0;
-}
-
static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
u16 fid, u16 vid, int port,
struct switchdev_obj *obj,
int (*cb)(struct switchdev_obj *obj))
{
- struct mv88e6xxx_atu_entry addr = {
- .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- };
+ struct mv88e6xxx_atu_entry addr;
int err;
- err = _mv88e6xxx_atu_mac_write(chip, addr.mac);
- if (err)
- return err;
+ addr.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ eth_broadcast_addr(addr.mac);
do {
- err = _mv88e6xxx_atu_getnext(chip, fid, &addr);
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
if (err)
return err;
if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
break;
- if (addr.trunk || (addr.portv_trunkid & BIT(port)) == 0)
+ if (addr.trunk || (addr.portvec & BIT(port)) == 0)
continue;
if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) {
@@ -2271,7 +1755,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_vtu_entry vlan = {
- .vid = GLOBAL_VTU_VID_MASK, /* all ones */
+ .vid = chip->info->max_vid,
};
u16 fid;
int err;
@@ -2286,12 +1770,8 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
return err;
/* Dump VLANs' Filtering Information Databases */
- err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid);
- if (err)
- return err;
-
do {
- err = _mv88e6xxx_vtu_getnext(chip, &vlan);
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
return err;
@@ -2302,7 +1782,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
obj, cb);
if (err)
return err;
- } while (vlan.vid < GLOBAL_VTU_VID_MASK);
+ } while (vlan.vid < chip->info->max_vid);
return err;
}
@@ -2321,23 +1801,52 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
return err;
}
-static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
+ struct net_device *br)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int i, err = 0;
-
- mutex_lock(&chip->reg_lock);
+ struct dsa_switch *ds;
+ int port;
+ int dev;
+ int err;
- /* Remap each port's VLANTable */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (ds->ports[i].bridge_dev == br) {
- err = _mv88e6xxx_port_based_vlan_map(chip, i);
+ /* Remap the Port VLAN of each local bridge group member */
+ for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
+ if (chip->ds->ports[port].bridge_dev == br) {
+ err = mv88e6xxx_port_vlan_map(chip, port);
if (err)
- break;
+ return err;
+ }
+ }
+
+ if (!mv88e6xxx_has_pvt(chip))
+ return 0;
+
+ /* Remap the Port VLAN of each cross-chip bridge group member */
+ for (dev = 0; dev < DSA_MAX_SWITCHES; ++dev) {
+ ds = chip->ds->dst->ds[dev];
+ if (!ds)
+ break;
+
+ for (port = 0; port < ds->num_ports; ++port) {
+ if (ds->ports[port].bridge_dev == br) {
+ err = mv88e6xxx_pvt_map(chip, dev, port);
+ if (err)
+ return err;
+ }
}
}
+ return 0;
+}
+
+static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_bridge_map(chip, br);
mutex_unlock(&chip->reg_lock);
return err;
@@ -2347,17 +1856,41 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int i;
mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_bridge_map(chip, br) ||
+ mv88e6xxx_port_vlan_map(chip, port))
+ dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+ mutex_unlock(&chip->reg_lock);
+}
- /* Remap each port's VLANTable */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- if (i == port || ds->ports[i].bridge_dev == br)
- if (_mv88e6xxx_port_based_vlan_map(chip, i))
- netdev_warn(ds->ports[i].netdev,
- "failed to remap\n");
+static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
+ int port, struct net_device *br)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!mv88e6xxx_has_pvt(chip))
+ return 0;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_pvt_map(chip, dev, port);
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
+ int port, struct net_device *br)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ if (!mv88e6xxx_has_pvt(chip))
+ return;
+
+ mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_pvt_map(chip, dev, port))
+ dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
mutex_unlock(&chip->reg_lock);
}
@@ -2433,70 +1966,85 @@ static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
return err;
}
-static int mv88e6xxx_setup_port_dsa(struct mv88e6xxx_chip *chip, int port,
- int upstream_port)
+static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode frame, u16 egress,
+ u16 etype)
{
int err;
- err = chip->info->ops->port_set_frame_mode(
- chip, port, MV88E6XXX_FRAME_MODE_DSA);
+ if (!chip->info->ops->port_set_frame_mode)
+ return -EOPNOTSUPP;
+
+ err = mv88e6xxx_port_set_egress_mode(chip, port, egress);
if (err)
return err;
- return chip->info->ops->port_set_egress_unknowns(
- chip, port, port == upstream_port);
+ err = chip->info->ops->port_set_frame_mode(chip, port, frame);
+ if (err)
+ return err;
+
+ if (chip->info->ops->port_set_ether_type)
+ return chip->info->ops->port_set_ether_type(chip, port, etype);
+
+ return 0;
}
-static int mv88e6xxx_setup_port_cpu(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port)
{
- int err;
+ return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL,
+ PORT_CONTROL_EGRESS_UNMODIFIED,
+ PORT_ETH_TYPE_DEFAULT);
+}
- switch (chip->info->tag_protocol) {
- case DSA_TAG_PROTO_EDSA:
- err = chip->info->ops->port_set_frame_mode(
- chip, port, MV88E6XXX_FRAME_MODE_ETHERTYPE);
- if (err)
- return err;
+static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA,
+ PORT_CONTROL_EGRESS_UNMODIFIED,
+ PORT_ETH_TYPE_DEFAULT);
+}
- err = mv88e6xxx_port_set_egress_mode(
- chip, port, PORT_CONTROL_EGRESS_ADD_TAG);
- if (err)
- return err;
+static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_set_port_mode(chip, port,
+ MV88E6XXX_FRAME_MODE_ETHERTYPE,
+ PORT_CONTROL_EGRESS_ADD_TAG, ETH_P_EDSA);
+}
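
Read together, the three helpers reduce to this table:

	/*
	 * port kind    frame mode  egress mode                     EtherType
	 * normal       NORMAL      PORT_CONTROL_EGRESS_UNMODIFIED  default
	 * DSA          DSA         PORT_CONTROL_EGRESS_UNMODIFIED  default
	 * CPU (EDSA)   ETHERTYPE   PORT_CONTROL_EGRESS_ADD_TAG     ETH_P_EDSA
	 */
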
- if (chip->info->ops->port_set_ether_type)
- err = chip->info->ops->port_set_ether_type(
- chip, port, ETH_P_EDSA);
- break;
+static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
+{
+ if (dsa_is_dsa_port(chip->ds, port))
+ return mv88e6xxx_set_port_mode_dsa(chip, port);
- case DSA_TAG_PROTO_DSA:
- err = chip->info->ops->port_set_frame_mode(
- chip, port, MV88E6XXX_FRAME_MODE_DSA);
- if (err)
- return err;
+ if (dsa_is_normal_port(chip->ds, port))
+ return mv88e6xxx_set_port_mode_normal(chip, port);
- err = mv88e6xxx_port_set_egress_mode(
- chip, port, PORT_CONTROL_EGRESS_UNMODIFIED);
- break;
- default:
- err = -EINVAL;
- }
+ /* Set up the CPU port mode depending on its supported tag format */
+ if (chip->info->tag_protocol == DSA_TAG_PROTO_DSA)
+ return mv88e6xxx_set_port_mode_dsa(chip, port);
- if (err)
- return err;
+ if (chip->info->tag_protocol == DSA_TAG_PROTO_EDSA)
+ return mv88e6xxx_set_port_mode_edsa(chip, port);
- return chip->info->ops->port_set_egress_unknowns(chip, port, true);
+ return -EINVAL;
}
-static int mv88e6xxx_setup_port_normal(struct mv88e6xxx_chip *chip, int port)
+static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
{
- int err;
+ bool message = dsa_is_dsa_port(chip->ds, port);
- err = chip->info->ops->port_set_frame_mode(
- chip, port, MV88E6XXX_FRAME_MODE_NORMAL);
- if (err)
- return err;
+ return mv88e6xxx_port_set_message_port(chip, port, message);
+}
- return chip->info->ops->port_set_egress_unknowns(chip, port, false);
+static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
+{
+ bool flood = port == dsa_upstream_port(chip->ds);
+
+ /* Upstream ports flood frames with unknown unicast or multicast DA */
+ if (chip->info->ops->port_set_egress_floods)
+ return chip->info->ops->port_set_egress_floods(chip, port,
+ flood, flood);
+
+ return 0;
}
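
Unlike the removed port_set_egress_unknowns(chip, port, bool), the replacement hook controls unknown-unicast and unknown-multicast flooding separately; judging from the call above, its signature is:

	int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
				      bool unicast, bool multicast);
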
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
@@ -2541,14 +2089,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- if (dsa_is_cpu_port(ds, port)) {
- err = mv88e6xxx_setup_port_cpu(chip, port);
- } else if (dsa_is_dsa_port(ds, port)) {
- err = mv88e6xxx_setup_port_dsa(chip, port,
- dsa_upstream_port(ds));
- } else {
- err = mv88e6xxx_setup_port_normal(chip, port);
- }
+ err = mv88e6xxx_setup_port_mode(chip, port);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_egress_floods(chip, port);
if (err)
return err;
@@ -2623,20 +2168,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
- mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) {
- /* Port ATU control: disable limiting the number of
- * address database entries that this port is allowed
- * to use.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL,
- 0x0000);
- /* Priority Override: disable DA, SA and VTU priority
- * override.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE,
- 0x0000);
+ if (chip->info->ops->port_disable_learn_limit) {
+ err = chip->info->ops->port_disable_learn_limit(chip, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->port_disable_pri_override) {
+ err = chip->info->ops->port_disable_pri_override(chip, port);
if (err)
return err;
}
@@ -2653,10 +2192,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- /* Port Control 1: disable trunking, disable sending
- * learning messages to this port.
- */
- err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, 0x0000);
+ err = mv88e6xxx_setup_message_port(chip, port);
if (err)
return err;
@@ -2668,7 +2204,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- err = _mv88e6xxx_port_based_vlan_map(chip, port);
+ err = mv88e6xxx_port_vlan_map(chip, port);
if (err)
return err;
@@ -2697,33 +2233,6 @@ static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
return 0;
}
-static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
- unsigned int msecs)
-{
- const unsigned int coeff = chip->info->age_time_coeff;
- const unsigned int min = 0x01 * coeff;
- const unsigned int max = 0xff * coeff;
- u8 age_time;
- u16 val;
- int err;
-
- if (msecs < min || msecs > max)
- return -ERANGE;
-
- /* Round to nearest multiple of coeff */
- age_time = (msecs + coeff / 2) / coeff;
-
- err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
- if (err)
- return err;
-
- /* AgeTime is 11:4 bits */
- val &= ~0xff0;
- val |= age_time << 4;
-
- return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
-}
-
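
The helper lives on as mv88e6xxx_g1_atu_set_age_time() in global1_atu.c (called below); its rounding is easy to see with numbers, assuming a chip whose age_time_coeff is 15000 (15 s per step, an assumption for this example):

	/* msecs = 300000 (the 5 minute default used below):
	 *   age_time = (300000 + 15000 / 2) / 15000 = 20 (0x14 in AgeTime bits 11:4)
	 * accepted range: 1 * 15000 .. 255 * 15000 ms, otherwise -ERANGE.
	 */
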
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -2731,7 +2240,7 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
int err;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_g1_set_age_time(chip, ageing_time);
+ err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time);
mutex_unlock(&chip->reg_lock);
return err;
@@ -2769,29 +2278,6 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- /* Clear all the VTU and STU entries */
- err = _mv88e6xxx_vtu_stu_flush(chip);
- if (err < 0)
- return err;
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_LEARN2ALL);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_set_age_time(chip, 300000);
- if (err)
- return err;
-
- /* Clear all ATU entries */
- err = _mv88e6xxx_atu_flush(chip, 0, true);
- if (err)
- return err;
-
/* Configure the IP ToS mapping registers. */
err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000);
if (err)
@@ -2872,6 +2358,18 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
}
+ err = mv88e6xxx_vtu_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pvt_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_atu_setup(chip);
+ if (err)
+ goto unlock;
+
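
mv88e6xxx_atu_setup() itself is outside this hunk; presumably it re-creates the ATU initialization removed from mv88e6xxx_g1_setup() above, roughly:

	static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
	{
		int err;

		/* Clear all ATU entries */
		err = mv88e6xxx_g1_atu_flush(chip, 0, true);
		if (err)
			return err;

		/* Send learning messages to all message ports
		 * (helper name assumed)
		 */
		err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
		if (err)
			return err;

		/* Restore the 5 minute default ageing time */
		return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
	}
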
/* Some generations have the configuration of sending reserved
* management frames to the CPU in global2, others in
* global1. Hence it does not fit the two setup functions
@@ -3101,10 +2599,12 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3116,6 +2616,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
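
The two port_disable_* ops added to every table below replace the open-coded register writes removed from mv88e6xxx_setup_port() above; presumably they are thin wrappers in port.c:

	int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip,
					       int port)
	{
		/* No limit on address database entries for this port */
		return mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, 0);
	}

	int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip,
						int port)
	{
		/* No DA, SA or VTU priority override */
		return mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE, 0);
	}
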
static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -3127,7 +2629,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3137,6 +2639,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
};
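
The per-family vtu_getnext/vtu_loadpurge pairs split the monolithic implementation deleted above along its own branches:

	/* mv88e6185_g1_vtu_*: FID packed into VTU Op bits 11:8 and 3:0
	 *                     (the 256-database branch removed above);
	 * mv88e6352_g1_vtu_*: dedicated GLOBAL_VTU_FID register plus STU
	 *                     SID handling (the FLAG_G1_VTU_FID branch);
	 * mv88e6390_g1_vtu_*: the newer 6390 register layout (not shown here).
	 */
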
static const struct mv88e6xxx_ops mv88e6097_ops = {
@@ -3149,11 +2653,13 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3163,6 +2669,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6123_ops = {
@@ -3174,7 +2682,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3184,6 +2694,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6131_ops = {
@@ -3196,7 +2708,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_jumbo_config = mv88e6165_port_jumbo_config,
@@ -3213,6 +2725,41 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+};
+
+static const struct mv88e6xxx_ops mv88e6141_ops = {
+ /* MV88E6XXX_FAMILY_6341 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -3225,11 +2772,13 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3239,6 +2788,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6165_ops = {
@@ -3249,6 +2800,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3258,6 +2811,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -3271,11 +2826,13 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3285,6 +2842,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -3300,11 +2859,13 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_set_speed = mv88e6352_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3314,6 +2875,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -3327,11 +2890,13 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3341,6 +2906,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -3356,11 +2923,13 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_set_speed = mv88e6352_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3370,6 +2939,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -3381,7 +2952,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3395,6 +2966,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6190_ops = {
@@ -3410,9 +2983,11 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3423,6 +2998,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -3438,9 +3015,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_set_speed = mv88e6390x_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3451,6 +3030,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -3466,9 +3047,11 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3479,6 +3062,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -3494,11 +3079,13 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_set_speed = mv88e6352_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3508,6 +3095,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -3523,10 +3112,12 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_config = mv88e6390_port_pause_config,
.port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3537,6 +3128,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -3551,11 +3144,13 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
@@ -3564,6 +3159,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -3578,11 +3175,13 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
@@ -3590,6 +3189,41 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
.g1_set_egress_port = mv88e6095_g1_set_egress_port,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+};
+
+static const struct mv88e6xxx_ops mv88e6341_ops = {
+ /* MV88E6XXX_FAMILY_6341 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -3603,11 +3237,13 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3617,6 +3253,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -3630,11 +3268,13 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_set_speed = mv88e6185_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3644,6 +3284,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -3659,11 +3301,13 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_set_speed = mv88e6352_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6097_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -3673,64 +3317,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
-};
-
-static const struct mv88e6xxx_ops mv88e6141_ops = {
- /* MV88E6XXX_FAMILY_6341 */
- .get_eeprom = mv88e6xxx_g2_get_eeprom8,
- .set_eeprom = mv88e6xxx_g2_set_eeprom8,
- .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
- .port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
- .port_tag_remap = mv88e6095_port_tag_remap,
- .port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
- .port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
- .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
- .stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_get_sset_count = mv88e6320_stats_get_sset_count,
- .stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
- .watchdog_ops = &mv88e6390_watchdog_ops,
- .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
- .reset = mv88e6352_g1_reset,
-};
-
-static const struct mv88e6xxx_ops mv88e6341_ops = {
- /* MV88E6XXX_FAMILY_6341 */
- .get_eeprom = mv88e6xxx_g2_get_eeprom8,
- .set_eeprom = mv88e6xxx_g2_set_eeprom8,
- .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
- .port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
- .port_tag_remap = mv88e6095_port_tag_remap,
- .port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
- .port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_jumbo_config = mv88e6165_port_jumbo_config,
- .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
- .port_pause_config = mv88e6097_port_pause_config,
- .stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_get_sset_count = mv88e6320_stats_get_sset_count,
- .stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
- .watchdog_ops = &mv88e6390_watchdog_ops,
- .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
- .reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -3746,12 +3334,14 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_speed = mv88e6390_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6390_port_pause_config,
.port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3762,6 +3352,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -3777,11 +3369,13 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_speed = mv88e6390x_port_set_speed,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_jumbo_config = mv88e6165_port_jumbo_config,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_config = mv88e6390_port_pause_config,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3792,52 +3386,10 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
};
-static const struct mv88e6xxx_ops mv88e6391_ops = {
- /* MV88E6XXX_FAMILY_6390 */
- .get_eeprom = mv88e6xxx_g2_get_eeprom8,
- .set_eeprom = mv88e6xxx_g2_set_eeprom8,
- .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
- .port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
- .port_tag_remap = mv88e6390_port_tag_remap,
- .port_set_frame_mode = mv88e6351_port_set_frame_mode,
- .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
- .port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_pause_config = mv88e6390_port_pause_config,
- .stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
- .stats_get_sset_count = mv88e6320_stats_get_sset_count,
- .stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
- .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
- .g1_set_egress_port = mv88e6390_g1_set_egress_port,
- .watchdog_ops = &mv88e6390_watchdog_ops,
- .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
- .reset = mv88e6352_g1_reset,
-};
-
-static int mv88e6xxx_verify_madatory_ops(struct mv88e6xxx_chip *chip,
- const struct mv88e6xxx_ops *ops)
-{
- if (!ops->port_set_frame_mode) {
- dev_err(chip->dev, "Missing port_set_frame_mode");
- return -EINVAL;
- }
-
- if (!ops->port_set_egress_unknowns) {
- dev_err(chip->dev, "Missing port_set_egress_mode");
- return -EINVAL;
- }
-
- return 0;
-}
-
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6085] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
@@ -3845,10 +3397,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6085",
.num_databases = 4096,
.num_ports = 10,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
.ops = &mv88e6085_ops,
@@ -3860,10 +3415,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6095/88E6095F",
.num_databases = 256,
.num_ports = 11,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6095,
.ops = &mv88e6095_ops,
@@ -3875,10 +3432,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6097/88E6097F",
.num_databases = 4096,
.num_ports = 11,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
.ops = &mv88e6097_ops,
@@ -3890,10 +3450,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6123",
.num_databases = 4096,
.num_ports = 3,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6123_ops,
@@ -3905,25 +3468,47 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6131",
.num_databases = 256,
.num_ports = 8,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
.ops = &mv88e6131_ops,
},
+ [MV88E6141] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6141,
+ .family = MV88E6XXX_FAMILY_6341,
+ .name = "Marvell 88E6341",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .max_vid = 4095,
+ .port_base_addr = 0x10,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 3750,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6341,
+ .ops = &mv88e6141_ops,
+ },
+
[MV88E6161] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6161",
.num_databases = 4096,
.num_ports = 6,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6161_ops,
@@ -3935,10 +3520,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6165",
.num_databases = 4096,
.num_ports = 6,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
.ops = &mv88e6165_ops,
@@ -3950,10 +3538,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6171",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6171_ops,
@@ -3965,10 +3556,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6172",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6172_ops,
@@ -3980,10 +3574,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6175",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6175_ops,
@@ -3995,10 +3592,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6176",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6176_ops,
@@ -4010,10 +3610,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6185",
.num_databases = 256,
.num_ports = 10,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
.ops = &mv88e6185_ops,
@@ -4025,11 +3627,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6190",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
.tag_protocol = DSA_TAG_PROTO_DSA,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .pvt = true,
+ .atu_move_port_mask = 0x1f,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6190_ops,
},
@@ -4040,10 +3645,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6190X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6190x_ops,
@@ -4055,13 +3663,16 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6191",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
- .ops = &mv88e6391_ops,
+ .ops = &mv88e6191_ops,
},
[MV88E6240] = {
@@ -4070,10 +3681,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6240",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6240_ops,
@@ -4085,10 +3699,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6290",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6290_ops,
@@ -4100,10 +3717,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6320",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
.ops = &mv88e6320_ops,
@@ -4115,38 +3735,29 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6321",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
.ops = &mv88e6321_ops,
},
- [MV88E6141] = {
- .prod_num = PORT_SWITCH_ID_PROD_NUM_6141,
- .family = MV88E6XXX_FAMILY_6341,
- .name = "Marvell 88E6341",
- .num_databases = 4096,
- .num_ports = 6,
- .port_base_addr = 0x10,
- .global1_addr = 0x1b,
- .age_time_coeff = 3750,
- .tag_protocol = DSA_TAG_PROTO_EDSA,
- .flags = MV88E6XXX_FLAGS_FAMILY_6341,
- .ops = &mv88e6141_ops,
- },
-
[MV88E6341] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
.num_databases = 4096,
.num_ports = 6,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 3750,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6341,
.ops = &mv88e6341_ops,
@@ -4158,10 +3769,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6350",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6350_ops,
@@ -4173,10 +3787,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6351",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
.ops = &mv88e6351_ops,
@@ -4188,10 +3805,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6352",
.num_databases = 4096,
.num_ports = 7,
+ .max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
.ops = &mv88e6352_ops,
@@ -4202,10 +3822,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6390",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6390_ops,
@@ -4216,10 +3839,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6390X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
.age_time_coeff = 3750,
.g1_irqs = 9,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
.flags = MV88E6XXX_FLAGS_FAMILY_6390,
.ops = &mv88e6390x_ops,
@@ -4455,6 +4081,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_mdb_add = mv88e6xxx_port_mdb_add,
.port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mdb_dump = mv88e6xxx_port_mdb_dump,
+ .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
+ .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave,
};
static struct dsa_switch_driver mv88e6xxx_switch_drv = {
@@ -4466,12 +4094,14 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
struct device *dev = chip->dev;
struct dsa_switch *ds;
- ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
+ ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip));
if (!ds)
return -ENOMEM;
ds->priv = chip;
ds->ops = &mv88e6xxx_switch_ops;
+ ds->ageing_time_min = chip->info->age_time_coeff;
+ ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
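+ /* e.g. age_time_coeff = 15000 gives an ageing range of 15 s to 3825 s */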
dev_set_drvdata(dev, ds);
@@ -4502,10 +4132,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
chip->info = compat_info;
- err = mv88e6xxx_verify_madatory_ops(chip, chip->info->ops);
- if (err)
- return err;
-
err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 75af86a7fad8..39825837a1c9 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -3,7 +3,8 @@
*
* Copyright (c) 2008 Marvell Semiconductor
*
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1aec7382c02d..46a4ea0f8c47 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -3,7 +3,8 @@
*
* Copyright (c) 2008 Marvell Semiconductor
*
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,4 +39,29 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all);
+int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
+ unsigned int msecs);
+int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all);
+int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
+ bool all);
+
+int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
new file mode 100644
index 000000000000..fa7e7db5171b
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -0,0 +1,305 @@
+/*
+ * Marvell 88E6xxx Address Translation Unit (ATU) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ * Copyright (c) 2017 Savoir-faire Linux, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "global1.h"
+
+/* Offset 0x01: ATU FID Register */
+
+static int mv88e6xxx_g1_atu_fid_write(struct mv88e6xxx_chip *chip, u16 fid)
+{
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid & 0xfff);
+}
+
+/* Offset 0x0A: ATU Control Register */
+
+int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
+
+ if (learn2all)
+ val |= GLOBAL_ATU_CONTROL_LEARN2ALL;
+ else
+ val &= ~GLOBAL_ATU_CONTROL_LEARN2ALL;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+}
+
+int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
+ unsigned int msecs)
+{
+ const unsigned int coeff = chip->info->age_time_coeff;
+ const unsigned int min = 0x01 * coeff;
+ const unsigned int max = 0xff * coeff;
+ u8 age_time;
+ u16 val;
+ int err;
+
+ if (msecs < min || msecs > max)
+ return -ERANGE;
+
+ /* Round to nearest multiple of coeff */
+ age_time = (msecs + coeff / 2) / coeff;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
+
+ /* AgeTime occupies bits 11:4 */
+ val &= ~0xff0;
+ val |= age_time << 4;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "AgeTime set to 0x%02x (%d ms)\n", age_time,
+ age_time * coeff);
+
+ return 0;
+}
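+
+/* Worked example of the rounding above, assuming age_time_coeff = 3750
+ * as on the 6390 family: 300000 ms -> (300000 + 1875) / 3750 = 80
+ * (AgeTime 0x50), while 301875 ms rounds up to 81 (0x51).
+ */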
+
+/* Offset 0x0B: ATU Operation Register */
+
+static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
+}
+
+static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
+{
+ u16 val;
+ int err;
+
+ /* FID bits are spread across several registers, as newer chips support more databases */
+ if (mv88e6xxx_num_databases(chip) > 256) {
+ err = mv88e6xxx_g1_atu_fid_write(chip, fid);
+ if (err)
+ return err;
+ } else {
+ if (mv88e6xxx_num_databases(chip) > 16) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
+
+ val = (val & 0x0fff) | ((fid << 8) & 0xf000);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
+ if (err)
+ return err;
+ }
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ op |= fid & 0xf;
+ }
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, op);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op_wait(chip);
+}
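+
+/* For illustration: fid = 0x5a on a 256-database chip is dispatched as
+ * ATU Control bits 15:12 = 0x5 and ATU Op bits 3:0 = 0xa, while a
+ * 4096-database chip writes the full 0x05a to the ATU FID register.
+ */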
+
+/* Offset 0x0C: ATU Data Register */
+
+static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
+ if (err)
+ return err;
+
+ entry->state = val & 0xf;
+ if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ entry->trunk = !!(val & GLOBAL_ATU_DATA_TRUNK);
+ entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip);
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_atu_data_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 data = entry->state & 0xf;
+
+ if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ if (entry->trunk)
+ data |= GLOBAL_ATU_DATA_TRUNK;
+
+ data |= (entry->portvec & mv88e6xxx_port_mask(chip)) << 4;
+ }
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
+}
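+
+/* For illustration: state 0xe (static unicast) with ports 0 and 5 set
+ * in the vector packs as ((BIT(0) | BIT(5)) << 4) | 0xe = 0x21e.
+ */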
+
+/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
+ * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3
+ * Offset 0x0F: ATU MAC Address Register Bytes 4 & 5
+ */
+
+static int mv88e6xxx_g1_atu_mac_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int i, err;
+
+ for (i = 0; i < 3; i++) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
+ if (err)
+ return err;
+
+ entry->mac[i * 2] = val >> 8;
+ entry->mac[i * 2 + 1] = val & 0xff;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_atu_mac_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int i, err;
+
+ for (i = 0; i < 3; i++) {
+ val = (entry->mac[i * 2] << 8) | entry->mac[i * 2 + 1];
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Address Translation Unit operations */
+
+int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* Write the MAC address to start iterating from, only once */
+ if (entry->state == GLOBAL_ATU_DATA_STATE_UNUSED) {
+ err = mv88e6xxx_g1_atu_mac_write(chip, entry);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_read(chip, entry);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_mac_read(chip, entry);
+}
+
+int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_mac_write(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_LOAD_DB);
+}
+
+static int mv88e6xxx_g1_atu_flushmove(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry,
+ bool all)
+{
+ u16 op;
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ /* Flush/Move all or non-static entries from all or a given database */
+ if (all && fid)
+ op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB;
+ else if (fid)
+ op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
+ else if (all)
+ op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL;
+ else
+ op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
+
+ return mv88e6xxx_g1_atu_op(chip, fid, op);
+}
+
+int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all)
+{
+ struct mv88e6xxx_atu_entry entry = {
+ .state = 0, /* Null EntryState means Flush */
+ };
+
+ return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
+}
+
+static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
+ int from_port, int to_port, bool all)
+{
+ struct mv88e6xxx_atu_entry entry = { 0 };
+ unsigned long mask;
+ int shift;
+
+ if (!chip->info->atu_move_port_mask)
+ return -EOPNOTSUPP;
+
+ mask = chip->info->atu_move_port_mask;
+ shift = bitmap_weight(&mask, 16);
+
+ entry.state = 0xf; /* Full EntryState means Move */
+ entry.portvec = from_port & mask;
+ entry.portvec |= (to_port & mask) << shift;
+
+ return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
+}
+
+int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
+ bool all)
+{
+ int from_port = port;
+ int to_port = chip->info->atu_move_port_mask;
+
+ return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
+}
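A caller-side sketch of the Move encoding above, with illustrative values (the helper name is hypothetical): with atu_move_port_mask = 0x1f, bitmap_weight() yields a shift of 5, so FromPort and ToPort sit side by side in PortVec, and mv88e6xxx_g1_atu_remove() points ToPort at the all-ones value, which the Move operation treats as a removal.

	static u16 example_atu_remove_portvec(int port)
	{
		const u16 mask = 0x1f;	/* chip->info->atu_move_port_mask */
		const int shift = 5;	/* bitmap_weight() of the mask */
		int to_port = mask;	/* all-ones ToPort means remove */

		return (port & mask) | ((to_port & mask) << shift);
		/* e.g. port 3 -> 0x3e3 */
	}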
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
new file mode 100644
index 000000000000..9aea22d4c9e2
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -0,0 +1,511 @@
+/*
+ * Marvell 88E6xxx VLAN [Spanning Tree] Translation Unit (VTU [STU]) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ * Copyright (c) 2015 CMC Electronics, Inc.
+ * Copyright (c) 2017 Savoir-faire Linux, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "global1.h"
+
+/* Offset 0x02: VTU FID Register */
+
+static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_FID, &val);
+ if (err)
+ return err;
+
+ entry->fid = val & GLOBAL_VTU_FID_MASK;
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val = entry->fid & GLOBAL_VTU_FID_MASK;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_VTU_FID, val);
+}
+
+/* Offset 0x03: VTU SID Register */
+
+static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
+ if (err)
+ return err;
+
+ entry->sid = val & GLOBAL_VTU_SID_MASK;
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val = entry->sid & GLOBAL_VTU_SID_MASK;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, val);
+}
+
+/* Offset 0x05: VTU Operation Register */
+
+static int mv88e6xxx_g1_vtu_op_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY);
+}
+
+static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
+{
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_OP, op);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op_wait(chip);
+}
+
+/* Offset 0x06: VTU VID Register */
+
+static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
+ if (err)
+ return err;
+
+ entry->vid = val & 0xfff;
+
+ if (val & GLOBAL_VTU_VID_PAGE)
+ entry->vid |= 0x1000;
+
+ entry->valid = !!(val & GLOBAL_VTU_VID_VALID);
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val = entry->vid & 0xfff;
+
+ if (entry->vid & 0x1000)
+ val |= GLOBAL_VTU_VID_PAGE;
+
+ if (entry->valid)
+ val |= GLOBAL_VTU_VID_VALID;
+
+ return mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, val);
+}
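+
+/* For illustration: vid = 5000 (0x1388), valid on max_vid = 8191 chips,
+ * is written as val = 0x388 with GLOBAL_VTU_VID_PAGE set; the read path
+ * above reassembles the 13-bit VID the same way.
+ */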
+
+/* Offset 0x07: VTU/STU Data Register 1
+ * Offset 0x08: VTU/STU Data Register 2
+ * Offset 0x09: VTU/STU Data Register 3
+ */
+
+static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 regs[3];
+ int i;
+
+ /* Read all 3 VTU/STU Data registers */
+ for (i = 0; i < 3; ++i) {
+ u16 *reg = &regs[i];
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
+ }
+
+ /* Extract MemberTag and PortState data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
+
+ entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
+ entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+ }
+
+ return 0;
+}
+
+static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 regs[3] = { 0 };
+ int i;
+
+ /* Insert MemberTag and PortState data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
+
+ regs[i / 4] |= (entry->member[i] & 0x3) << member_offset;
+ regs[i / 4] |= (entry->state[i] & 0x3) << state_offset;
+ }
+
+ /* Write all 3 VTU/STU Data registers */
+ for (i = 0; i < 3; ++i) {
+ u16 reg = regs[i];
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
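+
+/* Worked example of this packing: port 5 maps to regs[1] (5 / 4), with
+ * its MemberTag in bits 5:4 ((5 % 4) * 4) and its PortState in the two
+ * bits above, 7:6.
+ */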
+
+static int mv88e6390_g1_vtu_data_read(struct mv88e6xxx_chip *chip, u8 *data)
+{
+ u16 regs[2];
+ int i;
+
+ /* Read the 2 VTU/STU Data registers */
+ for (i = 0; i < 2; ++i) {
+ u16 *reg = &regs[i];
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
+ }
+
+ /* Extract data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int offset = (i % 8) * 2;
+
+ data[i] = (regs[i / 8] >> offset) & 0x3;
+ }
+
+ return 0;
+}
+
+static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
+{
+ u16 regs[2] = { 0 };
+ int i;
+
+ /* Insert data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int offset = (i % 8) * 2;
+
+ regs[i / 8] |= (data[i] & 0x3) << offset;
+ }
+
+ /* Write the 2 VTU/STU Data registers */
+ for (i = 0; i < 2; ++i) {
+ u16 reg = regs[i];
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* VLAN Translation Unit Operations */
+
+static int mv88e6xxx_g1_vtu_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_read(chip, entry);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_vid_read(chip, entry);
+}
+
+static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *vtu)
+{
+ struct mv88e6xxx_vtu_entry stu;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_sid_read(chip, vtu);
+ if (err)
+ return err;
+
+ stu.sid = vtu->sid - 1;
+
+ err = mv88e6xxx_g1_vtu_stu_getnext(chip, &stu);
+ if (err)
+ return err;
+
+ if (stu.sid != vtu->sid || !stu.valid)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* To get the next higher active VID, the VTU GetNext operation can be
+ * started again without setting the VID registers, since they already
+ * contain the last VID.
+ *
+ * To save a few hardware accesses and abstract this to the caller,
+ * write the VID only once, when the entry is given as invalid.
+ */
+ if (!entry->valid) {
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_vid_read(chip, entry);
+}
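+
+/* Typical walk, sketched after how callers are expected to use this:
+ * seed an invalid entry at the chip's highest VID so GetNext wraps to
+ * the lowest active VID, then iterate until the entry comes back
+ * invalid:
+ *
+ *	struct mv88e6xxx_vtu_entry next = {
+ *		.vid = chip->info->max_vid,
+ *		.valid = false,
+ *	};
+ *	do {
+ *		err = mv88e6xxx_g1_vtu_getnext(chip, &next);
+ *		if (err)
+ *			break;
+ *	} while (next.valid);
+ */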
+
+int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_read(chip, entry);
+ if (err)
+ return err;
+
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[7:4] are located in VTU Operation 11:8
+ */
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_OP, &val);
+ if (err)
+ return err;
+
+ entry->fid = val & 0x000f;
+ entry->fid |= (val & 0x0f00) >> 4;
+ }
+
+ return 0;
+}
+
+int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ /* Fetch VLAN MemberTag data from the VTU */
+ err = mv88e6xxx_g1_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ /* Fetch (and mask) VLAN PortState data from the STU */
+ err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6185_g1_vtu_data_read(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ /* Fetch VLAN MemberTag data from the VTU */
+ err = mv88e6xxx_g1_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6390_g1_vtu_data_read(chip, entry->member);
+ if (err)
+ return err;
+
+ /* Fetch VLAN PortState data from the STU */
+ err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6390_g1_vtu_data_read(chip, entry->state);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[7:4] are located in VTU Operation 11:8
+ */
+ op |= entry->fid & 0x000f;
+ op |= (entry->fid & 0x00f0) << 8;
+ }
+
+ return mv88e6xxx_g1_vtu_op(chip, op);
+}
+
+int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ /* Write MemberTag and PortState data */
+ err = mv88e6185_g1_vtu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ if (err)
+ return err;
+
+ /* Load STU entry */
+ err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ /* Write PortState data */
+ err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ if (err)
+ return err;
+
+ /* Load STU entry */
+ err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+ if (err)
+ return err;
+
+ /* Write MemberTag data */
+ err = mv88e6390_g1_vtu_data_write(chip, entry->member);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_FLUSH_ALL);
+}
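Consumers reach these per-family implementations through the vtu_getnext and vtu_loadpurge pointers that this patch adds to mv88e6xxx_ops; a minimal dispatch sketch on the chip.c side (error handling elided) might read:

	if (!chip->info->ops->vtu_getnext)
		return -EOPNOTSUPP;

	return chip->info->ops->vtu_getnext(chip, &entry);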
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 8f15bc7b1f5f..b3fea55071e3 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -4,7 +4,8 @@
*
* Copyright (c) 2008 Marvell Semiconductor
*
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,6 +13,7 @@
* (at your option) any later version.
*/
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include "mv88e6xxx.h"
#include "global2.h"
@@ -170,6 +172,50 @@ static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
return err;
}
+/* Offset 0x0B: Cross-chip Port VLAN (Addr) Register
+ * Offset 0x0C: Cross-chip Port VLAN Data Register
+ */
+
+static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_wait(chip, GLOBAL2_PVT_ADDR, GLOBAL2_PVT_ADDR_BUSY);
+}
+
+static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 op)
+{
+ int err;
+
+ /* 9-bit Cross-chip PVT pointer: with GLOBAL2_MISC_5_BIT_PORT cleared,
+ * source device is 5-bit, source port is 4-bit.
+ */
+ op |= (src_dev & 0x1f) << 4;
+ op |= (src_port & 0xf);
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR, op);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_pvt_op_wait(chip);
+}
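+
+/* For illustration: src_dev = 2, src_port = 3 forms the 9-bit pointer
+ * (2 << 4) | 3 = 0x023, merged into the op word above.
+ */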
+
+int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 data)
+{
+ int err;
+
+ err = mv88e6xxx_g2_pvt_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
+ GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN);
+}
+
/* Offset 0x0D: Switch MAC/WoL/WoF register */
static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
@@ -522,8 +568,9 @@ static int mv88e6xxx_g2_smi_phy_write_addr(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
}
-int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr,
- int reg_c45, u16 *val, bool external)
+static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ int addr, int reg_c45, u16 *val,
+ bool external)
{
int device = (reg_c45 >> 16) & 0x1f;
int reg = reg_c45 & 0xffff;
@@ -553,8 +600,9 @@ int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr,
return 0;
}
-int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 *val, bool external)
+static int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val,
+ bool external)
{
u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg;
int err;
@@ -586,8 +634,9 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_smi_phy_read_c22(chip, addr, reg, val, external);
}
-int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr,
- int reg_c45, u16 val, bool external)
+static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ int addr, int reg_c45, u16 val,
+ bool external)
{
int device = (reg_c45 >> 16) & 0x1f;
int reg = reg_c45 & 0xffff;
@@ -615,8 +664,9 @@ int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr,
return 0;
}
-int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 val, bool external)
+static int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val,
+ bool external)
{
u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg;
int err;
@@ -782,6 +832,31 @@ static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
return err;
}
+/* Offset 0x1D: Misc Register */
+
+static int mv88e6xxx_g2_misc_5_bit_port(struct mv88e6xxx_chip *chip,
+ bool port_5_bit)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_MISC, &val);
+ if (err)
+ return err;
+
+ if (port_5_bit)
+ val |= GLOBAL2_MISC_5_BIT_PORT;
+ else
+ val &= ~GLOBAL2_MISC_5_BIT_PORT;
+
+ return mv88e6xxx_g2_write(chip, GLOBAL2_MISC, val);
+}
+
+int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_misc_5_bit_port(chip, false);
+}
+
static void mv88e6xxx_g2_irq_mask(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
@@ -964,14 +1039,6 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
return err;
}
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) {
- /* Initialize Cross-chip Port VLAN Table to reset defaults */
- err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR,
- GLOBAL2_PVT_ADDR_OP_INIT_ONES);
- if (err)
- return err;
- }
-
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) {
/* Clear the priority override table. */
err = mv88e6xxx_g2_clear_pot(chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index a8b2f9486a4a..96046bb12ca1 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -3,7 +3,8 @@
*
* Copyright (c) 2008 Marvell Semiconductor
*
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,6 +42,10 @@ int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 data);
+int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip);
+
int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
@@ -109,6 +114,17 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
+ int src_dev, int src_port, u16 data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 6033f2f6260a..77236cd72df2 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -16,6 +16,7 @@
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#include <linux/phy.h>
+#include <net/dsa.h>
#ifndef UINT64_MAX
#define UINT64_MAX (u64)(~((u64)0))
@@ -132,18 +133,19 @@
#define PORT_CONTROL_TAG_IF_BOTH BIT(6)
#define PORT_CONTROL_USE_IP BIT(5)
#define PORT_CONTROL_USE_TAG BIT(4)
-#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3)
#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_DA (0x0 << 2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_MULTICAST_DA (0x1 << 2)
-#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_UNITCAST_DA (0x2 << 2)
-#define PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA (0x3 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_MASK (0x3 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA (0x0 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA (0x1 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA (0x2 << 2)
+#define PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA (0x3 << 2)
#define PORT_CONTROL_STATE_MASK 0x03
#define PORT_CONTROL_STATE_DISABLED 0x00
#define PORT_CONTROL_STATE_BLOCKING 0x01
#define PORT_CONTROL_STATE_LEARNING 0x02
#define PORT_CONTROL_STATE_FORWARDING 0x03
#define PORT_CONTROL_1 0x05
+#define PORT_CONTROL_1_MESSAGE_PORT BIT(15)
#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0)
#define PORT_BASE_VLAN 0x06
#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12)
@@ -166,7 +168,6 @@
#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8)
#define PORT_CONTROL_2_MAP_DA BIT(7)
#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
-#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5)
#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
#define PORT_CONTROL_2_UPSTREAM_MASK 0x0f
@@ -181,6 +182,7 @@
#define PORT_ATU_CONTROL 0x0c
#define PORT_PRI_OVERRIDE 0x0d
#define PORT_ETH_TYPE 0x0f
+#define PORT_ETH_TYPE_DEFAULT 0x9100
#define PORT_IN_DISCARD_LO 0x10
#define PORT_IN_DISCARD_HI 0x11
#define PORT_IN_FILTERED 0x12
@@ -247,6 +249,7 @@
#define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY)
#define GLOBAL_VTU_VID 0x06
#define GLOBAL_VTU_VID_MASK 0xfff
+#define GLOBAL_VTU_VID_PAGE BIT(13)
#define GLOBAL_VTU_VID_VALID BIT(12)
#define GLOBAL_VTU_DATA_0_3 0x07
#define GLOBAL_VTU_DATA_4_7 0x08
@@ -437,9 +440,14 @@
#define GLOBAL2_WDOG_FORCE_IRQ BIT(0)
#define GLOBAL2_QOS_WEIGHT 0x1c
#define GLOBAL2_MISC 0x1d
+#define GLOBAL2_MISC_5_BIT_PORT BIT(14)
#define MV88E6XXX_N_FID 4096
+/* PVT limits for 4-bit port and 5-bit switch */
+#define MV88E6XXX_MAX_PVT_SWITCHES 32
+#define MV88E6XXX_MAX_PVT_PORTS 16
+
enum mv88e6xxx_frame_mode {
MV88E6XXX_FRAME_MODE_NORMAL,
MV88E6XXX_FRAME_MODE_DSA,
@@ -525,8 +533,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */
MV88E6XXX_CAP_G2_IRL_CMD, /* (0x09) Ingress Rate Command */
MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */
- MV88E6XXX_CAP_G2_PVT_ADDR, /* (0x0b) Cross Chip Port VLAN Addr */
- MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */
MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
/* Per VLAN Spanning Tree Unit (STU).
@@ -551,7 +557,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
-#define MV88E6XXX_FLAG_G1_ATU_FID BIT_ULL(MV88E6XXX_CAP_G1_ATU_FID)
#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
@@ -560,13 +565,8 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
#define MV88E6XXX_FLAG_G2_IRL_CMD BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD)
#define MV88E6XXX_FLAG_G2_IRL_DATA BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA)
-#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT_ULL(MV88E6XXX_CAP_G2_PVT_ADDR)
-#define MV88E6XXX_FLAG_G2_PVT_DATA BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA)
#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
-#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU)
-#define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU)
-
/* Ingress Rate Limit unit */
#define MV88E6XXX_FLAGS_IRL \
(MV88E6XXX_FLAG_G2_IRL_CMD | \
@@ -577,11 +577,6 @@ enum mv88e6xxx_cap {
(MV88E6XXX_FLAG_SMI_CMD | \
MV88E6XXX_FLAG_SMI_DATA)
-/* Cross-chip Port VLAN Table */
-#define MV88E6XXX_FLAGS_PVT \
- (MV88E6XXX_FLAG_G2_PVT_ADDR | \
- MV88E6XXX_FLAG_G2_PVT_DATA)
-
/* Fiber/SERDES Registers at SMI address F, page 1 */
#define MV88E6XXX_FLAGS_SERDES \
(MV88E6XXX_FLAG_PHY_PAGE | \
@@ -590,43 +585,33 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAGS_FAMILY_6095 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6097 \
- (MV88E6XXX_FLAG_G1_ATU_FID | \
- MV88E6XXX_FLAG_G1_VTU_FID | \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT)
+ MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6165 \
- (MV88E6XXX_FLAG_G1_ATU_FID | \
- MV88E6XXX_FLAG_G1_VTU_FID | \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT)
+ MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6185 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAG_VTU)
+ MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6320 \
(MV88E6XXX_FLAG_EEE | \
@@ -634,64 +619,47 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT)
+ MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6341 \
(MV88E6XXX_FLAG_EEE | \
- MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT | \
MV88E6XXX_FLAGS_SERDES)
#define MV88E6XXX_FLAGS_FAMILY_6351 \
- (MV88E6XXX_FLAG_G1_ATU_FID | \
- MV88E6XXX_FLAG_G1_VTU_FID | \
+ (MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT)
+ MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6352 \
(MV88E6XXX_FLAG_EEE | \
- MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT | \
MV88E6XXX_FLAGS_SERDES)
#define MV88E6XXX_FLAGS_FAMILY_6390 \
(MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_INT | \
- MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT)
+ MV88E6XXX_FLAGS_MULTI_CHIP)
struct mv88e6xxx_ops;
@@ -701,20 +669,26 @@ struct mv88e6xxx_info {
const char *name;
unsigned int num_databases;
unsigned int num_ports;
+ unsigned int max_vid;
unsigned int port_base_addr;
unsigned int global1_addr;
unsigned int age_time_coeff;
unsigned int g1_irqs;
+ bool pvt;
enum dsa_tag_protocol tag_protocol;
unsigned long long flags;
+
+ /* Mask for FromPort and ToPort value of PortVec used in ATU Move
+ * operation. 0 means that the ATU Move operation is not supported.
+ */
+ u8 atu_move_port_mask;
const struct mv88e6xxx_ops *ops;
};
struct mv88e6xxx_atu_entry {
- u16 fid;
u8 state;
bool trunk;
- u16 portv_trunkid;
+ u16 portvec;
u8 mac[ETH_ALEN];
};
@@ -723,7 +697,8 @@ struct mv88e6xxx_vtu_entry {
u16 fid;
u8 sid;
bool valid;
- u8 data[DSA_MAX_PORTS];
+ u8 member[DSA_MAX_PORTS];
+ u8 state[DSA_MAX_PORTS];
};
struct mv88e6xxx_bus_ops;
@@ -864,14 +839,16 @@ struct mv88e6xxx_ops {
int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
- int (*port_set_egress_unknowns)(struct mv88e6xxx_chip *chip, int port,
- bool on);
+ int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast);
int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
u16 etype);
int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port);
int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
/* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc.
* Some chips allow this to be configured on specific ports.
@@ -906,6 +883,12 @@ struct mv88e6xxx_ops {
/* Can be either in g1 or g2, so don't use a prefix */
int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
+
+ /* VLAN Translation Unit operations */
+ int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+ int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
};
struct mv88e6xxx_irq_ops {
@@ -934,6 +917,11 @@ static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
return (chip->info->flags & flags) == flags;
}
+static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->pvt;
+}
+
static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
{
return chip->info->num_databases;
@@ -944,6 +932,11 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
return chip->info->num_ports;
}
+static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
+{
+ return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
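Note: with this hunk several capability flags (STU, VTU, PVT, G1_ATU_FID)
disappear in favour of data in struct mv88e6xxx_info itself (pvt, max_vid,
atu_move_port_mask) and per-chip ops (vtu_getnext/vtu_loadpurge). A minimal
sketch of how a caller might gate an ATU Move on the new mask; the helper
name is hypothetical and not part of this patch:

	static int mv88e6xxx_can_atu_move(struct mv88e6xxx_chip *chip, int port)
	{
		u8 mask = chip->info->atu_move_port_mask;

		if (!mask)
			return -EOPNOTSUPP;	/* ATU Move not implemented */
		if (port > mask)
			return -ERANGE;		/* exceeds FromPort/ToPort field */
		return 0;
	}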
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 8875784c4718..548a956637ee 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -3,7 +3,8 @@
*
* Copyright (c) 2008 Marvell Semiconductor
*
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -497,8 +498,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
}
-int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on)
+static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
+ int port, bool unicast)
{
int err;
u16 reg;
@@ -507,7 +508,7 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (on)
+ if (unicast)
reg |= PORT_CONTROL_FORWARD_UNKNOWN;
else
reg &= ~PORT_CONTROL_FORWARD_UNKNOWN;
@@ -515,8 +516,8 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
}
-int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on)
+int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast)
{
int err;
u16 reg;
@@ -525,21 +526,45 @@ int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (on)
- reg |= PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA;
+ reg &= ~PORT_CONTROL_EGRESS_FLOODS_MASK;
+
+ if (unicast && multicast)
+ reg |= PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA;
+ else if (unicast)
+ reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
+ else if (multicast)
+ reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
else
- reg &= ~PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA;
+ reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA;
return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
}
/* Offset 0x05: Port Control 1 */
+int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+ bool message_port)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &val);
+ if (err)
+ return err;
+
+ if (message_port)
+ val |= PORT_CONTROL_1_MESSAGE_PORT;
+ else
+ val &= ~PORT_CONTROL_1_MESSAGE_PORT;
+
+ return mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, val);
+}
+
/* Offset 0x06: Port Based VLAN Map */
int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
{
- const u16 mask = GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+ const u16 mask = mv88e6xxx_port_mask(chip);
u16 reg;
int err;
@@ -672,8 +697,8 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
};
-int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on)
+static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
+ int port, bool multicast)
{
int err;
u16 reg;
@@ -682,14 +707,26 @@ int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (on)
- reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+ if (multicast)
+ reg |= PORT_CONTROL_2_DEFAULT_FORWARD;
else
- reg &= ~PORT_CONTROL_2_FORWARD_UNKNOWN;
+ reg &= ~PORT_CONTROL_2_DEFAULT_FORWARD;
return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
}
+int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast)
+{
+ int err;
+
+ err = mv88e6185_port_set_forward_unknown(chip, port, unicast);
+ if (err)
+ return err;
+
+ return mv88e6185_port_set_default_forward(chip, port, multicast);
+}
+
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port)
{
@@ -769,6 +806,20 @@ int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0001);
}
+/* Offset 0x0C: Port ATU Control */
+
+int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, 0);
+}
+
+/* Offset 0x0D: (Priority) Override Register */
+
+int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE, 0);
+}
+
/* Offset 0x0f: Port Ether type */
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
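Note: the egress-floods rework above replaces the single "egress unknowns"
boolean with independent unknown-unicast and unknown-multicast controls. On
6352-family ports the pair maps onto one two-bit field in Port Control:

	unicast && multicast -> PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA
	unicast only         -> PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA
	multicast only       -> PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA
	neither              -> PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA

On 6185-family ports the same pair is split across two registers
(FORWARD_UNKNOWN in Port Control, DEFAULT_FORWARD in Port Control 2), which
is what mv88e6185_port_set_egress_floods() composes.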
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index c83cbb3f4491..86f40887b6d2 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -3,7 +3,8 @@
*
* Copyright (c) 2008 Marvell Semiconductor
*
- * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,14 +57,14 @@ int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
-int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on);
-int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on);
-int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
- bool on);
+int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast);
+int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+ bool unicast, bool multicast);
int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
u16 etype);
+int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+ bool message_port);
int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
@@ -75,4 +76,8 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
+
+int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
+
#endif /* _MV88E6XXX_PORT_H */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 2c80611b94ae..149244aac20a 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
+#include <linux/net_tstamp.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
@@ -125,6 +126,7 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
dstats->tx_bytes += skb->len;
u64_stats_update_end(&dstats->syncp);
+ skb_tx_timestamp(skb);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -304,8 +306,21 @@ static void dummy_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
+static int dummy_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *ts_info)
+{
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ ts_info->phc_index = -1;
+
+ return 0;
+}

+
static const struct ethtool_ops dummy_ethtool_ops = {
.get_drvinfo = dummy_get_drvinfo,
+ .get_ts_info = dummy_get_ts_info,
};
static void dummy_free_netdev(struct net_device *dev)
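Note: with get_ts_info wired up, userspace can confirm the advertised
software timestamping via the ETHTOOL_GET_TS_INFO ioctl (this is what
"ethtool -T" uses). A self-contained userspace sketch; the interface name
"dummy0" is an assumption for illustration:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_ts_info ts = { .cmd = ETHTOOL_GET_TS_INFO };
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		strncpy(ifr.ifr_name, "dummy0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ts;
		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("SIOCETHTOOL");
			return 1;
		}
		printf("so_timestamping=0x%x phc_index=%d\n",
		       ts.so_timestamping, ts.phc_index);
		close(fd);
		return 0;
	}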
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 084a6d58543a..be823c186517 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -283,7 +283,6 @@ struct typhoon {
spinlock_t command_lock ____cacheline_aligned;
struct basic_ring cmdRing;
struct basic_ring respRing;
- struct net_device_stats stats;
struct net_device_stats stats_saved;
struct typhoon_shared * shared;
dma_addr_t shared_dma;
@@ -898,7 +897,7 @@ typhoon_set_rx_mode(struct net_device *dev)
static int
typhoon_do_get_stats(struct typhoon *tp)
{
- struct net_device_stats *stats = &tp->stats;
+ struct net_device_stats *stats = &tp->dev->stats;
struct net_device_stats *saved = &tp->stats_saved;
struct cmd_desc xp_cmd;
struct resp_desc xp_resp[7];
@@ -951,7 +950,7 @@ static struct net_device_stats *
typhoon_get_stats(struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
- struct net_device_stats *stats = &tp->stats;
+ struct net_device_stats *stats = &tp->dev->stats;
struct net_device_stats *saved = &tp->stats_saved;
smp_rmb();
@@ -1991,7 +1990,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
tp->card_state = Sleeping;
smp_wmb();
typhoon_do_get_stats(tp);
- memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
+ memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats));
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
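Note: this conversion (and the bfin_mac and nmclan_cs hunks below) follows
the same cleanup pattern: drop the driver-private struct net_device_stats
copy and use the instance already embedded in struct net_device. The
resulting minimal shape, sketched; example_refresh_hw_counters() is a
hypothetical hardware sync step:

	static struct net_device_stats *example_get_stats(struct net_device *dev)
	{
		/* counters are updated directly in dev->stats elsewhere */
		example_refresh_hw_counters(dev);
		return &dev->stats;
	}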
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 8c08f9deef92..edae15ac0e98 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -180,5 +180,6 @@ source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 26dce5bf2c18..bf7f4502cabc 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -91,3 +91,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 8c3b56198e4b..4ad5b9be3f84 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -68,13 +68,6 @@ struct net_dma_desc_tx {
};
struct bfin_mac_local {
- /*
- * these are things that the kernel wants me to keep, so users
- * can find out semi-useless statistics of how well the card is
- * performing
- */
- struct net_device_stats stats;
-
spinlock_t lock;
int wol; /* Wake On Lan */
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 9f7422ada704..d8e133ced7b8 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -34,6 +34,7 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
@@ -1454,11 +1455,10 @@ static int greth_of_probe(struct platform_device *ofdev)
break;
}
if (i == 6) {
- const unsigned char *addr;
- int len;
- addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
- &len);
- if (addr != NULL && len == 6) {
+ const u8 *addr;
+
+ addr = of_get_mac_address(ofdev->dev.of_node);
+ if (addr) {
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) addr[i];
} else {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 35f19430c84a..7c1214d78855 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -133,7 +133,7 @@ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
int irq_idx = ENA_IO_IRQ_IDX(i);
rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- adapter->msix_entries[irq_idx].vector);
+ pci_irq_vector(adapter->pdev, irq_idx));
if (rc) {
free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
adapter->netdev->rx_cpu_rmap = NULL;
@@ -1208,13 +1208,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
- int i, msix_vecs, rc;
-
- if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
- netif_err(adapter, probe, adapter->netdev,
- "Error, MSI-X is already enabled\n");
- return -EPERM;
- }
+ int msix_vecs, rc;
/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
@@ -1222,16 +1216,9 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
- adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry));
-
- if (!adapter->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < msix_vecs; i++)
- adapter->msix_entries[i].entry = i;
-
- rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs);
- if (rc != 0) {
+ rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs,
+ PCI_IRQ_MSIX);
+ if (rc < 0) {
netif_err(adapter, probe, adapter->netdev,
"Failed to enable MSI-X, vectors %d rc %d\n",
msix_vecs, rc);
@@ -1248,7 +1235,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
}
adapter->msix_vecs = msix_vecs;
- set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
return 0;
}
@@ -1264,7 +1250,7 @@ static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
ena_intr_msix_mgmnt;
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
- adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
+ pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
cpu = cpumask_first(cpu_online_mask);
adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
cpumask_set_cpu(cpu,
@@ -1287,7 +1273,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
adapter->irq_tbl[irq_idx].vector =
- adapter->msix_entries[irq_idx].vector;
+ pci_irq_vector(adapter->pdev, irq_idx);
adapter->irq_tbl[irq_idx].cpu = cpu;
cpumask_set_cpu(cpu,
@@ -1325,12 +1311,6 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int rc = 0, i, k;
- if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
- netif_err(adapter, ifup, adapter->netdev,
- "Failed to request I/O IRQ: MSI-X is not enabled\n");
- return -EINVAL;
- }
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
irq = &adapter->irq_tbl[i];
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
@@ -1389,16 +1369,6 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
}
}
-static void ena_disable_msix(struct ena_adapter *adapter)
-{
- if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
- pci_disable_msix(adapter->pdev);
-
- if (adapter->msix_entries)
- vfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
-}
-
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
int i;
@@ -2479,8 +2449,7 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
return 0;
err_disable_msix:
- ena_disable_msix(adapter);
-
+ pci_free_irq_vectors(adapter->pdev);
return rc;
}
@@ -2518,7 +2487,7 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_free_mgmnt_irq(adapter);
- ena_disable_msix(adapter);
+ pci_free_irq_vectors(adapter->pdev);
ena_com_abort_admin_commands(ena_dev);
@@ -2569,7 +2538,7 @@ static void ena_fw_reset_device(struct work_struct *work)
return;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
- ena_disable_msix(adapter);
+ pci_free_irq_vectors(adapter->pdev);
err_device_destroy:
ena_com_admin_destroy(ena_dev);
err:
@@ -3103,7 +3072,7 @@ err_rss:
err_free_msix:
ena_com_dev_reset(ena_dev);
ena_free_mgmnt_irq(adapter);
- ena_disable_msix(adapter);
+ pci_free_irq_vectors(adapter->pdev);
err_worker_destroy:
ena_com_destroy_interrupt_moderation(ena_dev);
del_timer(&adapter->timer_service);
@@ -3188,7 +3157,7 @@ static void ena_remove(struct pci_dev *pdev)
ena_free_mgmnt_irq(adapter);
- ena_disable_msix(adapter);
+ pci_free_irq_vectors(adapter->pdev);
free_netdev(netdev);
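Note: the ena changes swap the hand-rolled msix_entry array and the
ENA_FLAG_MSIX_ENABLED bookkeeping for the managed PCI IRQ API. A minimal
sketch of that pattern (not the full ena logic):

	static int example_setup_msix(struct pci_dev *pdev, int nvec)
	{
		/* allocate exactly nvec MSI-X vectors, or fail */
		int rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);

		if (rc < 0)
			return rc;

		/* Linux IRQ number of vector 0; no msix_entries[] needed */
		return pci_irq_vector(pdev, 0);
	}

	/* teardown is a single call: pci_free_irq_vectors(pdev); */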
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index ed62d8e231a1..0e22bce6239d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -248,7 +248,6 @@ enum ena_flags_t {
ENA_FLAG_DEVICE_RUNNING,
ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP,
- ENA_FLAG_MSIX_ENABLED,
ENA_FLAG_TRIGGER_RESET
};
@@ -267,7 +266,6 @@ struct ena_adapter {
int num_queues;
- struct msix_entry *msix_entries;
int msix_vecs;
u32 tx_usecs, rx_usecs; /* interrupt moderation */
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index b556c926557a..9c152d85840d 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -359,7 +359,6 @@ typedef struct _mace_statistics {
typedef struct _mace_private {
struct pcmcia_device *p_dev;
- struct net_device_stats linux_stats; /* Linux statistics counters */
mace_statistics mace_stats; /* MACE chip statistics counters */
/* restore_multicast_list() state variables */
@@ -879,7 +878,7 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
service a transmit interrupt while we are in here.
*/
- lp->linux_stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
lp->tx_free_frames--;
/* WARNING: Write the _exact_ number of bytes written in the header! */
@@ -967,7 +966,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
- lp->linux_stats.tx_errors++;
+ dev->stats.tx_errors++;
outb(0xFF, ioaddr + AM2150_XMT_SKIP);
}
@@ -1016,7 +1015,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
} /* if (xmtfs & MACE_XMTFS_XMTSV) */
- lp->linux_stats.tx_packets++;
+ dev->stats.tx_packets++;
lp->tx_free_frames++;
netif_wake_queue(dev);
} /* if (status & MACE_IR_XMTINT) */
@@ -1077,7 +1076,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
" 0x%X.\n", dev->name, rx_framecnt, rx_status);
if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
- lp->linux_stats.rx_errors++;
+ dev->stats.rx_errors++;
if (rx_status & MACE_RCVFS_OFLO) {
lp->mace_stats.oflo++;
}
@@ -1114,14 +1113,14 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
- lp->linux_stats.rx_packets++;
- lp->linux_stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
continue;
} else {
pr_debug("%s: couldn't allocate a sk_buff of size"
" %d.\n", dev->name, pkt_len);
- lp->linux_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
}
outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
@@ -1231,13 +1230,13 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
/* At this point, mace_stats is fully updated for this call.
- We may now update the linux_stats. */
+ We may now update the netdev stats. */
- /* The MACE has no equivalent for linux_stats field which are commented
+ /* The MACE has no equivalent for the netdev stats fields, which are commented
out. */
- /* lp->linux_stats.multicast; */
- lp->linux_stats.collisions =
+ /* dev->stats.multicast; */
+ dev->stats.collisions =
lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
/* Collision: The MACE may retry sending a packet 15 times
before giving up. The retry count is in XMTRC.
@@ -1245,22 +1244,22 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
If so, why doesn't the RCVCC record these collisions? */
/* detailed rx_errors: */
- lp->linux_stats.rx_length_errors =
+ dev->stats.rx_length_errors =
lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
- /* lp->linux_stats.rx_over_errors */
- lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
- lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
- lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
- lp->linux_stats.rx_missed_errors =
+ /* dev->stats.rx_over_errors */
+ dev->stats.rx_crc_errors = lp->mace_stats.fcs;
+ dev->stats.rx_frame_errors = lp->mace_stats.fram;
+ dev->stats.rx_fifo_errors = lp->mace_stats.oflo;
+ dev->stats.rx_missed_errors =
lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
/* detailed tx_errors */
- lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
- lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
+ dev->stats.tx_aborted_errors = lp->mace_stats.rtry;
+ dev->stats.tx_carrier_errors = lp->mace_stats.lcar;
/* LCAR usually results from bad cabling. */
- lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
- lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
- /* lp->linux_stats.tx_window_errors; */
+ dev->stats.tx_fifo_errors = lp->mace_stats.uflo;
+ dev->stats.tx_heartbeat_errors = lp->mace_stats.cerr;
+ /* dev->stats.tx_window_errors; */
} /* update_stats */
/* ----------------------------------------------------------------------------
@@ -1274,10 +1273,10 @@ static struct net_device_stats *mace_get_stats(struct net_device *dev)
update_stats(dev->base_addr, dev);
pr_debug("%s: updating the statistics.\n", dev->name);
- pr_linux_stats(&lp->linux_stats);
+ pr_linux_stats(&dev->stats);
pr_mace_stats(&lp->mace_stats);
- return &lp->linux_stats;
+ return &dev->stats;
} /* net_device_stats */
/* ----------------------------------------------------------------------------
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a713abd9d03e..c772420fa41c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -118,6 +118,7 @@
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
@@ -1854,7 +1855,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
if (tc_to_netdev->type != TC_SETUP_MQPRIO)
return -EINVAL;
- tc = tc_to_netdev->tc;
+ tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ tc = tc_to_netdev->mqprio->num_tc;
if (tc > pdata->hw_feat.tc_cnt)
return -EINVAL;
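Note: this follows the mqprio API change where struct tc_to_netdev carries
a struct tc_mqprio_qopt: the driver now reads the traffic-class count from
mqprio->num_tc and reports offload by setting mqprio->hw to
TC_MQPRIO_HW_OFFLOAD_TCS. A configuration that exercises this path, for
example (queue layout illustrative):

	tc qdisc add dev eth0 root mqprio num_tc 4 \
		map 0 1 2 3 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 1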
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 0c7088a426e9..417bdb5982a9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -115,6 +115,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/completion.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4c5b90eea4af..b672d9249539 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -114,6 +114,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/mdio.h>
diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig
index ec63d706d464..59efe5b145dd 100644
--- a/drivers/net/ethernet/apm/Kconfig
+++ b/drivers/net/ethernet/apm/Kconfig
@@ -1 +1,2 @@
source "drivers/net/ethernet/apm/xgene/Kconfig"
+source "drivers/net/ethernet/apm/xgene-v2/Kconfig"
diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile
index 65ce32ad1b2c..946b2a4c882d 100644
--- a/drivers/net/ethernet/apm/Makefile
+++ b/drivers/net/ethernet/apm/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_NET_XGENE) += xgene/
+obj-$(CONFIG_NET_XGENE_V2) += xgene-v2/
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
new file mode 100644
index 000000000000..1205861b6318
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -0,0 +1,11 @@
+config NET_XGENE_V2
+ tristate "APM X-Gene SoC Ethernet-v2 Driver"
+ depends on HAS_DMA
+ depends on ARCH_XGENE || COMPILE_TEST
+ help
+ This is the Ethernet driver for the on-chip Ethernet interface
+ on APM X-Gene SoCs, which uses a linked-list DMA descriptor
+ architecture (v2).
+
+ To compile this driver as a module, choose M here. This module will
+ be called xgene-enet-v2.
diff --git a/drivers/net/ethernet/apm/xgene-v2/Makefile b/drivers/net/ethernet/apm/xgene-v2/Makefile
new file mode 100644
index 000000000000..f16a2b3dde8b
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for APM X-Gene Ethernet v2 driver
+#
+
+xgene-enet-v2-objs := main.o mac.o enet.o ring.o mdio.o ethtool.o
+obj-$(CONFIG_NET_XGENE_V2) += xgene-enet-v2.o
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.c b/drivers/net/ethernet/apm/xgene-v2/enet.c
new file mode 100644
index 000000000000..5998da014923
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.c
@@ -0,0 +1,83 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->resources.base_addr + offset;
+
+ iowrite32(val, addr);
+}
+
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset)
+{
+ void __iomem *addr = pdata->resources.base_addr + offset;
+
+ return ioread32(addr);
+}
+
+int xge_port_reset(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ u32 data, wait = 10;
+
+ xge_wr_csr(pdata, ENET_CLKEN, 0x3);
+ xge_wr_csr(pdata, ENET_SRST, 0xf);
+ xge_wr_csr(pdata, ENET_SRST, 0);
+ xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 1);
+ xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 0);
+
+ do {
+ usleep_range(100, 110);
+ data = xge_rd_csr(pdata, BLOCK_MEM_RDY);
+ } while (data != MEM_RDY && wait--);
+
+ if (data != MEM_RDY) {
+ dev_err(dev, "ECC init failed: %x\n", data);
+ return -ETIMEDOUT;
+ }
+
+ xge_wr_csr(pdata, ENET_SHIM, DEVM_ARAUX_COH | DEVM_AWAUX_COH);
+
+ return 0;
+}
+
+static void xge_traffic_resume(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ xge_wr_csr(pdata, CFG_FORCE_LINK_STATUS_EN, 1);
+ xge_wr_csr(pdata, FORCE_LINK_STATUS, 1);
+
+ xge_wr_csr(pdata, CFG_LINK_AGGR_RESUME, 1);
+ xge_wr_csr(pdata, RX_DV_GATE_REG, 1);
+}
+
+void xge_port_init(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ pdata->phy_speed = SPEED_1000;
+ xge_mac_init(pdata);
+ xge_traffic_resume(ndev);
+}
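Note: the ECC-ready wait in xge_port_reset() is an open-coded poll loop.
Behaviourally equivalent, as a sketch, using the iopoll helper (an
alternative, not what the patch uses):

	#include <linux/iopoll.h>

	u32 data;
	int ret;

	/* poll BLOCK_MEM_RDY every ~100us, give up after ~1.1ms */
	ret = readx_poll_timeout(ioread32,
				 pdata->resources.base_addr + BLOCK_MEM_RDY,
				 data, data == MEM_RDY, 100, 1100);
	if (ret)
		return ret;	/* -ETIMEDOUT */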
diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.h b/drivers/net/ethernet/apm/xgene-v2/enet.h
new file mode 100644
index 000000000000..3fd36dc66a23
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/enet.h
@@ -0,0 +1,45 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_ENET_H__
+#define __XGENE_ENET_V2_ENET_H__
+
+#define ENET_CLKEN 0xc008
+#define ENET_SRST 0xc000
+#define ENET_SHIM 0xc010
+#define CFG_MEM_RAM_SHUTDOWN 0xd070
+#define BLOCK_MEM_RDY 0xd074
+
+#define MEM_RDY 0xffffffff
+#define DEVM_ARAUX_COH BIT(19)
+#define DEVM_AWAUX_COH BIT(3)
+
+#define CFG_FORCE_LINK_STATUS_EN 0x229c
+#define FORCE_LINK_STATUS 0x22a0
+#define CFG_LINK_AGGR_RESUME 0x27c8
+#define RX_DV_GATE_REG 0x2dfc
+
+void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val);
+u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset);
+int xge_port_reset(struct net_device *ndev);
+void xge_port_init(struct net_device *ndev);
+
+#endif /* __XGENE_ENET_V2_ENET_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
new file mode 100644
index 000000000000..b6666e418e79
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
@@ -0,0 +1,187 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+#define XGE_STAT(m) { #m, offsetof(struct xge_pdata, stats.m) }
+#define XGE_EXTD_STAT(m, n) \
+ { \
+ #m, \
+ n, \
+ 0 \
+ }
+
+static const struct xge_gstrings_stats gstrings_stats[] = {
+ XGE_STAT(rx_packets),
+ XGE_STAT(tx_packets),
+ XGE_STAT(rx_bytes),
+ XGE_STAT(tx_bytes),
+ XGE_STAT(rx_errors)
+};
+
+static struct xge_gstrings_extd_stats gstrings_extd_stats[] = {
+ XGE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64),
+ XGE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127),
+ XGE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255),
+ XGE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511),
+ XGE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K),
+ XGE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX),
+ XGE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV),
+ XGE_EXTD_STAT(rx_fcs_error_cntr, RFCS),
+ XGE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA),
+ XGE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA),
+ XGE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF),
+ XGE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF),
+ XGE_EXTD_STAT(rx_unk_opcode_cntr, RXUO),
+ XGE_EXTD_STAT(rx_align_err_cntr, RALN),
+ XGE_EXTD_STAT(rx_frame_len_err_cntr, RFLR),
+ XGE_EXTD_STAT(rx_code_err_cntr, RCDE),
+ XGE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE),
+ XGE_EXTD_STAT(rx_undersize_pkt_cntr, RUND),
+ XGE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR),
+ XGE_EXTD_STAT(rx_fragments_cntr, RFRG),
+ XGE_EXTD_STAT(rx_jabber_cntr, RJBR),
+ XGE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP),
+ XGE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA),
+ XGE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA),
+ XGE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF),
+ XGE_EXTD_STAT(tx_defer_pkt_cntr, TDFR),
+ XGE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF),
+ XGE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL),
+ XGE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL),
+ XGE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL),
+ XGE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL),
+ XGE_EXTD_STAT(tx_total_col_cntr, TNCL),
+ XGE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH),
+ XGE_EXTD_STAT(tx_drop_frame_cntr, TDRP),
+ XGE_EXTD_STAT(tx_jabber_frame_cntr, TJBR),
+ XGE_EXTD_STAT(tx_fcs_error_cntr, TFCS),
+ XGE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF),
+ XGE_EXTD_STAT(tx_oversize_frame_cntr, TOVR),
+ XGE_EXTD_STAT(tx_undersize_frame_cntr, TUND),
+ XGE_EXTD_STAT(tx_fragments_cntr, TFRG)
+};
+
+#define XGE_STATS_LEN ARRAY_SIZE(gstrings_stats)
+#define XGE_EXTD_STATS_LEN ARRAY_SIZE(gstrings_extd_stats)
+
+static void xge_mac_get_extd_stats(struct xge_pdata *pdata)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < XGE_EXTD_STATS_LEN; i++) {
+ data = xge_rd_csr(pdata, gstrings_extd_stats[i].addr);
+ gstrings_extd_stats[i].value += data;
+ }
+}
+
+static void xge_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct platform_device *pdev = pdata->pdev;
+
+ strcpy(info->driver, "xgene-enet-v2");
+ strcpy(info->version, XGENE_ENET_V2_VERSION);
+ snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
+ sprintf(info->bus_info, "%s", pdev->name);
+}
+
+static void xge_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < XGE_STATS_LEN; i++) {
+ memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < XGE_EXTD_STATS_LEN; i++) {
+ memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+static int xge_get_sset_count(struct net_device *ndev, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ return XGE_STATS_LEN + XGE_EXTD_STATS_LEN;
+}
+
+static void xge_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *dummy,
+ u64 *data)
+{
+ void *pdata = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < XGE_STATS_LEN; i++)
+ *data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
+
+ xge_mac_get_extd_stats(pdata);
+
+ for (i = 0; i < XGE_EXTD_STATS_LEN; i++)
+ *data++ = gstrings_extd_stats[i].value;
+}
+
+static int xge_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_get(phydev, cmd);
+}
+
+static int xge_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_set(phydev, cmd);
+}
+
+static const struct ethtool_ops xge_ethtool_ops = {
+ .get_drvinfo = xge_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = xge_get_strings,
+ .get_sset_count = xge_get_sset_count,
+ .get_ethtool_stats = xge_get_ethtool_stats,
+ .get_link_ksettings = xge_get_link_ksettings,
+ .set_link_ksettings = xge_set_link_ksettings,
+};
+
+void xge_set_ethtool_ops(struct net_device *ndev)
+{
+ ndev->ethtool_ops = &xge_ethtool_ops;
+}
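Note: gstrings_extd_stats[] is file-scope and its .value accumulators are
updated in place by xge_mac_get_extd_stats(), so two xge interfaces would
sum their hardware counters into the same array. A per-device variant
would keep the accumulators in the private data, sketched here; the
extd_stats[] field is an assumed addition to struct xge_pdata, not part of
this patch:

	static void xge_mac_get_extd_stats(struct xge_pdata *pdata)
	{
		int i;

		for (i = 0; i < XGE_EXTD_STATS_LEN; i++)
			pdata->extd_stats[i] +=
				xge_rd_csr(pdata, gstrings_extd_stats[i].addr);
	}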
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.h b/drivers/net/ethernet/apm/xgene-v2/ethtool.h
new file mode 100644
index 000000000000..54b48d5561b8
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.h
@@ -0,0 +1,78 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_ETHTOOL_H__
+#define __XGENE_ENET_V2_ETHTOOL_H__
+
+struct xge_gstrings_stats {
+ char name[ETH_GSTRING_LEN];
+ int offset;
+};
+
+struct xge_gstrings_extd_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 addr;
+ u32 value;
+};
+
+#define TR64 0xa080
+#define TR127 0xa084
+#define TR255 0xa088
+#define TR511 0xa08c
+#define TR1K 0xa090
+#define TRMAX 0xa094
+#define TRMGV 0xa098
+#define RFCS 0xa0a4
+#define RMCA 0xa0a8
+#define RBCA 0xa0ac
+#define RXCF 0xa0b0
+#define RXPF 0xa0b4
+#define RXUO 0xa0b8
+#define RALN 0xa0bc
+#define RFLR 0xa0c0
+#define RCDE 0xa0c4
+#define RCSE 0xa0c8
+#define RUND 0xa0cc
+#define ROVR 0xa0d0
+#define RFRG 0xa0d4
+#define RJBR 0xa0d8
+#define RDRP 0xa0dc
+#define TMCA 0xa0e8
+#define TBCA 0xa0ec
+#define TXPF 0xa0f0
+#define TDFR 0xa0f4
+#define TEDF 0xa0f8
+#define TSCL 0xa0fc
+#define TMCL 0xa100
+#define TLCL 0xa104
+#define TXCL 0xa108
+#define TNCL 0xa10c
+#define TPFH 0xa110
+#define TDRP 0xa114
+#define TJBR 0xa118
+#define TFCS 0xa11c
+#define TXCF 0xa120
+#define TOVR 0xa124
+#define TUND 0xa128
+#define TFRG 0xa12c
+
+void xge_set_ethtool_ops(struct net_device *ndev);
+
+#endif /* __XGENE_ENET_V2_ETHTOOL_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
new file mode 100644
index 000000000000..ee431e397e57
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -0,0 +1,116 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+void xge_mac_reset(struct xge_pdata *pdata)
+{
+ xge_wr_csr(pdata, MAC_CONFIG_1, SOFT_RESET);
+ xge_wr_csr(pdata, MAC_CONFIG_1, 0);
+}
+
+void xge_mac_set_speed(struct xge_pdata *pdata)
+{
+ u32 icm0, icm2, ecm0, mc2;
+ u32 intf_ctrl, rgmii;
+
+ icm0 = xge_rd_csr(pdata, ICM_CONFIG0_REG_0);
+ icm2 = xge_rd_csr(pdata, ICM_CONFIG2_REG_0);
+ ecm0 = xge_rd_csr(pdata, ECM_CONFIG0_REG_0);
+ rgmii = xge_rd_csr(pdata, RGMII_REG_0);
+ mc2 = xge_rd_csr(pdata, MAC_CONFIG_2);
+ intf_ctrl = xge_rd_csr(pdata, INTERFACE_CONTROL);
+ icm2 |= CFG_WAITASYNCRD_EN;
+
+ switch (pdata->phy_speed) {
+ case SPEED_10:
+ SET_REG_BITS(&mc2, INTF_MODE, 1);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 0);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 0);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 500);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+ break;
+ case SPEED_100:
+ SET_REG_BITS(&mc2, INTF_MODE, 1);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 1);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 1);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 80);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 0);
+ break;
+ default:
+ SET_REG_BITS(&mc2, INTF_MODE, 2);
+ SET_REG_BITS(&intf_ctrl, HD_MODE, 2);
+ SET_REG_BITS(&icm0, CFG_MACMODE, 2);
+ SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 16);
+ SET_REG_BIT(&rgmii, CFG_SPEED_125, 1);
+ break;
+ }
+
+ mc2 |= FULL_DUPLEX | CRC_EN | PAD_CRC;
+ SET_REG_BITS(&ecm0, CFG_WFIFOFULLTHR, 0x32);
+
+ xge_wr_csr(pdata, MAC_CONFIG_2, mc2);
+ xge_wr_csr(pdata, INTERFACE_CONTROL, intf_ctrl);
+ xge_wr_csr(pdata, RGMII_REG_0, rgmii);
+ xge_wr_csr(pdata, ICM_CONFIG0_REG_0, icm0);
+ xge_wr_csr(pdata, ICM_CONFIG2_REG_0, icm2);
+ xge_wr_csr(pdata, ECM_CONFIG0_REG_0, ecm0);
+}
+
+void xge_mac_set_station_addr(struct xge_pdata *pdata)
+{
+ u8 *dev_addr = pdata->ndev->dev_addr;
+ u32 addr0, addr1;
+
+ addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+ (dev_addr[1] << 8) | dev_addr[0];
+ addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+ xge_wr_csr(pdata, STATION_ADDR0, addr0);
+ xge_wr_csr(pdata, STATION_ADDR1, addr1);
+}
+
+void xge_mac_init(struct xge_pdata *pdata)
+{
+ xge_mac_reset(pdata);
+ xge_mac_set_speed(pdata);
+ xge_mac_set_station_addr(pdata);
+}
+
+void xge_mac_enable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+ data |= TX_EN | RX_EN;
+ xge_wr_csr(pdata, MAC_CONFIG_1, data);
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+}
+
+void xge_mac_disable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = xge_rd_csr(pdata, MAC_CONFIG_1);
+ data &= ~(TX_EN | RX_EN);
+ xge_wr_csr(pdata, MAC_CONFIG_1, data);
+}
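Note: the trailing xge_rd_csr(MAC_CONFIG_1) in xge_mac_enable(), whose
result is otherwise unused, reads as the usual read-back idiom that
flushes the posted enable write to the device before returning. The idiom
in isolation, sketched:

	xge_wr_csr(pdata, MAC_CONFIG_1, data);	/* posted write */
	(void)xge_rd_csr(pdata, MAC_CONFIG_1);	/* read back to flush */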
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.h b/drivers/net/ethernet/apm/xgene-v2/mac.h
new file mode 100644
index 000000000000..3c83fa617356
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.h
@@ -0,0 +1,107 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAC_H__
+#define __XGENE_ENET_V2_MAC_H__
+
+/* Register offsets */
+#define MAC_CONFIG_1 0xa000
+#define MAC_CONFIG_2 0xa004
+#define MII_MGMT_CONFIG 0xa020
+#define MII_MGMT_COMMAND 0xa024
+#define MII_MGMT_ADDRESS 0xa028
+#define MII_MGMT_CONTROL 0xa02c
+#define MII_MGMT_STATUS 0xa030
+#define MII_MGMT_INDICATORS 0xa034
+#define INTERFACE_CONTROL 0xa038
+#define STATION_ADDR0 0xa040
+#define STATION_ADDR1 0xa044
+
+#define RGMII_REG_0 0x27e0
+#define ICM_CONFIG0_REG_0 0x2c00
+#define ICM_CONFIG2_REG_0 0x2c08
+#define ECM_CONFIG0_REG_0 0x2d00
+
+/* Register fields */
+#define SOFT_RESET BIT(31)
+#define TX_EN BIT(0)
+#define RX_EN BIT(2)
+#define PAD_CRC BIT(2)
+#define CRC_EN BIT(1)
+#define FULL_DUPLEX BIT(0)
+
+#define INTF_MODE_POS 8
+#define INTF_MODE_LEN 2
+#define HD_MODE_POS 25
+#define HD_MODE_LEN 2
+#define CFG_MACMODE_POS 18
+#define CFG_MACMODE_LEN 2
+#define CFG_WAITASYNCRD_POS 0
+#define CFG_WAITASYNCRD_LEN 16
+#define CFG_SPEED_125_POS 24
+#define CFG_WFIFOFULLTHR_POS 0
+#define CFG_WFIFOFULLTHR_LEN 7
+#define MGMT_CLOCK_SEL_POS 0
+#define MGMT_CLOCK_SEL_LEN 3
+#define PHY_ADDR_POS 8
+#define PHY_ADDR_LEN 5
+#define REG_ADDR_POS 0
+#define REG_ADDR_LEN 5
+#define MII_MGMT_BUSY BIT(0)
+#define MII_READ_CYCLE BIT(0)
+#define CFG_WAITASYNCRD_EN BIT(16)
+
+static inline void xgene_set_reg_bits(u32 *var, int pos, int len, u32 val)
+{
+ u32 mask = GENMASK(pos + len, pos);
+
+ *var &= ~mask;
+ *var |= ((val << pos) & mask);
+}
+
+static inline u32 xgene_get_reg_bits(u32 var, int pos, int len)
+{
+ u32 mask = GENMASK(pos + len, pos);
+
+ return (var & mask) >> pos;
+}
+
+#define SET_REG_BITS(var, field, val) \
+ xgene_set_reg_bits(var, field ## _POS, field ## _LEN, val)
+
+#define SET_REG_BIT(var, field, val) \
+ xgene_set_reg_bits(var, field ## _POS, 1, val)
+
+#define GET_REG_BITS(var, field) \
+ xgene_get_reg_bits(var, field ## _POS, field ## _LEN)
+
+#define GET_REG_BIT(var, field) ((var) & (field))
+
+struct xge_pdata;
+
+void xge_mac_reset(struct xge_pdata *pdata);
+void xge_mac_set_speed(struct xge_pdata *pdata);
+void xge_mac_enable(struct xge_pdata *pdata);
+void xge_mac_disable(struct xge_pdata *pdata);
+void xge_mac_init(struct xge_pdata *pdata);
+void xge_mac_set_station_addr(struct xge_pdata *pdata);
+
+#endif /* __XGENE_ENET_V2_MAC_H__ */
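Note: in the helpers above, GENMASK(pos + len, pos) spans len + 1 bits,
one more than the field width. With CFG_WAITASYNCRD (POS 0, LEN 16) the
mask also covers BIT(16), i.e. CFG_WAITASYNCRD_EN, so a SET_REG_BITS() on
that field can clobber the enable bit set earlier in xge_mac_set_speed().
A width-exact sketch for comparison (not what the patch does):

	/* mask exactly "len" bits starting at "pos" */
	static inline void xgene_set_reg_bits_exact(u32 *var, int pos,
						    int len, u32 val)
	{
		u32 mask = GENMASK(pos + len - 1, pos);

		*var = (*var & ~mask) | ((val << pos) & mask);
	}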
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
new file mode 100644
index 000000000000..0f2ad50f3bd7
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -0,0 +1,759 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+static const struct acpi_device_id xge_acpi_match[];
+
+static int xge_get_resources(struct xge_pdata *pdata)
+{
+ struct platform_device *pdev;
+ struct net_device *ndev;
+ int phy_mode, ret = 0;
+ struct resource *res;
+ struct device *dev;
+
+ pdev = pdata->pdev;
+ dev = &pdev->dev;
+ ndev = pdata->ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Resource enet_csr not defined\n");
+ return -ENODEV;
+ }
+
+ pdata->resources.base_addr = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!pdata->resources.base_addr) {
+ dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
+ return -ENOMEM;
+ }
+
+ if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ eth_hw_addr_random(ndev);
+
+ memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+ phy_mode = device_get_phy_mode(dev);
+ if (phy_mode < 0) {
+ dev_err(dev, "Unable to get phy-connection-type\n");
+ return phy_mode;
+ }
+ pdata->resources.phy_mode = phy_mode;
+
+ if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
+ dev_err(dev, "Incorrect phy-connection-type specified\n");
+ return -ENODEV;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "Unable to get irq\n");
+ return ret;
+ }
+ pdata->resources.irq = ret;
+
+ return 0;
+}
+
+static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ const u8 slots = XGENE_ENET_NUM_DESC - 1;
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_raw_desc *raw_desc;
+ u64 addr_lo, addr_hi;
+ u8 tail = ring->tail;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 len;
+ int i;
+
+ for (i = 0; i < nbuf; i++) {
+ raw_desc = &ring->raw_desc[tail];
+
+ len = XGENE_ENET_STD_MTU;
+ skb = netdev_alloc_skb(ndev, len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ ring->pkt_info[tail].skb = skb;
+ ring->pkt_info[tail].dma_addr = dma_addr;
+
+ addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+ addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+ SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+ SET_BITS(PKT_ADDRH,
+ upper_32_bits(dma_addr)));
+
+ dma_wmb();
+ raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+ SET_BITS(E, 1));
+ tail = (tail + 1) & slots;
+ }
+
+ ring->tail = tail;
+
+ return 0;
+}
+
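Note: xge_refill_buffers() above fills the descriptor address words in m1
first, issues dma_wmb(), and only then writes m0 with the empty (E) bit,
so the DMA engine can never observe ownership before the buffer address is
visible. The publish idiom in generic form; desc, addr, ctrl and OWN are
illustrative names:

	desc->addr = cpu_to_le64(dma_addr);	/* 1: fill payload fields */
	dma_wmb();				/* 2: order against step 3 */
	desc->ctrl = cpu_to_le64(OWN);		/* 3: hand slot to hardware */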
+static int xge_init_hw(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = xge_port_reset(ndev);
+ if (ret)
+ return ret;
+
+ xge_port_init(ndev);
+ pdata->nbufs = NUM_BUFS;
+
+ return 0;
+}
+
+static irqreturn_t xge_irq(const int irq, void *data)
+{
+ struct xge_pdata *pdata = data;
+
+ if (napi_schedule_prep(&pdata->napi)) {
+ xge_intr_disable(pdata);
+ __napi_schedule(&pdata->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xge_request_irq(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
+
+ ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
+ pdata);
+ if (ret)
+ netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
+
+ return ret;
+}
+
+static void xge_free_irq(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ free_irq(pdata->resources.irq, pdata);
+}
+
+static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
+{
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+ (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
+ return true;
+
+ return false;
+}
+
+static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_addr;
+ u64 addr_lo, addr_hi;
+ void *pkt_buf;
+ u8 tail;
+ u16 len;
+
+ tx_ring = pdata->tx_ring;
+ tail = tx_ring->tail;
+ len = skb_headlen(skb);
+ raw_desc = &tx_ring->raw_desc[tail];
+
+ if (!is_tx_slot_available(raw_desc)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Packet buffers should be 64B aligned */
+ pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!pkt_buf)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ memcpy(pkt_buf, skb->data, len);
+
+ addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+ addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+ SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+ SET_BITS(PKT_ADDRH,
+ upper_32_bits(dma_addr)));
+
+ tx_ring->pkt_info[tail].skb = skb;
+ tx_ring->pkt_info[tail].dma_addr = dma_addr;
+ tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
+
+ dma_wmb();
+
+ raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+ SET_BITS(PKT_SIZE, len) |
+ SET_BITS(E, 0));
+ skb_tx_timestamp(skb);
+ xge_wr_csr(pdata, DMATXCTRL, 1);
+
+ tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
+
+ return NETDEV_TX_OK;
+}
+
+static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
+{
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
+ !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
+ return true;
+
+ return false;
+}
+
+static void xge_txc_poll(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ void *pkt_buf;
+ u32 data;
+ u8 head;
+
+ tx_ring = pdata->tx_ring;
+ head = tx_ring->head;
+
+ data = xge_rd_csr(pdata, DMATXSTATUS);
+ if (!GET_BITS(TXPKTCOUNT, data))
+ return;
+
+ while (1) {
+ raw_desc = &tx_ring->raw_desc[head];
+
+ if (!is_tx_hw_done(raw_desc))
+ break;
+
+ dma_rmb();
+
+ skb = tx_ring->pkt_info[head].skb;
+ dma_addr = tx_ring->pkt_info[head].dma_addr;
+ pkt_buf = tx_ring->pkt_info[head].pkt_buf;
+ pdata->stats.tx_packets++;
+ pdata->stats.tx_bytes += skb->len;
+ dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+ dev_kfree_skb_any(skb);
+
+ /* clear pktstart address and pktsize */
+ raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+ SET_BITS(PKT_SIZE, SLOT_EMPTY));
+ xge_wr_csr(pdata, DMATXSTATUS, 1);
+
+ head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+ }
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ tx_ring->head = head;
+}
+
+static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *rx_ring;
+ struct xge_raw_desc *raw_desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int processed = 0;
+ u8 head, rx_error;
+ int i, ret;
+ u32 data;
+ u16 len;
+
+ rx_ring = pdata->rx_ring;
+ head = rx_ring->head;
+
+ data = xge_rd_csr(pdata, DMARXSTATUS);
+ if (!GET_BITS(RXPKTCOUNT, data))
+ return 0;
+
+ for (i = 0; i < budget; i++) {
+ raw_desc = &rx_ring->raw_desc[head];
+
+ if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+ break;
+
+ dma_rmb();
+
+ skb = rx_ring->pkt_info[head].skb;
+ rx_ring->pkt_info[head].skb = NULL;
+ dma_addr = rx_ring->pkt_info[head].dma_addr;
+ len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
+ dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+ DMA_FROM_DEVICE);
+
+ rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
+ if (unlikely(rx_error)) {
+ pdata->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ pdata->stats.rx_packets++;
+ pdata->stats.rx_bytes += len;
+ napi_gro_receive(&pdata->napi, skb);
+out:
+ ret = xge_refill_buffers(ndev, 1);
+ xge_wr_csr(pdata, DMARXSTATUS, 1);
+ xge_wr_csr(pdata, DMARXCTRL, 1);
+
+ if (ret)
+ break;
+
+ head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+ processed++;
+ }
+
+ rx_ring->head = head;
+
+ return processed;
+}
+
+static void xge_delete_desc_ring(struct net_device *ndev,
+ struct xge_desc_ring *ring)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ u16 size;
+
+ if (!ring)
+ return;
+
+ size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+ if (ring->desc_addr)
+ dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
+
+ kfree(ring->pkt_info);
+ kfree(ring);
+}
+
+static void xge_free_buffers(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ struct device *dev = &pdata->pdev->dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int i;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ skb = ring->pkt_info[i].skb;
+ dma_addr = ring->pkt_info[i].dma_addr;
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void xge_delete_desc_rings(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ xge_txc_poll(ndev);
+ xge_delete_desc_ring(ndev, pdata->tx_ring);
+
+ xge_rx_poll(ndev, 64);
+ xge_free_buffers(ndev);
+ xge_delete_desc_ring(ndev, pdata->rx_ring);
+}
+
+static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *ring;
+ u16 size;
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ ring->ndev = ndev;
+
+ size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
+ ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
+ GFP_KERNEL);
+ if (!ring->desc_addr)
+ goto err;
+
+ ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
+ GFP_KERNEL);
+ if (!ring->pkt_info)
+ goto err;
+
+ xge_setup_desc(ring);
+
+ return ring;
+
+err:
+ xge_delete_desc_ring(ndev, ring);
+
+ return NULL;
+}
+
+static int xge_create_desc_rings(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_desc_ring *ring;
+ int ret;
+
+ /* create tx ring */
+ ring = xge_create_desc_ring(ndev);
+ if (!ring)
+ goto err;
+
+ pdata->tx_ring = ring;
+ xge_update_tx_desc_addr(pdata);
+
+ /* create rx ring */
+ ring = xge_create_desc_ring(ndev);
+ if (!ring)
+ goto err;
+
+ pdata->rx_ring = ring;
+ xge_update_rx_desc_addr(pdata);
+
+ ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ xge_delete_desc_rings(ndev);
+
+ return -ENOMEM;
+}
+
+static int xge_open(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = xge_create_desc_rings(ndev);
+ if (ret)
+ return ret;
+
+ napi_enable(&pdata->napi);
+ ret = xge_request_irq(ndev);
+ if (ret)
+ return ret;
+
+ xge_intr_enable(pdata);
+ xge_wr_csr(pdata, DMARXCTRL, 1);
+
+ phy_start(ndev->phydev);
+ xge_mac_enable(pdata);
+ netif_start_queue(ndev);
+
+ return 0;
+}
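+
+/* Editor's note, not part of the original patch: xge_open() and
+ * xge_close() are near mirrors -- rings, NAPI, IRQ, interrupt unmask,
+ * RX DMA, PHY, MAC, queue on the way up, and the same steps reversed
+ * on the way down. One caveat worth flagging: if xge_request_irq()
+ * fails here, the rings and enabled NAPI context from the earlier
+ * steps are returned without being unwound.
+ */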
+
+static int xge_close(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ xge_mac_disable(pdata);
+ phy_stop(ndev->phydev);
+
+ xge_intr_disable(pdata);
+ xge_free_irq(ndev);
+ napi_disable(&pdata->napi);
+ xge_delete_desc_rings(ndev);
+
+ return 0;
+}
+
+static int xge_napi(struct napi_struct *napi, const int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct xge_pdata *pdata;
+ int processed;
+
+ pdata = netdev_priv(ndev);
+
+ xge_txc_poll(ndev);
+ processed = xge_rx_poll(ndev, budget);
+
+ if (processed < budget) {
+ napi_complete_done(napi, processed);
+ xge_intr_enable(pdata);
+ }
+
+ return processed;
+}
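+
+/* Editor's note, not part of the original patch: this is the standard
+ * NAPI contract -- while a full budget's worth of packets is consumed
+ * the poller returns budget and is rescheduled with the device
+ * interrupt still masked; only when it processes less than budget
+ * does it call napi_complete_done() and re-enable interrupts.
+ */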
+
+static int xge_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ int ret;
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ return ret;
+
+ xge_mac_set_station_addr(pdata);
+
+ return 0;
+}
+
+static bool is_tx_pending(struct xge_raw_desc *raw_desc)
+{
+ if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+ return true;
+
+ return false;
+}
+
+static void xge_free_pending_skb(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct xge_desc_ring *tx_ring;
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ void *pkt_buf;
+ int i;
+
+ tx_ring = pdata->tx_ring;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ raw_desc = &tx_ring->raw_desc[i];
+
+ if (!is_tx_pending(raw_desc))
+ continue;
+
+ skb = tx_ring->pkt_info[i].skb;
+ dma_addr = tx_ring->pkt_info[i].dma_addr;
+ pkt_buf = tx_ring->pkt_info[i].pkt_buf;
+ dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void xge_timeout(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+
+ rtnl_lock();
+
+ if (!netif_running(ndev))
+ goto out;
+
+ netif_stop_queue(ndev);
+ xge_intr_disable(pdata);
+ napi_disable(&pdata->napi);
+
+ xge_wr_csr(pdata, DMATXCTRL, 0);
+ xge_txc_poll(ndev);
+ xge_free_pending_skb(ndev);
+ xge_wr_csr(pdata, DMATXSTATUS, ~0U);
+
+ xge_setup_desc(pdata->tx_ring);
+ xge_update_tx_desc_addr(pdata);
+ xge_mac_init(pdata);
+
+ napi_enable(&pdata->napi);
+ xge_intr_enable(pdata);
+ xge_mac_enable(pdata);
+ netif_start_queue(ndev);
+
+out:
+ rtnl_unlock();
+}
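+
+/* Editor's note, not part of the original patch: the timeout recovery
+ * is TX-only -- stop the queue, halt TX DMA (DMATXCTRL = 0), drain
+ * completions, free descriptors still marked pending, ack everything
+ * in DMATXSTATUS with ~0U, then rebuild the TX ring in place and
+ * restart the MAC. The RX ring itself is left intact, though NAPI is
+ * paused for the duration.
+ */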
+
+static void xge_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct xge_stats *stats = &pdata->stats;
+
+ storage->tx_packets += stats->tx_packets;
+ storage->tx_bytes += stats->tx_bytes;
+
+ storage->rx_packets += stats->rx_packets;
+ storage->rx_bytes += stats->rx_bytes;
+ storage->rx_errors += stats->rx_errors;
+}
+
+static const struct net_device_ops xgene_ndev_ops = {
+ .ndo_open = xge_open,
+ .ndo_stop = xge_close,
+ .ndo_start_xmit = xge_start_xmit,
+ .ndo_set_mac_address = xge_set_mac_addr,
+ .ndo_tx_timeout = xge_timeout,
+ .ndo_get_stats64 = xge_get_stats64,
+};
+
+static int xge_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct xge_pdata *pdata;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(*pdata));
+ if (!ndev)
+ return -ENOMEM;
+
+ pdata = netdev_priv(ndev);
+
+ pdata->pdev = pdev;
+ pdata->ndev = ndev;
+ SET_NETDEV_DEV(ndev, dev);
+ platform_set_drvdata(pdev, pdata);
+ ndev->netdev_ops = &xgene_ndev_ops;
+
+ ndev->features |= NETIF_F_GSO |
+ NETIF_F_GRO;
+
+ ret = xge_get_resources(pdata);
+ if (ret)
+ goto err;
+
+ ndev->hw_features = ndev->features;
+ xge_set_ethtool_ops(ndev);
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ netdev_err(ndev, "No usable DMA configuration\n");
+ goto err;
+ }
+
+ ret = xge_init_hw(ndev);
+ if (ret)
+ goto err;
+
+ ret = xge_mdio_config(ndev);
+ if (ret)
+ goto err;
+
+ netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to register netdev\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int xge_remove(struct platform_device *pdev)
+{
+ struct xge_pdata *pdata;
+ struct net_device *ndev;
+
+ pdata = platform_get_drvdata(pdev);
+ ndev = pdata->ndev;
+
+ rtnl_lock();
+ if (netif_running(ndev))
+ dev_close(ndev);
+ rtnl_unlock();
+
+ xge_mdio_remove(ndev);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static void xge_shutdown(struct platform_device *pdev)
+{
+ struct xge_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ if (!pdata)
+ return;
+
+ if (!pdata->ndev)
+ return;
+
+ xge_remove(pdev);
+}
+
+static const struct acpi_device_id xge_acpi_match[] = {
+ { "APMC0D80" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
+
+static struct platform_driver xge_driver = {
+ .driver = {
+ .name = "xgene-enet-v2",
+ .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ },
+ .probe = xge_probe,
+ .remove = xge_remove,
+ .shutdown = xge_shutdown,
+};
+module_platform_driver(xge_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
+MODULE_VERSION(XGENE_ENET_V2_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
new file mode 100644
index 000000000000..969b258cb7de
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -0,0 +1,80 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_MAIN_H__
+#define __XGENE_ENET_V2_MAIN_H__
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/efi.h>
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/prefetch.h>
+#include <linux/phy.h>
+#include <net/ip.h>
+#include "mac.h"
+#include "enet.h"
+#include "ring.h"
+#include "ethtool.h"
+
+#define XGENE_ENET_V2_VERSION "v1.0"
+#define XGENE_ENET_STD_MTU 1536
+#define XGENE_ENET_MIN_FRAME 60
+#define IRQ_ID_SIZE 16
+
+struct xge_resource {
+ void __iomem *base_addr;
+ int phy_mode;
+ u32 irq;
+};
+
+struct xge_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+};
+
+/* ethernet private data */
+struct xge_pdata {
+ struct xge_resource resources;
+ struct xge_desc_ring *tx_ring;
+ struct xge_desc_ring *rx_ring;
+ struct platform_device *pdev;
+ char irq_name[IRQ_ID_SIZE];
+ struct mii_bus *mdio_bus;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct xge_stats stats;
+ int phy_speed;
+ u8 nbufs;
+};
+
+int xge_mdio_config(struct net_device *ndev);
+void xge_mdio_remove(struct net_device *ndev);
+
+#endif /* __XGENE_ENET_V2_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
new file mode 100644
index 000000000000..f5fe3bb2e59d
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -0,0 +1,168 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+static int xge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+{
+ struct xge_pdata *pdata = bus->priv;
+ u32 done, val = 0;
+ u8 wait = 10;
+
+ SET_REG_BITS(&val, PHY_ADDR, phy_id);
+ SET_REG_BITS(&val, REG_ADDR, reg);
+ xge_wr_csr(pdata, MII_MGMT_ADDRESS, val);
+
+ xge_wr_csr(pdata, MII_MGMT_CONTROL, data);
+ do {
+ usleep_range(5, 10);
+ done = xge_rd_csr(pdata, MII_MGMT_INDICATORS);
+ } while ((done & MII_MGMT_BUSY) && wait--);
+
+ if (done & MII_MGMT_BUSY) {
+ dev_err(&bus->dev, "MII_MGMT write failed\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int xge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct xge_pdata *pdata = bus->priv;
+ u32 data, done, val = 0;
+ u8 wait = 10;
+
+ SET_REG_BITS(&val, PHY_ADDR, phy_id);
+ SET_REG_BITS(&val, REG_ADDR, reg);
+ xge_wr_csr(pdata, MII_MGMT_ADDRESS, val);
+
+ xge_wr_csr(pdata, MII_MGMT_COMMAND, MII_READ_CYCLE);
+ do {
+ usleep_range(5, 10);
+ done = xge_rd_csr(pdata, MII_MGMT_INDICATORS);
+ } while ((done & MII_MGMT_BUSY) && wait--);
+
+ if (done & MII_MGMT_BUSY) {
+ dev_err(&bus->dev, "MII_MGMT read failed\n");
+ return -ETIMEDOUT;
+ }
+
+ data = xge_rd_csr(pdata, MII_MGMT_STATUS);
+ xge_wr_csr(pdata, MII_MGMT_COMMAND, 0);
+
+ return data;
+}
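+
+/* Editor's note, not part of the original patch: both accessors use
+ * the same bounded-poll idiom -- kick off the cycle, then poll
+ * MII_MGMT_INDICATORS about ten times at 5-10us intervals and return
+ * -ETIMEDOUT if MII_MGMT_BUSY never clears, so a wedged bus costs on
+ * the order of 100us rather than hanging the caller.
+ */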
+
+static void xge_adjust_link(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+
+ if (phydev->link) {
+ if (pdata->phy_speed != phydev->speed) {
+ pdata->phy_speed = phydev->speed;
+ xge_mac_set_speed(pdata);
+ xge_mac_enable(pdata);
+ phy_print_status(phydev);
+ }
+ } else {
+ if (pdata->phy_speed != SPEED_UNKNOWN) {
+ pdata->phy_speed = SPEED_UNKNOWN;
+ xge_mac_disable(pdata);
+ phy_print_status(phydev);
+ }
+ }
+}
+
+void xge_mdio_remove(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct mii_bus *mdio_bus = pdata->mdio_bus;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+
+ if (mdio_bus->state == MDIOBUS_REGISTERED)
+ mdiobus_unregister(mdio_bus);
+
+ mdiobus_free(mdio_bus);
+}
+
+int xge_mdio_config(struct net_device *ndev)
+{
+ struct xge_pdata *pdata = netdev_priv(ndev);
+ struct device *dev = &pdata->pdev->dev;
+ struct mii_bus *mdio_bus;
+ struct phy_device *phydev;
+ int ret;
+
+ mdio_bus = mdiobus_alloc();
+ if (!mdio_bus)
+ return -ENOMEM;
+
+ mdio_bus->name = "APM X-Gene Ethernet (v2) MDIO Bus";
+ mdio_bus->read = xge_mdio_read;
+ mdio_bus->write = xge_mdio_write;
+ mdio_bus->priv = pdata;
+ mdio_bus->parent = dev;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
+ pdata->mdio_bus = mdio_bus;
+
+ mdio_bus->phy_mask = 0x1;
+ ret = mdiobus_register(mdio_bus);
+ if (ret)
+ goto err;
+
+ phydev = phy_find_first(mdio_bus);
+ if (!phydev) {
+ dev_err(dev, "no PHY found\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ phydev = phy_connect(ndev, phydev_name(phydev),
+ &xge_adjust_link,
+ pdata->resources.phy_mode);
+ if (IS_ERR(phydev)) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ ret = PTR_ERR(phydev);
+ goto err;
+ }
+
+ phydev->supported &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_AUI |
+ SUPPORTED_MII |
+ SUPPORTED_FIBRE |
+ SUPPORTED_BNC);
+ phydev->advertising = phydev->supported;
+ pdata->phy_speed = SPEED_UNKNOWN;
+
+ return 0;
+err:
+ xge_mdio_remove(ndev);
+
+ return ret;
+}
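+
+/* Editor's note, not part of the original patch: phy_mask is a skip
+ * mask -- a set bit excludes that address from the bus scan, so 0x1
+ * here skips address 0 and phy_find_first() returns the first PHY
+ * that did probe. The supported-mode mask above then strips every
+ * link mode below 1000Base-T full duplex, which appears to be the
+ * only speed this MAC drives.
+ */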
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.c b/drivers/net/ethernet/apm/xgene-v2/ring.c
new file mode 100644
index 000000000000..38810828f8f0
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.c
@@ -0,0 +1,81 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+
+/* create circular linked list of descriptors */
+void xge_setup_desc(struct xge_desc_ring *ring)
+{
+ struct xge_raw_desc *raw_desc;
+ dma_addr_t dma_h, next_dma;
+ u16 offset;
+ int i;
+
+ for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
+ raw_desc = &ring->raw_desc[i];
+
+ offset = (i + 1) & (XGENE_ENET_NUM_DESC - 1);
+ next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);
+
+ raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+ SET_BITS(PKT_SIZE, SLOT_EMPTY));
+ dma_h = upper_32_bits(next_dma);
+ raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, next_dma) |
+ SET_BITS(NEXT_DESC_ADDRH, dma_h));
+ }
+}
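+
+/* Editor's note, not part of the original patch: each slot's m1 word
+ * carries the DMA address of the next slot, with the (i + 1) masked
+ * wrap making slot N-1 point back at slot 0 -- hence the "circular
+ * linked list". m0 starts with E=1 and PKT_SIZE=SLOT_EMPTY so the
+ * engine can tell an unused slot from a filled one. For a ring base
+ * B and 64-byte descriptors, slot 3 links to B + 4 * 64.
+ */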
+
+void xge_update_tx_desc_addr(struct xge_pdata *pdata)
+{
+ struct xge_desc_ring *ring = pdata->tx_ring;
+ dma_addr_t dma_addr = ring->dma_addr;
+
+ xge_wr_csr(pdata, DMATXDESCL, dma_addr);
+ xge_wr_csr(pdata, DMATXDESCH, upper_32_bits(dma_addr));
+
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+void xge_update_rx_desc_addr(struct xge_pdata *pdata)
+{
+ struct xge_desc_ring *ring = pdata->rx_ring;
+ dma_addr_t dma_addr = ring->dma_addr;
+
+ xge_wr_csr(pdata, DMARXDESCL, dma_addr);
+ xge_wr_csr(pdata, DMARXDESCH, upper_32_bits(dma_addr));
+
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+void xge_intr_enable(struct xge_pdata *pdata)
+{
+ u32 data;
+
+ data = RX_PKT_RCVD | TX_PKT_SENT;
+ xge_wr_csr(pdata, DMAINTRMASK, data);
+}
+
+void xge_intr_disable(struct xge_pdata *pdata)
+{
+ xge_wr_csr(pdata, DMAINTRMASK, 0);
+}
diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.h b/drivers/net/ethernet/apm/xgene-v2/ring.h
new file mode 100644
index 000000000000..abc8c9a84954
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/ring.h
@@ -0,0 +1,119 @@
+/*
+ * Applied Micro X-Gene SoC Ethernet v2 Driver
+ *
+ * Copyright (c) 2017, Applied Micro Circuits Corporation
+ * Author(s): Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_V2_RING_H__
+#define __XGENE_ENET_V2_RING_H__
+
+#define XGENE_ENET_DESC_SIZE 64
+#define XGENE_ENET_NUM_DESC 256
+#define NUM_BUFS 8
+#define SLOT_EMPTY 0xfff
+
+#define DMATXCTRL 0xa180
+#define DMATXDESCL 0xa184
+#define DMATXDESCH 0xa1a0
+#define DMATXSTATUS 0xa188
+#define DMARXCTRL 0xa18c
+#define DMARXDESCL 0xa190
+#define DMARXDESCH 0xa1a4
+#define DMARXSTATUS 0xa194
+#define DMAINTRMASK 0xa198
+#define DMAINTERRUPT 0xa19c
+
+#define D_POS 62
+#define D_LEN 2
+#define E_POS 63
+#define E_LEN 1
+#define PKT_ADDRL_POS 0
+#define PKT_ADDRL_LEN 32
+#define PKT_ADDRH_POS 32
+#define PKT_ADDRH_LEN 10
+#define PKT_SIZE_POS 32
+#define PKT_SIZE_LEN 12
+#define NEXT_DESC_ADDRL_POS 0
+#define NEXT_DESC_ADDRL_LEN 32
+#define NEXT_DESC_ADDRH_POS 48
+#define NEXT_DESC_ADDRH_LEN 10
+
+#define TXPKTCOUNT_POS 16
+#define TXPKTCOUNT_LEN 8
+#define RXPKTCOUNT_POS 16
+#define RXPKTCOUNT_LEN 8
+
+#define TX_PKT_SENT BIT(0)
+#define TX_BUS_ERROR BIT(3)
+#define RX_PKT_RCVD BIT(4)
+#define RX_BUS_ERROR BIT(7)
+#define RXSTATUS_RXPKTRCVD BIT(0)
+
+struct xge_raw_desc {
+ __le64 m0;
+ __le64 m1;
+ __le64 m2;
+ __le64 m3;
+ __le64 m4;
+ __le64 m5;
+ __le64 m6;
+ __le64 m7;
+};
+
+struct pkt_info {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *pkt_buf;
+};
+
+/* software context of a descriptor ring */
+struct xge_desc_ring {
+ struct net_device *ndev;
+ dma_addr_t dma_addr;
+ u8 head;
+ u8 tail;
+ union {
+ void *desc_addr;
+ struct xge_raw_desc *raw_desc;
+ };
+ struct pkt_info *pkt_info;
+};
+
+static inline u64 xge_set_desc_bits(int pos, int len, u64 val)
+{
+ return (val & ((1ULL << len) - 1)) << pos;
+}
+
+static inline u64 xge_get_desc_bits(int pos, int len, u64 src)
+{
+ return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define SET_BITS(field, val) \
+ xge_set_desc_bits(field ## _POS, field ## _LEN, val)
+
+#define GET_BITS(field, src) \
+ xge_get_desc_bits(field ## _POS, field ## _LEN, src)
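+
+/* Editor's note, not part of the original patch: a worked example of
+ * the field helpers. With PKT_SIZE_POS = 32 and PKT_SIZE_LEN = 12:
+ *
+ *	SET_BITS(PKT_SIZE, 0x123) == 0x0000012300000000ULL
+ *	GET_BITS(PKT_SIZE, 0x0000012300000000ULL) == 0x123
+ *
+ * set masks the value to LEN bits and shifts it up to POS; get shifts
+ * down and masks, so any in-range value round-trips losslessly.
+ */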
+
+void xge_setup_desc(struct xge_desc_ring *ring);
+void xge_update_tx_desc_addr(struct xge_pdata *pdata);
+void xge_update_rx_desc_addr(struct xge_pdata *pdata);
+void xge_intr_enable(struct xge_pdata *pdata);
+void xge_intr_disable(struct xge_pdata *pdata);
+
+#endif /* __XGENE_ENET_V2_RING_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 06e681697c17..2a835e07adfb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -494,7 +494,7 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
break;
}
- mc2 |= FULL_DUPLEX2 | PAD_CRC;
+ mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
@@ -623,6 +623,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
cb |= CFG_CLE_BYPASS_EN0;
CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+ CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 5f83037bb96b..d250bfe94d24 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -163,6 +163,7 @@ enum xgene_enet_rm {
#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
+#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4)
#define CFG_CLE_NXTFPSEL0_SET(dst, val) xgene_set_bits(dst, val, 20, 4)
@@ -215,6 +216,7 @@ enum xgene_enet_rm {
#define ENET_GHD_MODE BIT(26)
#define FULL_DUPLEX2 BIT(0)
#define PAD_CRC BIT(2)
+#define LENGTH_CHK BIT(4)
#define SCAN_AUTO_INCR BIT(5)
#define TBYT_ADDR 0x38
#define TPKT_ADDR 0x39
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index b3568c453b14..5f37ed3506d5 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -601,14 +601,24 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void xgene_enet_skip_csum(struct sk_buff *skb)
+static void xgene_enet_rx_csum(struct sk_buff *skb)
{
+ struct net_device *ndev = skb->dev;
struct iphdr *iph = ip_hdr(skb);
- if (!ip_is_fragment(iph) ||
- (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
+ if (!(ndev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+
+ if (ip_is_fragment(iph))
+ return;
+
+ if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+ return;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
@@ -648,12 +658,24 @@ static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
buf_pool->head = head;
}
+/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
+static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
+{
+ if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
+ if (ntohs(eth_hdr(skb)->h_proto) < 46)
+ return true;
+ }
+
+ return false;
+}
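+
+/* Editor's note, not part of the original patch: h_proto values below
+ * 0x600 are 802.3 length fields rather than EtherTypes, and a length
+ * under 46 means the payload was padded up to the 64-byte minimum
+ * frame -- exactly the case INGRESS_PKT_LEN flags, so such frames are
+ * accepted instead of dropped.
+ */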
+
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
struct xgene_enet_raw_desc *raw_desc,
struct xgene_enet_raw_desc *exp_desc)
{
struct xgene_enet_desc_ring *buf_pool, *page_pool;
u32 datalen, frag_size, skb_index;
+ struct xgene_enet_pdata *pdata;
struct net_device *ndev;
dma_addr_t dma_addr;
struct sk_buff *skb;
@@ -666,6 +688,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
bool nv;
ndev = rx_ring->ndev;
+ pdata = netdev_priv(ndev);
dev = ndev_to_dev(rx_ring->ndev);
buf_pool = rx_ring->buf_pool;
page_pool = rx_ring->page_pool;
@@ -676,30 +699,29 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
skb = buf_pool->rx_skb[skb_index];
buf_pool->rx_skb[skb_index] = NULL;
+ datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+ skb_put(skb, datalen);
+ prefetch(skb->data - NET_IP_ALIGN);
+ skb->protocol = eth_type_trans(skb, ndev);
+
/* checking for error */
- status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+ status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
- if (unlikely(status > 2)) {
- dev_kfree_skb_any(skb);
- xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
- xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
- status);
- ret = -EIO;
- goto out;
+ if (unlikely(status)) {
+ if (!xgene_enet_errata_10GE_8(skb, datalen, status)) {
+ dev_kfree_skb_any(skb);
+ xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
+ xgene_enet_parse_error(rx_ring, pdata, status);
+ goto out;
+ }
}
- /* strip off CRC as HW isn't doing this */
- datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
-
nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
- if (!nv)
+ if (!nv) {
+ /* strip off CRC as HW isn't doing this */
datalen -= 4;
-
- skb_put(skb, datalen);
- prefetch(skb->data - NET_IP_ALIGN);
-
- if (!nv)
goto skip_jumbo;
+ }
slots = page_pool->slots - 1;
head = page_pool->head;
@@ -728,11 +750,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
skip_jumbo:
skb_checksum_none_assert(skb);
- skb->protocol = eth_type_trans(skb, ndev);
- if (likely((ndev->features & NETIF_F_IP_CSUM) &&
- skb->protocol == htons(ETH_P_IP))) {
- xgene_enet_skip_csum(skb);
- }
+ xgene_enet_rx_csum(skb);
rx_ring->rx_packets++;
rx_ring->rx_bytes += datalen;
@@ -2039,7 +2057,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_setup_ops(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
- ndev->features |= NETIF_F_TSO;
+ ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
spin_lock_init(&pdata->mss_lock);
}
ndev->hw_features = ndev->features;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 52571741da9f..0d4be2425ebc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -41,6 +41,7 @@
#include "../../../phy/mdio-xgene.h"
#define XGENE_DRV_VERSION "v1.0"
+#define ETHER_MIN_PACKET 64
#define XGENE_ENET_STD_MTU 1536
#define XGENE_ENET_MAX_MTU 9600
#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index ece19e6d68e3..423240c97d39 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -341,8 +341,15 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+ /* Errata 10GE_1 - FIFO threshold default value incorrect */
+ RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH);
xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
+ /* Errata 10GE_1 - FIFO threshold default value incorrect */
+ xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data);
+ RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH);
+ xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data);
+
xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
data |= BIT(12);
xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 03b847ad8937..e644a429ebf4 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -65,6 +65,11 @@
#define XG_DEF_PAUSE_THRES 0x390
#define XG_DEF_PAUSE_OFF_THRES 0x2c0
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
+#define XG_RSIF_CLE_BUFF_THRESH 0x3
+#define RSIF_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 3)
+#define XG_RSIF_CONFIG1_REG_ADDR 0x00b8
+#define XG_RSIF_PLC_CLE_BUFF_THRESH 0x1
+#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 2)
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
#define XG_CFG_BYPASS_ADDR 0x0204
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 23873395f100..68de2f2652f2 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -434,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev)
/* Enable EMAC */
arc_reg_or(priv, R_CTRL, EN_MASK);
- phy_start_aneg(ndev->phydev);
+ phy_start(ndev->phydev);
netif_start_queue(ndev);
@@ -556,6 +556,8 @@ static int arc_emac_stop(struct net_device *ndev)
napi_disable(&priv->napi);
netif_stop_queue(ndev);
+ phy_stop(ndev->phydev);
+
/* Disable interrupts */
arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d4a409139ea2..78c5de467426 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -102,9 +102,6 @@ struct alx_napi {
#define ALX_MAX_NAPIS 8
-#define ALX_FLAG_USING_MSIX BIT(0)
-#define ALX_FLAG_USING_MSI BIT(1)
-
struct alx_priv {
struct net_device *dev;
@@ -112,7 +109,6 @@ struct alx_priv {
/* msi-x vectors */
int num_vec;
- struct msix_entry *msix_entries;
/* all descriptor memory */
struct {
@@ -139,8 +135,6 @@ struct alx_priv {
u16 msg_enable;
- int flags;
-
/* protects hw.stats */
spinlock_t stats_lock;
};
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6a27c2662675..a8c2db881b75 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -314,7 +314,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
napi_complete_done(&np->napi, work);
/* enable interrupt */
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
alx_mask_msix(hw, np->vec_idx, false);
} else {
spin_lock_irqsave(&alx->irq_lock, flags);
@@ -811,7 +811,7 @@ static void alx_config_vector_mapping(struct alx_priv *alx)
u32 tbl[2] = {0, 0};
int i, vector, idx, shift;
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
/* tx mappings */
for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
idx = txq_vec_mapping_shift[i * 2];
@@ -828,29 +828,19 @@ static void alx_config_vector_mapping(struct alx_priv *alx)
alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
-static bool alx_enable_msix(struct alx_priv *alx)
+static int alx_enable_msix(struct alx_priv *alx)
{
- int i, err, num_vec, num_txq, num_rxq;
+ int err, num_vec, num_txq, num_rxq;
num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
num_rxq = 1;
num_vec = max_t(int, num_txq, num_rxq) + 1;
- alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!alx->msix_entries) {
- netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
- return false;
- }
-
- for (i = 0; i < num_vec; i++)
- alx->msix_entries[i].entry = i;
-
- err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
+ err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
+ PCI_IRQ_MSIX);
if (err) {
- kfree(alx->msix_entries);
netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
- return false;
+ return err;
}
alx->num_vec = num_vec;
@@ -858,7 +848,7 @@ static bool alx_enable_msix(struct alx_priv *alx)
alx->num_txq = num_txq;
alx->num_rxq = num_rxq;
- return true;
+ return err;
}
static int alx_request_msix(struct alx_priv *alx)
@@ -866,7 +856,7 @@ static int alx_request_msix(struct alx_priv *alx)
struct net_device *netdev = alx->dev;
int i, err, vector = 0, free_vector = 0;
- err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
+ err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc,
0, netdev->name, alx);
if (err)
goto out_err;
@@ -889,7 +879,7 @@ static int alx_request_msix(struct alx_priv *alx)
sprintf(np->irq_lbl, "%s-unused", netdev->name);
np->vec_idx = vector;
- err = request_irq(alx->msix_entries[vector].vector,
+ err = request_irq(pci_irq_vector(alx->hw.pdev, vector),
alx_intr_msix_ring, 0, np->irq_lbl, np);
if (err)
goto out_free;
@@ -897,47 +887,31 @@ static int alx_request_msix(struct alx_priv *alx)
return 0;
out_free:
- free_irq(alx->msix_entries[free_vector++].vector, alx);
+ free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx);
vector--;
for (i = 0; i < vector; i++)
- free_irq(alx->msix_entries[free_vector++].vector,
+ free_irq(pci_irq_vector(alx->hw.pdev, free_vector++),
alx->qnapi[i]);
out_err:
return err;
}
-static void alx_init_intr(struct alx_priv *alx, bool msix)
+static int alx_init_intr(struct alx_priv *alx)
{
- if (msix) {
- if (alx_enable_msix(alx))
- alx->flags |= ALX_FLAG_USING_MSIX;
- }
+ int ret;
- if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
- alx->num_vec = 1;
- alx->num_napi = 1;
- alx->num_txq = 1;
- alx->num_rxq = 1;
-
- if (!pci_enable_msi(alx->hw.pdev))
- alx->flags |= ALX_FLAG_USING_MSI;
- }
-}
-
-static void alx_disable_advanced_intr(struct alx_priv *alx)
-{
- if (alx->flags & ALX_FLAG_USING_MSIX) {
- kfree(alx->msix_entries);
- pci_disable_msix(alx->hw.pdev);
- alx->flags &= ~ALX_FLAG_USING_MSIX;
- }
+ ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (ret)
+ return ret;
- if (alx->flags & ALX_FLAG_USING_MSI) {
- pci_disable_msi(alx->hw.pdev);
- alx->flags &= ~ALX_FLAG_USING_MSI;
- }
+ alx->num_vec = 1;
+ alx->num_napi = 1;
+ alx->num_txq = 1;
+ alx->num_rxq = 1;
+ return 0;
}
static void alx_irq_enable(struct alx_priv *alx)
@@ -950,10 +924,11 @@ static void alx_irq_enable(struct alx_priv *alx)
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
alx_post_write(hw);
- if (alx->flags & ALX_FLAG_USING_MSIX)
+ if (alx->hw.pdev->msix_enabled) {
/* enable all msix irqs */
for (i = 0; i < alx->num_vec; i++)
alx_mask_msix(hw, i, false);
+ }
}
static void alx_irq_disable(struct alx_priv *alx)
@@ -965,13 +940,13 @@ static void alx_irq_disable(struct alx_priv *alx)
alx_write_mem32(hw, ALX_IMR, 0);
alx_post_write(hw);
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
for (i = 0; i < alx->num_vec; i++) {
alx_mask_msix(hw, i, true);
- synchronize_irq(alx->msix_entries[i].vector);
+ synchronize_irq(pci_irq_vector(alx->hw.pdev, i));
}
} else {
- synchronize_irq(alx->hw.pdev->irq);
+ synchronize_irq(pci_irq_vector(alx->hw.pdev, 0));
}
}
@@ -981,8 +956,11 @@ static int alx_realloc_resources(struct alx_priv *alx)
alx_free_rings(alx);
alx_free_napis(alx);
- alx_disable_advanced_intr(alx);
- alx_init_intr(alx, false);
+ pci_free_irq_vectors(alx->hw.pdev);
+
+ err = alx_init_intr(alx);
+ if (err)
+ return err;
err = alx_alloc_napis(alx);
if (err)
@@ -1004,7 +982,7 @@ static int alx_request_irq(struct alx_priv *alx)
msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
err = alx_request_msix(alx);
if (!err)
@@ -1016,20 +994,20 @@ static int alx_request_irq(struct alx_priv *alx)
goto out;
}
- if (alx->flags & ALX_FLAG_USING_MSI) {
+ if (alx->hw.pdev->msi_enabled) {
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
msi_ctrl | ALX_MSI_MASK_SEL_LINE);
- err = request_irq(pdev->irq, alx_intr_msi, 0,
+ err = request_irq(pci_irq_vector(pdev, 0), alx_intr_msi, 0,
alx->dev->name, alx);
if (!err)
goto out;
+
/* fall back to legacy interrupt */
- alx->flags &= ~ALX_FLAG_USING_MSI;
- pci_disable_msi(alx->hw.pdev);
+ pci_free_irq_vectors(alx->hw.pdev);
}
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
- err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+ err = request_irq(pci_irq_vector(pdev, 0), alx_intr_legacy, IRQF_SHARED,
alx->dev->name, alx);
out:
if (!err)
@@ -1042,18 +1020,15 @@ out:
static void alx_free_irq(struct alx_priv *alx)
{
struct pci_dev *pdev = alx->hw.pdev;
- int i, vector = 0;
+ int i;
- if (alx->flags & ALX_FLAG_USING_MSIX) {
- free_irq(alx->msix_entries[vector++].vector, alx);
+ free_irq(pci_irq_vector(pdev, 0), alx);
+ if (alx->hw.pdev->msix_enabled) {
for (i = 0; i < alx->num_napi; i++)
- free_irq(alx->msix_entries[vector++].vector,
- alx->qnapi[i]);
- } else {
- free_irq(pdev->irq, alx);
+ free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]);
}
- alx_disable_advanced_intr(alx);
+ pci_free_irq_vectors(pdev);
}
static int alx_identify_hw(struct alx_priv *alx)
@@ -1221,7 +1196,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
{
int err;
- alx_init_intr(alx, true);
+ err = alx_enable_msix(alx);
+ if (err < 0) {
+ err = alx_init_intr(alx);
+ if (err)
+ return err;
+ }
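+
+ /* Editor's note, not part of the original patch: interrupt setup now
+ * degrades in two stages -- alx_enable_msix() asks the PCI core for
+ * the full MSI-X vector set, and on failure alx_init_intr() retries
+ * with a single PCI_IRQ_MSI | PCI_IRQ_LEGACY vector, letting
+ * pci_alloc_irq_vectors() try MSI first and fall back to the legacy
+ * INTx line on its own.
+ */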
if (!resume)
netif_carrier_off(alx->dev);
@@ -1264,7 +1244,7 @@ out_free_rings:
alx_free_rings(alx);
alx_free_napis(alx);
out_disable_adv_intr:
- alx_disable_advanced_intr(alx);
+ pci_free_irq_vectors(alx->hw.pdev);
return err;
}
@@ -1637,11 +1617,11 @@ static void alx_poll_controller(struct net_device *netdev)
struct alx_priv *alx = netdev_priv(netdev);
int i;
- if (alx->flags & ALX_FLAG_USING_MSIX) {
+ if (alx->hw.pdev->msix_enabled) {
alx_intr_msix_misc(0, alx);
for (i = 0; i < alx->num_txq; i++)
alx_intr_msix_ring(0, alx->qnapi[i]);
- } else if (alx->flags & ALX_FLAG_USING_MSI)
+ } else if (alx->hw.pdev->msi_enabled)
alx_intr_msi(0, alx);
else
alx_intr_legacy(0, alx);
@@ -1783,7 +1763,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &alx_netdev_ops;
netdev->ethtool_ops = &alx_ethtool_ops;
- netdev->irq = pdev->irq;
+ netdev->irq = pci_irq_vector(pdev, 0);
netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 022772e1e249..83d2db2abb45 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1886,7 +1886,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
buffer_info->skb = skb;
buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
- offset = (unsigned long)skb->data & ~PAGE_MASK;
+ offset = offset_in_page(skb->data);
buffer_info->dma = pci_map_page(pdev, page, offset,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
@@ -2230,7 +2230,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
- offset = (unsigned long)skb->data & ~PAGE_MASK;
+ offset = offset_in_page(skb->data);
buffer_info->dma = pci_map_page(adapter->pdev, page,
offset, hdr_len,
PCI_DMA_TODEVICE);
@@ -2254,9 +2254,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
data_len -= buffer_info->length;
page = virt_to_page(skb->data +
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
- offset = (unsigned long)(skb->data +
- (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
- ~PAGE_MASK;
+ offset = offset_in_page(skb->data +
+ (hdr_len + i * ATL1_MAX_TX_BUF_LEN));
buffer_info->dma = pci_map_page(adapter->pdev,
page, offset, buffer_info->length,
PCI_DMA_TODEVICE);
@@ -2268,7 +2267,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
/* not TSO */
buffer_info->length = buf_len;
page = virt_to_page(skb->data);
- offset = (unsigned long)skb->data & ~PAGE_MASK;
+ offset = offset_in_page(skb->data);
buffer_info->dma = pci_map_page(adapter->pdev, page,
offset, buf_len, PCI_DMA_TODEVICE);
if (++next_to_use == tpd_ring->count)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 940fb24bba21..96413808c726 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -109,7 +109,6 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
select PHYLIB
- select HWMON
imply PTP_1588_CLOCK
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -117,6 +116,13 @@ config TIGON3
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
+config TIGON3_HWMON
+ bool "Broadcom Tigon3 HWMON support"
+ default y
+ depends on TIGON3 && HWMON && !(TIGON3=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose the thermal sensor on Tigon3 devices.
+
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a68d4889f5db..099b374c1b17 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -22,6 +22,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
+#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -284,6 +285,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
+ /* Per TX-queue statistics are dynamically appended */
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -338,7 +340,8 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
continue;
j++;
}
- return j;
+ /* Include per-queue statistics */
+ return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
default:
return -EOPNOTSUPP;
}
@@ -349,6 +352,7 @@ static void bcm_sysport_get_strings(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
const struct bcm_sysport_stats *s;
+ char buf[128];
int i, j;
switch (stringset) {
@@ -363,6 +367,18 @@ static void bcm_sysport_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
j++;
}
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ snprintf(buf, sizeof(buf), "txq%d_packets", i);
+ memcpy(data + j * ETH_GSTRING_LEN, buf,
+ ETH_GSTRING_LEN);
+ j++;
+
+ snprintf(buf, sizeof(buf), "txq%d_bytes", i);
+ memcpy(data + j * ETH_GSTRING_LEN, buf,
+ ETH_GSTRING_LEN);
+ j++;
+ }
break;
default:
break;
@@ -418,6 +434,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *ring;
int i, j;
if (netif_running(dev))
@@ -436,6 +453,22 @@ static void bcm_sysport_get_stats(struct net_device *dev,
data[j] = *(unsigned long *)p;
j++;
}
+
+ /* On SYSTEMPORT Lite the statistics have holes, so j need not equal
+ * BCM_SYSPORT_STATS_LEN at the end of the loop; point it at the total
+ * number of reported statistics minus the per-TX-queue ones so the
+ * queue counters land at the right offset.
+ */
+ j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
+ dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ring = &priv->tx_rings[i];
+ data[j] = ring->packets;
+ j++;
+ data[j] = ring->bytes;
+ j++;
+ }
}
static void bcm_sysport_get_wol(struct net_device *dev,
@@ -637,6 +670,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
u16 len, status;
struct bcm_rsb *rsb;
+ /* Clear status before servicing to reduce spurious interrupts */
+ intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
+
/* Determine how much we should process since last call, SYSTEMPORT Lite
* groups the producer and consumer indexes into the same 32-bit
* which we access using RDMA_CONS_INDEX
@@ -647,11 +683,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
p_index = rdma_readl(priv, RDMA_CONS_INDEX);
p_index &= RDMA_PROD_INDEX_MASK;
- if (p_index < priv->rx_c_index)
- to_process = (RDMA_CONS_INDEX_MASK + 1) -
- priv->rx_c_index + p_index;
- else
- to_process = p_index - priv->rx_c_index;
+ to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
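+ /* Editor's note, not part of the original patch: the masked
+ * subtraction folds producer wraparound into one expression -- with a
+ * 16-bit mask, p_index = 2 and rx_c_index = 65534 give
+ * (2 - 65534) & 0xffff == 4, matching what the old if/else computed.
+ */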
netif_dbg(priv, rx_status, ndev,
"p_index=%d rx_c_index=%d to_process=%d\n",
@@ -746,26 +778,26 @@ next:
return processed;
}
-static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
struct bcm_sysport_cb *cb,
unsigned int *bytes_compl,
unsigned int *pkts_compl)
{
+ struct bcm_sysport_priv *priv = ring->priv;
struct device *kdev = &priv->pdev->dev;
- struct net_device *ndev = priv->netdev;
if (cb->skb) {
- ndev->stats.tx_bytes += cb->skb->len;
+ ring->bytes += cb->skb->len;
*bytes_compl += cb->skb->len;
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
dma_unmap_len(cb, dma_len),
DMA_TO_DEVICE);
- ndev->stats.tx_packets++;
+ ring->packets++;
(*pkts_compl)++;
bcm_sysport_free_cb(cb);
/* SKB fragment */
} else if (dma_unmap_addr(cb, dma_addr)) {
- ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+ ring->bytes += dma_unmap_len(cb, dma_len);
dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
dma_unmap_addr_set(cb, dma_addr, 0);
@@ -782,6 +814,13 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_cb *cb;
u32 hw_ind;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (!ring->priv->is_lite)
+ intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
+ else
+ intrl2_0_writel(ring->priv, BIT(ring->index +
+ INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
+
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -803,7 +842,7 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
while (last_tx_cn-- > 0) {
cb = ring->cbs + last_c_index;
- bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+ bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
last_c_index++;
@@ -1632,6 +1671,24 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
return 0;
}
+static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned long tx_bytes = 0, tx_packets = 0;
+ struct bcm_sysport_tx_ring *ring;
+ unsigned int q;
+
+ for (q = 0; q < dev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
+ tx_bytes += ring->bytes;
+ tx_packets += ring->packets;
+ }
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+ return &dev->stats;
+}
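+
+/* Editor's note, not part of the original patch: with the byte and
+ * packet counters moved into each TX ring, ndo_get_stats folds them
+ * back into dev->stats on demand -- summing ring->bytes and
+ * ring->packets across num_tx_queues replaces the global counters the
+ * reclaim path used to bump directly.
+ */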
+
static void bcm_sysport_netif_start(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1893,6 +1950,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcm_sysport_poll_controller,
#endif
+ .ndo_get_stats = bcm_sysport_get_nstats,
};
#define REV_FMT "v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 863ddd7870b7..77a51c167a69 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -647,6 +647,9 @@ enum bcm_sysport_stat_type {
.reg_offset = ofs, \
}
+/* TX bytes and packets */
+#define NUM_SYSPORT_TXQ_STAT 2
+
struct bcm_sysport_stats {
char stat_string[ETH_GSTRING_LEN];
int stat_sizeof;
@@ -690,6 +693,8 @@ struct bcm_sysport_tx_ring {
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
+ unsigned long packets; /* packets statistics */
+ unsigned long bytes; /* bytes statistics */
};
/* Driver private structure */
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index d59cfcc4c4d5..6322594ab260 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
+#include <linux/of_net.h>
#include "bgmac.h"
static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
@@ -114,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core)
struct ssb_sprom *sprom = &core->bus->sprom;
struct mii_bus *mii_bus;
struct bgmac *bgmac;
- u8 *mac;
+ const u8 *mac = NULL;
int err;
bgmac = bgmac_alloc(&core->dev);
@@ -127,21 +128,27 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- switch (core->core_unit) {
- case 0:
- mac = sprom->et0mac;
- break;
- case 1:
- mac = sprom->et1mac;
- break;
- case 2:
- mac = sprom->et2mac;
- break;
- default:
- dev_err(bgmac->dev, "Unsupported core_unit %d\n",
- core->core_unit);
- err = -ENOTSUPP;
- goto err;
+ if (bgmac->dev->of_node)
+ mac = of_get_mac_address(bgmac->dev->of_node);
+
+ /* If no MAC address assigned via device tree, check SPROM */
+ if (!mac) {
+ switch (core->core_unit) {
+ case 0:
+ mac = sprom->et0mac;
+ break;
+ case 1:
+ mac = sprom->et1mac;
+ break;
+ case 2:
+ mac = sprom->et2mac;
+ break;
+ default:
+ dev_err(bgmac->dev, "Unsupported core_unit %d\n",
+ core->core_unit);
+ err = -ENOTSUPP;
+ goto err;
+ }
}
ether_addr_copy(bgmac->net_dev->dev_addr, mac);
@@ -192,36 +199,50 @@ static int bgmac_probe(struct bcma_device *core)
goto err1;
}
- bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
- BGMAC_BFL_ENETROBO);
+ bgmac->has_robosw = !!(sprom->boardflags_lo & BGMAC_BFL_ENETROBO);
if (bgmac->has_robosw)
dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n");
- if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ if (sprom->boardflags_lo & BGMAC_BFL_ENETADM)
dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n");
/* Feature Flags */
- switch (core->bus->chipinfo.id) {
+ switch (ci->id) {
+ /* BCM 471X/535X family */
+ case BCMA_CHIP_ID_BCM4716:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ /* fallthrough */
+ case BCMA_CHIP_ID_BCM47162:
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ break;
case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM53572:
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) {
- bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
+ ci->pkg == BCMA_PKG_ID_BCM47186) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
- case BCMA_CHIP_ID_BCM53572:
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ case BCMA_CHIP_ID_BCM53573:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
- bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) {
- bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (core->core_unit == 0) {
+ bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
+ bgmac->feature_flags |=
+ BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
+ } else if (core->core_unit == 1) {
+ bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
+ bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
}
break;
case BCMA_CHIP_ID_BCM4749:
@@ -229,18 +250,11 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == 10) {
+ if (ci->pkg == 10) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
break;
- case BCMA_CHIP_ID_BCM4716:
- bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- /* fallthrough */
- case BCMA_CHIP_ID_BCM47162:
- bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
- break;
/* bcm4707_family */
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM47094:
@@ -249,21 +263,6 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
break;
- case BCMA_CHIP_ID_BCM53573:
- bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
- if (ci->pkg == BCMA_PKG_ID_BCM47189)
- bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
- if (core->core_unit == 0) {
- bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
- if (ci->pkg == BCMA_PKG_ID_BCM47189)
- bgmac->feature_flags |=
- BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
- } else if (core->core_unit == 1) {
- bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
- bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
- }
- break;
default:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index da1b8b225eb9..73aca97a96bc 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -21,8 +21,12 @@
#include <linux/of_net.h>
#include "bgmac.h"
+#define NICPM_PADRING_CFG 0x00000004
#define NICPM_IOMUX_CTRL 0x00000008
+#define NICPM_PADRING_CFG_INIT_VAL 0x74000000
+#define NICPM_IOMUX_CTRL_INIT_VAL_AX 0x21880000
+
#define NICPM_IOMUX_CTRL_INIT_VAL 0x3196e000
#define NICPM_IOMUX_CTRL_SPD_SHIFT 10
#define NICPM_IOMUX_CTRL_SPD_10M 0
@@ -113,6 +117,10 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev)
if (!bgmac->plat.nicpm_base)
return;
+ /* SET RGMII IO CONFIG */
+ writel(NICPM_PADRING_CFG_INIT_VAL,
+ bgmac->plat.nicpm_base + NICPM_PADRING_CFG);
+
val = NICPM_IOMUX_CTRL_INIT_VAL;
switch (bgmac->net_dev->phydev->speed) {
default:
@@ -244,6 +252,31 @@ static int bgmac_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int bgmac_suspend(struct device *dev)
+{
+ struct bgmac *bgmac = dev_get_drvdata(dev);
+
+ return bgmac_enet_suspend(bgmac);
+}
+
+static int bgmac_resume(struct device *dev)
+{
+ struct bgmac *bgmac = dev_get_drvdata(dev);
+
+ return bgmac_enet_resume(bgmac);
+}
+
+static const struct dev_pm_ops bgmac_pm_ops = {
+ .suspend = bgmac_suspend,
+ .resume = bgmac_resume
+};
+
+#define BGMAC_PM_OPS (&bgmac_pm_ops)
+#else
+#define BGMAC_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct of_device_id bgmac_of_enet_match[] = {
{.compatible = "brcm,amac",},
{.compatible = "brcm,nsp-amac",},
@@ -257,6 +290,7 @@ static struct platform_driver bgmac_enet_driver = {
.driver = {
.name = "bgmac-enet",
.of_match_table = bgmac_of_enet_match,
+ .pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
.remove = bgmac_remove,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fd66fca00e01..ba4d2e145bb9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
@@ -1480,6 +1481,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
+ dev_set_drvdata(bgmac->dev, bgmac);
if (!is_valid_ether_addr(net_dev->dev_addr)) {
dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
@@ -1552,5 +1554,55 @@ void bgmac_enet_remove(struct bgmac *bgmac)
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
+int bgmac_enet_suspend(struct bgmac *bgmac)
+{
+ if (!netif_running(bgmac->net_dev))
+ return 0;
+
+ phy_stop(bgmac->net_dev->phydev);
+
+ netif_stop_queue(bgmac->net_dev);
+
+ napi_disable(&bgmac->napi);
+
+ netif_tx_lock(bgmac->net_dev);
+ netif_device_detach(bgmac->net_dev);
+ netif_tx_unlock(bgmac->net_dev);
+
+ bgmac_chip_intrs_off(bgmac);
+ bgmac_chip_reset(bgmac);
+ bgmac_dma_cleanup(bgmac);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bgmac_enet_suspend);
+
+int bgmac_enet_resume(struct bgmac *bgmac)
+{
+ int rc;
+
+ if (!netif_running(bgmac->net_dev))
+ return 0;
+
+ rc = bgmac_dma_init(bgmac);
+ if (rc)
+ return rc;
+
+ bgmac_chip_init(bgmac);
+
+ napi_enable(&bgmac->napi);
+
+ netif_tx_lock(bgmac->net_dev);
+ netif_device_attach(bgmac->net_dev);
+ netif_tx_unlock(bgmac->net_dev);
+
+ netif_start_queue(bgmac->net_dev);
+
+ phy_start(bgmac->net_dev->phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bgmac_enet_resume);
+
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 6d1c6ff1ed96..c1818766c501 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -402,7 +402,7 @@
#define BGMAC_WEIGHT 64
-#define ETHER_MAX_LEN 1518
+#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
/* Feature Flags */
#define BGMAC_FEAT_TX_MASK_SETUP BIT(0)
@@ -537,6 +537,8 @@ int bgmac_enet_probe(struct bgmac *bgmac);
void bgmac_enet_remove(struct bgmac *bgmac);
void bgmac_adjust_link(struct net_device *net_dev);
int bgmac_phy_connect_direct(struct bgmac *bgmac);
+int bgmac_enet_suspend(struct bgmac *bgmac);
+int bgmac_enet_resume(struct bgmac *bgmac);
struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9e8c06130c09..eccb3d1b6abb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2021,6 +2021,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
ETH_OVERHEAD +
mtu +
BNX2X_FW_RX_ALIGN_END;
+ fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
@@ -4277,7 +4278,10 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
{
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return bnx2x_setup_tc(dev, tc->tc);
+
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
}
/* called with rtnl_lock */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index b209b7f6093e..7dd83d0ef0a0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6161,103 +6161,40 @@ static void bnx2x_link_int_ack(struct link_params *params,
}
}
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ str[0] = '\0';
+ (*len)--;
+ return 0;
+}
+
static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
{
- u8 *str_ptr = str;
- u32 mask = 0xf0000000;
- u8 shift = 8*4;
- u8 digit;
- u8 remove_leading_zeros = 1;
+ u16 ret;
+
if (*len < 10) {
/* Need more than 10 chars for this format */
- *str_ptr = '\0';
- (*len)--;
+ bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
- while (shift > 0) {
- shift -= 4;
- digit = ((num & mask) >> shift);
- if (digit == 0 && remove_leading_zeros) {
- *str_ptr = '0';
- } else {
- if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
-
- remove_leading_zeros = 0;
- str_ptr++;
- (*len)--;
- }
- mask = mask >> 4;
- if (shift == 4*4) {
- if (remove_leading_zeros) {
- str_ptr++;
- (*len)--;
- }
- *str_ptr = '.';
- str_ptr++;
- (*len)--;
- remove_leading_zeros = 1;
- }
- }
- if (remove_leading_zeros)
- (*len)--;
+ ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+ *len -= ret;
return 0;
}
static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
- u8 *str_ptr = str;
- u32 mask = 0x00f00000;
- u8 shift = 8*3;
- u8 digit;
- u8 remove_leading_zeros = 1;
+ u16 ret;
if (*len < 10) {
/* Need more than 10 chars for this format */
- *str_ptr = '\0';
- (*len)--;
+ bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
- while (shift > 0) {
- shift -= 4;
- digit = ((num & mask) >> shift);
- if (digit == 0 && remove_leading_zeros) {
- *str_ptr = '0';
- } else {
- if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
-
- remove_leading_zeros = 0;
- str_ptr++;
- (*len)--;
- }
- mask = mask >> 4;
- if ((shift == 4*4) || (shift == 4*2)) {
- if (remove_leading_zeros) {
- str_ptr++;
- (*len)--;
- }
- *str_ptr = '.';
- str_ptr++;
- (*len)--;
- remove_leading_zeros = 1;
- }
- }
- if (remove_leading_zeros)
- (*len)--;
- return 0;
-}
-
-static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
-{
- str[0] = '\0';
- (*len)--;
+ ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+ *len -= ret;
return 0;
}
@@ -10681,22 +10618,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- int status = 0;
u32 num;
num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
((raw_ver & 0xF000) >> 12);
- status = bnx2x_3_seq_format_ver(num, str, len);
- return status;
+ return bnx2x_3_seq_format_ver(num, str, len);
}
static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- int status = 0;
u32 spirom_ver;
+
spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
- status = bnx2x_format_ver(spirom_ver, str, len);
- return status;
+ return bnx2x_format_ver(spirom_ver, str, len);
}
static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
@@ -12547,13 +12481,12 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
{
- int status = 0;
phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
if (phy_index == INT_PHY)
return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
- status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+
+ return bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
port, phy);
- return status;
}
static void bnx2x_phy_def_cfg(struct link_params *params,
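The rewrite above collapses two roughly 40-line nibble-extraction loops into single scnprintf() calls: "%hx.%hx" splits the packed word into two 16-bit fields and "%hhx.%hhx.%hhx" into three 8-bit ones, with the format itself suppressing leading zeros. A runnable user-space analogue (snprintf standing in for the kernel's scnprintf):

	#include <stdint.h>
	#include <stdio.h>

	/* User-space analogue of the new format_ver bodies: split a
	 * packed 32-bit version word into dotted hex fields.
	 */
	int main(void)
	{
		uint32_t num = 0x00070a02;
		char two[16], three[16];

		snprintf(two, sizeof(two), "%hx.%hx",
			 (unsigned short)(num >> 16), (unsigned short)num);
		snprintf(three, sizeof(three), "%hhx.%hhx.%hhx",
			 (unsigned char)(num >> 16), (unsigned char)(num >> 8),
			 (unsigned char)num);
		printf("%s %s\n", two, three);	/* prints: 7.a02 7.a.2 */
		return 0;
	}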
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1f1e54ba0ecb..b3ba66032980 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4482,9 +4482,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
}
#endif
- if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
- FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
- bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ if (BNXT_PF(bp)) {
+ u16 flags = le16_to_cpu(resp->flags);
+
+ if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
+ FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
+ bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
+ bp->flags |= BNXT_FLAG_MULTI_HOST;
+ }
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -4549,6 +4555,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+ bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -5198,9 +5207,10 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
if (BNXT_VF(bp))
- return bp->vf.max_irqs;
+ return min_t(unsigned int, bp->vf.max_irqs,
+ bp->vf.max_cp_rings);
#endif
- return bp->pf.max_irqs;
+ return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
}
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
@@ -5467,7 +5477,8 @@ static void bnxt_report_link(struct bnxt *bp)
if (bp->link_info.link_up) {
const char *duplex;
const char *flow_ctrl;
- u16 speed, fec;
+ u32 speed;
+ u16 fec;
netif_carrier_on(bp->dev);
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
@@ -5483,7 +5494,7 @@ static void bnxt_report_link(struct bnxt *bp)
else
flow_ctrl = "none";
speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
- netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
if (bp->flags & BNXT_FLAG_EEE_CAP)
netdev_info(bp->dev, "EEE is %s\n",
@@ -5857,6 +5868,76 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
return 0;
}
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
+{
+ struct hwrm_wol_filter_alloc_input req = {0};
+ struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+ req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+ memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->wol_filter_id = resp->wol_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
+{
+ struct hwrm_wol_filter_free_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
+ req.wol_filter_id = bp->wol_filter_id;
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+}
+
+static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
+{
+ struct hwrm_wol_filter_qcfg_input req = {0};
+ struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 next_handle = 0;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.handle = cpu_to_le16(handle);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ next_handle = le16_to_cpu(resp->next_handle);
+ if (next_handle != 0) {
+ if (resp->wol_type ==
+ WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
+ bp->wol = 1;
+ bp->wol_filter_id = resp->wol_filter_id;
+ }
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return next_handle;
+}
+
+static void bnxt_get_wol_settings(struct bnxt *bp)
+{
+ u16 handle = 0;
+
+ if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
+ return;
+
+ do {
+ handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
+ } while (handle && handle != 0xffff);
+}
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
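bnxt_get_wol_settings() enumerates the firmware's WoL filters by feeding each query's next_handle back into the next query until the chain terminates (0 or 0xffff). A runnable user-space model of the walk; the handle chain below is invented for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Each query returns the next filter handle; 0 or 0xffff ends
	 * the walk.  The chain below is made up for illustration.
	 */
	static uint16_t query_next(uint16_t handle)
	{
		switch (handle) {
		case 0:
			return 1;
		case 1:
			return 2;
		default:
			return 0xffff;	/* end of list */
		}
	}

	int main(void)
	{
		uint16_t handle = 0;

		do {
			handle = query_next(handle);
			printf("next handle %#x\n", handle);
		} while (handle && handle != 0xffff);
		return 0;
	}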
@@ -6042,6 +6123,43 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
return rc;
}
+/* rtnl_lock held, open the NIC halfway by allocating all resources, but
+ * without enabling NAPI, IRQ, and TX.  This is mainly used for offline
+ * self-tests.
+ */
+int bnxt_half_open_nic(struct bnxt *bp)
+{
+ int rc = 0;
+
+ rc = bnxt_alloc_mem(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto half_open_err;
+ }
+ rc = bnxt_init_nic(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto half_open_err;
+ }
+ return 0;
+
+half_open_err:
+ bnxt_free_skbs(bp);
+ bnxt_free_mem(bp, false);
+ dev_close(bp->dev);
+ return rc;
+}
+
+/* rtnl_lock held, this call can only be made after a previous successful
+ * call to bnxt_half_open_nic().
+ */
+void bnxt_half_close_nic(struct bnxt *bp)
+{
+ bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_free_skbs(bp);
+ bnxt_free_mem(bp, false);
+}
+
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
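The half-open helpers are consumed by the offline ethtool self-test added later in this series; the pairing contract, sketched (run_loopback_tests() is a stand-in for the loopback helpers, and bnxt_half_open_nic() already calls dev_close() on its own failure path):

	rc = bnxt_close_nic(bp, false, false);
	if (rc)
		return;
	rc = bnxt_half_open_nic(bp);	/* rings/buffers up; no NAPI, IRQ, TX */
	if (!rc) {
		run_loopback_tests(bp);	/* stand-in for the loopback helpers */
		bnxt_half_close_nic(bp);
	}
	bnxt_open_nic(bp, false, true);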
@@ -6923,7 +7041,9 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return bnxt_setup_mq_tc(dev, ntc->tc);
+ ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
@@ -7224,6 +7344,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
@@ -7546,6 +7667,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_ethtool_init(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
@@ -7591,6 +7713,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ bnxt_get_wol_settings(bp);
+ if (bp->flags & BNXT_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&pdev->dev, bp->wol);
+ else
+ device_set_wakeup_capable(&pdev->dev, false);
+
rc = register_netdev(dev);
if (rc)
goto init_err_clr_int;
@@ -7614,6 +7742,88 @@ init_err_free:
return rc;
}
+static void bnxt_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp;
+
+ if (!dev)
+ return;
+
+ rtnl_lock();
+ bp = netdev_priv(dev);
+ if (!bp)
+ goto shutdown_exit;
+
+ if (netif_running(dev))
+ dev_close(dev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ bnxt_clear_int_mode(bp);
+ pci_wake_from_d3(pdev, bp->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+shutdown_exit:
+ rtnl_unlock();
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bnxt_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rtnl_lock();
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ rc = bnxt_close(dev);
+ }
+ bnxt_hwrm_func_drv_unrgtr(bp);
+ rtnl_unlock();
+ return rc;
+}
+
+static int bnxt_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rtnl_lock();
+ if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ rc = -EBUSY;
+ goto resume_exit;
+ }
+ bnxt_get_wol_settings(bp);
+ if (netif_running(dev)) {
+ rc = bnxt_open(dev);
+ if (!rc)
+ netif_device_attach(dev);
+ }
+
+resume_exit:
+ rtnl_unlock();
+ return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
+#define BNXT_PM_OPS (&bnxt_pm_ops)
+
+#else
+
+#define BNXT_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
/**
* bnxt_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -7730,6 +7940,8 @@ static struct pci_driver bnxt_pci_driver = {
.id_table = bnxt_pci_tbl,
.probe = bnxt_init_one,
.remove = bnxt_remove_one,
+ .shutdown = bnxt_shutdown,
+ .driver.pm = BNXT_PM_OPS,
.err_handler = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
.sriov_configure = bnxt_sriov_configure,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index c7a5b84a5cb2..3ef42dbc6327 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,6 +18,8 @@
#define DRV_VER_MIN 7
#define DRV_VER_UPD 0
+#include <linux/interrupt.h>
+
struct tx_bd {
__le32 tx_bd_len_flags_type;
#define TX_BD_TYPE (0x3f << 0)
@@ -424,8 +426,6 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_MIN_PKT_SIZE 52
-#define BNXT_NUM_TESTS(bp) 0
-
#define BNXT_DEFAULT_RX_RING_SIZE 511
#define BNXT_DEFAULT_TX_RING_SIZE 511
@@ -851,6 +851,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
u16 support_speeds;
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
@@ -862,6 +863,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+#define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
u16 support_auto_speeds;
u16 lp_auto_link_speeds;
u16 force_link_speed;
@@ -909,6 +911,14 @@ struct bnxt_led_info {
__le16 led_color_caps;
};
+#define BNXT_MAX_TEST 8
+
+struct bnxt_test_info {
+ u8 offline_mask;
+ u16 timeout;
+ char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
+};
+
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
@@ -987,6 +997,7 @@ struct bnxt {
#define BNXT_FLAG_UDP_RSS_CAP 0x800
#define BNXT_FLAG_EEE_CAP 0x1000
#define BNXT_FLAG_NEW_RSS_CAP 0x2000
+ #define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
#define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \
@@ -994,6 +1005,7 @@ struct bnxt {
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_FW_LLDP_AGENT 0x80000
+ #define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1003,7 +1015,8 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
-#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
+#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
@@ -1178,6 +1191,12 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
+ u8 num_tests;
+ struct bnxt_test_info *test_info;
+
+ u8 wol_filter_id;
+ u8 wol;
+
u8 num_leds;
struct bnxt_led_info leds[BNXT_MAX_LED];
@@ -1236,8 +1255,12 @@ void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_half_open_nic(struct bnxt *bp);
+void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 03532061d211..46de2f8ff024 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
+#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"
@@ -241,6 +243,92 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
return 0;
}
+static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
+ bool add)
+{
+ struct hwrm_fw_set_structured_data_input set = {0};
+ struct hwrm_fw_get_structured_data_input get = {0};
+ struct hwrm_struct_data_dcbx_app *fw_app;
+ struct hwrm_struct_hdr *data;
+ dma_addr_t mapping;
+ size_t data_len;
+ int rc, n, i;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ n = IEEE_8021QAZ_MAX_TCS;
+ data_len = sizeof(*data) + sizeof(*fw_app) * n;
+ data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(data, 0, data_len);
+ bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
+ get.dest_data_addr = cpu_to_le64(mapping);
+ get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
+ get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+ get.count = 0;
+ rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto set_app_exit;
+
+ fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);
+
+ if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
+ rc = -ENODEV;
+ goto set_app_exit;
+ }
+
+ n = data->count;
+ for (i = 0; i < n; i++, fw_app++) {
+ if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
+ fw_app->protocol_selector == app->selector &&
+ fw_app->priority == app->priority) {
+ if (add)
+ goto set_app_exit;
+ else
+ break;
+ }
+ }
+ if (add) {
+ /* append */
+ n++;
+ fw_app->protocol_id = cpu_to_be16(app->protocol);
+ fw_app->protocol_selector = app->selector;
+ fw_app->priority = app->priority;
+ fw_app->valid = 1;
+ } else {
+ size_t len = 0;
+
+ /* not found, nothing to delete */
+ if (n == i)
+ goto set_app_exit;
+
+ len = (n - 1 - i) * sizeof(*fw_app);
+ if (len)
+ memmove(fw_app, fw_app + 1, len);
+ n--;
+ memset(fw_app + n, 0, sizeof(*fw_app));
+ }
+ data->count = n;
+ data->len = cpu_to_le16(sizeof(*fw_app) * n);
+ data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
+ set.src_data_addr = cpu_to_le64(mapping);
+ set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
+ set.hdr_cnt = 1;
+ rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+
+set_app_exit:
+ dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
+ return rc;
+}
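The delete branch above compacts the firmware's app table in place: memmove() shifts the tail left over the matched entry and the vacated last slot is zeroed. A runnable user-space model (the table contents are made up; 0x8915 happens to be the RoCE ethertype used elsewhere in this patch):

	#include <stdio.h>
	#include <string.h>

	struct app { unsigned short proto; unsigned char prio; };

	/* User-space model of the delete branch above: shift the tail
	 * left over entry i with memmove(), then zero the vacated slot.
	 */
	static int delete_entry(struct app *tbl, int n, int i)
	{
		size_t len = (n - 1 - i) * sizeof(*tbl);

		if (len)
			memmove(&tbl[i], &tbl[i + 1], len);
		n--;
		memset(&tbl[n], 0, sizeof(*tbl));
		return n;
	}

	int main(void)
	{
		struct app tbl[3] = { { 0x8915, 5 }, { 0x12b7, 3 },
				      { 0x22f3, 1 } };
		int n = delete_entry(tbl, 3, 1);

		/* prints: n=2 first=0x8915 last=0x22f3 */
		printf("n=%d first=%#x last=%#x\n", n,
		       tbl[0].proto, tbl[1].proto);
		return 0;
	}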
+
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
@@ -417,6 +505,15 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
return -EINVAL;
rc = dcb_ieee_setapp(dev, app);
+ if (rc)
+ return rc;
+
+ if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_IBOE) ||
+ (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+ app->protocol == ROCE_V2_UDP_DPORT))
+ rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
+
return rc;
}
@@ -425,10 +522,19 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
struct bnxt *bp = netdev_priv(dev);
int rc;
- if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
rc = dcb_ieee_delapp(dev, app);
+ if (rc)
+ return rc;
+ if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_IBOE) ||
+ (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+ app->protocol == ROCE_V2_UDP_DPORT))
+ rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
+
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 35a0d28cf2fd..ecd0a5e46a49 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6903a873f072..11ddf0adc6e1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,6 +18,7 @@
#include <linux/firmware.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
@@ -209,6 +211,10 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
return num_stats;
}
+ case ETH_SS_TEST:
+ if (!bp->num_tests)
+ return -EOPNOTSUPP;
+ return bp->num_tests;
default:
return -EOPNOTSUPP;
}
@@ -306,6 +312,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
break;
+ case ETH_SS_TEST:
+ if (bp->num_tests)
+ memcpy(buf, bp->test_info->string,
+ bp->num_tests * ETH_GSTRING_LEN);
+ break;
default:
netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
stringset);
@@ -824,7 +835,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
- info->testinfo_len = BNXT_NUM_TESTS(bp);
+ info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
info->eedump_len = 0;
/* TODO CHIMP FW: reg dump details */
@@ -832,6 +843,45 @@ static void bnxt_get_drvinfo(struct net_device *dev,
kfree(pkglog);
}
+static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+ if (bp->flags & BNXT_FLAG_WOL_CAP) {
+ wol->supported = WAKE_MAGIC;
+ if (bp->wol)
+ wol->wolopts = WAKE_MAGIC;
+ }
+}
+
+static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ if (!(bp->flags & BNXT_FLAG_WOL_CAP))
+ return -EINVAL;
+ if (!bp->wol) {
+ if (bnxt_hwrm_alloc_wol_fltr(bp))
+ return -EBUSY;
+ bp->wol = 1;
+ }
+ } else {
+ if (bp->wol) {
+ if (bnxt_hwrm_free_wol_fltr(bp))
+ return -EBUSY;
+ bp->wol = 0;
+ }
+ }
+ return 0;
+}
+
u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
u32 speed_mask = 0;
@@ -879,6 +929,9 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
50000baseCR2_Full);\
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100000baseCR4_Full);\
if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
Pause); \
@@ -915,6 +968,9 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
50000baseCR2_Full)) \
(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
}
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
@@ -977,6 +1033,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
return SPEED_40000;
case BNXT_LINK_SPEED_50GB:
return SPEED_50000;
+ case BNXT_LINK_SPEED_100GB:
+ return SPEED_100000;
default:
return SPEED_UNKNOWN;
}
@@ -1042,7 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
return 0;
}
-static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
@@ -1082,6 +1140,10 @@ static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
break;
+ case SPEED_100000:
+ if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
+ break;
default:
netdev_err(dev, "unsupported speed!\n");
break;
@@ -2128,12 +2190,372 @@ static int bnxt_set_phys_id(struct net_device *dev,
return rc;
}
+static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
+{
+ struct hwrm_selftest_irq_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_test_irq(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
+ int rc;
+
+ rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
+{
+ struct hwrm_port_mac_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+
+ req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
+ if (enable)
+ req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
+ else
+ req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 fw_advertising = link_info->advertising;
+ u16 fw_speed;
+ int rc;
+
+ if (!link_info->autoneg)
+ return 0;
+
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
+ if (netif_carrier_ok(bp->dev))
+ fw_speed = bp->link_info.link_speed;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+
+ req->force_link_speed = cpu_to_le16(fw_speed);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
+ PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+ rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
+ req->flags = 0;
+ req->force_link_speed = cpu_to_le16(0);
+ return rc;
+}
+
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+
+ if (enable) {
+ bnxt_disable_an_for_lpbk(bp, &req);
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+ } else {
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
+ }
+ req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u32 raw_cons, int pkt_size)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct rx_cmp *rxcmp;
+ u16 cp_cons, cons;
+ u8 *data;
+ u32 len;
+ int i;
+
+ cp_cons = RING_CMP(raw_cons);
+ rxcmp = (struct rx_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = rxcmp->rx_cmp_opaque;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data_ptr;
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ if (len != pkt_size)
+ return -EIO;
+ i = ETH_ALEN;
+ if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
+ return -EIO;
+ i += ETH_ALEN;
+ for ( ; i < pkt_size; i++) {
+ if (data[i] != (u8)(i & 0xff))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+{
+ struct bnxt_napi *bnapi = bp->bnapi[0];
+ struct bnxt_cp_ring_info *cpr;
+ struct tx_cmp *txcmp;
+ int rc = -EIO;
+ u32 raw_cons;
+ u32 cons;
+ int i;
+
+ cpr = &bnapi->cp_ring;
+ raw_cons = cpr->cp_raw_cons;
+ for (i = 0; i < 200; i++) {
+ cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons)) {
+ udelay(5);
+ continue;
+ }
+
+ /* The validity check of the entry must be done before
+ * reading any further.
+ */
+ dma_rmb();
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+ rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ break;
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ }
+ cpr->cp_raw_cons = raw_cons;
+ return rc;
+}
+
+static int bnxt_run_loopback(struct bnxt *bp)
+{
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+ int pkt_size, i = 0;
+ struct sk_buff *skb;
+ dma_addr_t map;
+ u8 *data;
+ int rc;
+
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ skb = netdev_alloc_skb(bp->dev, pkt_size);
+ if (!skb)
+ return -ENOMEM;
+ data = skb_put(skb, pkt_size);
+ eth_broadcast_addr(data);
+ i += ETH_ALEN;
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
+ i += ETH_ALEN;
+ for ( ; i < pkt_size; i++)
+ data[i] = (u8)(i & 0xff);
+
+ map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, map)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+ bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+ writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+ rc = bnxt_poll_loopback(bp, pkt_size);
+
+ dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ return rc;
+}
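bnxt_run_loopback() transmits a frame laid out as broadcast destination, own MAC as source, then an incrementing byte pattern, and bnxt_rx_loopback() checks exactly that layout on the receive side. A runnable user-space round trip of the fill/verify pair:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN 6

	/* Fill and verify the self-test frame layout used above:
	 * 6 bytes of broadcast destination, 6 bytes of own MAC as
	 * source, then an incrementing (i & 0xff) filler.
	 */
	static void fill(uint8_t *d, const uint8_t *mac, int len)
	{
		int i;

		memset(d, 0xff, ETH_ALEN);		/* broadcast dst */
		memcpy(d + ETH_ALEN, mac, ETH_ALEN);	/* own MAC as src */
		for (i = 2 * ETH_ALEN; i < len; i++)
			d[i] = (uint8_t)i;
	}

	static int verify(const uint8_t *d, const uint8_t *mac, int len)
	{
		int i = ETH_ALEN;

		if (memcmp(d + i, mac, ETH_ALEN))	/* src must match */
			return -1;
		for (i += ETH_ALEN; i < len; i++)
			if (d[i] != (uint8_t)i)		/* filler must match */
				return -1;
		return 0;
	}

	int main(void)
	{
		uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
		uint8_t pkt[128];

		fill(pkt, mac, sizeof(pkt));
		printf("verify: %d\n", verify(pkt, mac, sizeof(pkt))); /* 0 */
		return 0;
	}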
+
+static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
+{
+ struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_selftest_exec_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ resp->test_success = 0;
+ req.flags = test_mask;
+ rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
+ *test_results = resp->test_success;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+#define BNXT_DRV_TESTS 3
+#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
+#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
+#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+
+static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ u64 *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ bool offline = false;
+ u8 test_results = 0;
+ u8 test_mask = 0;
+ int rc, i;
+
+ if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+ return;
+ memset(buf, 0, sizeof(u64) * bp->num_tests);
+ if (!netif_running(dev)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (bp->pf.active_vfs) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+ return;
+ }
+ offline = true;
+ }
+
+ for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+ u8 bit_val = 1 << i;
+
+ if (!(bp->test_info->offline_mask & bit_val))
+ test_mask |= bit_val;
+ else if (offline)
+ test_mask |= bit_val;
+ }
+ if (!offline) {
+ bnxt_run_fw_tests(bp, test_mask, &test_results);
+ } else {
+ rc = bnxt_close_nic(bp, false, false);
+ if (rc)
+ return;
+ bnxt_run_fw_tests(bp, test_mask, &test_results);
+
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
+ rc = bnxt_half_open_nic(bp);
+ if (rc) {
+ bnxt_hwrm_mac_loopback(bp, false);
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+ if (bnxt_run_loopback(bp))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ else
+ buf[BNXT_MACLPBK_TEST_IDX] = 0;
+
+ bnxt_hwrm_mac_loopback(bp, false);
+ bnxt_hwrm_phy_loopback(bp, true);
+ msleep(1000);
+ if (bnxt_run_loopback(bp)) {
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ bnxt_hwrm_phy_loopback(bp, false);
+ bnxt_half_close_nic(bp);
+ bnxt_open_nic(bp, false, true);
+ }
+ if (bnxt_test_irq(bp)) {
+ buf[BNXT_IRQ_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+ u8 bit_val = 1 << i;
+
+ if ((test_mask & bit_val) && !(test_results & bit_val)) {
+ buf[i] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+}
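Firmware tests are requested and reported through parallel bitmasks: bit i of test_mask asks for test i, bit i of test_success reports a pass, and requested-but-not-passed means failure, as the final loop above shows. A tiny runnable decode of the same convention:

	#include <stdio.h>

	/* Decode the parallel request/result bitmasks used by the
	 * firmware self-tests above.
	 */
	int main(void)
	{
		unsigned char test_mask = 0x0b;	   /* ran tests 0, 1, 3 */
		unsigned char test_results = 0x09; /* tests 0 and 3 passed */
		int i;

		for (i = 0; i < 4; i++) {
			unsigned char bit = 1 << i;

			if ((test_mask & bit) && !(test_results & bit))
				printf("test %d failed\n", i);
		}
		return 0;	/* prints: test 1 failed */
	}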
+
+void bnxt_ethtool_init(struct bnxt *bp)
+{
+ struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_selftest_qlist_input req = {0};
+ struct bnxt_test_info *test_info;
+ int i, rc;
+
+ if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+ return;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto ethtool_init_exit;
+
+ test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+ if (!test_info)
+ goto ethtool_init_exit;
+
+ bp->test_info = test_info;
+ bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
+ if (bp->num_tests > BNXT_MAX_TEST)
+ bp->num_tests = BNXT_MAX_TEST;
+
+ test_info->offline_mask = resp->offline_tests;
+ test_info->timeout = le16_to_cpu(resp->test_timeout);
+ if (!test_info->timeout)
+ test_info->timeout = HWRM_CMD_TIMEOUT;
+ for (i = 0; i < bp->num_tests; i++) {
+ char *str = test_info->string[i];
+ char *fw_str = resp->test0_name + i * 32;
+
+ if (i == BNXT_MACLPBK_TEST_IDX) {
+ strcpy(str, "Mac loopback test (offline)");
+ } else if (i == BNXT_PHYLPBK_TEST_IDX) {
+ strcpy(str, "Phy loopback test (offline)");
+ } else if (i == BNXT_IRQ_TEST_IDX) {
+ strcpy(str, "Interrupt_test (offline)");
+ } else {
+ strlcpy(str, fw_str, ETH_GSTRING_LEN);
+ strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
+ if (test_info->offline_mask & (1 << i))
+ strncat(str, " (offline)",
+ ETH_GSTRING_LEN - strlen(str));
+ else
+ strncat(str, " (online)",
+ ETH_GSTRING_LEN - strlen(str));
+ }
+ }
+
+ethtool_init_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+void bnxt_ethtool_free(struct bnxt *bp)
+{
+ kfree(bp->test_info);
+ bp->test_info = NULL;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
+ .get_wol = bnxt_get_wol,
+ .set_wol = bnxt_set_wol,
.get_coalesce = bnxt_get_coalesce,
.set_coalesce = bnxt_set_coalesce,
.get_msglevel = bnxt_get_msglevel,
@@ -2161,4 +2583,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_module_eeprom = bnxt_get_module_eeprom,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
+ .self_test = bnxt_self_test,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ed1e555292e9..f1bc90b6fb5b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,5 +39,7 @@ extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
+void bnxt_ethtool_init(struct bnxt *bp);
+void bnxt_ethtool_free(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 6e275c23d68b..7dc71bb95837 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -11,19 +11,21 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* HSI and HWRM Specification 1.7.0 */
+/* HSI and HWRM Specification 1.7.6 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 7
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_UPDATE 6
-#define HWRM_VERSION_STR "1.7.0"
+#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */
+
+#define HWRM_VERSION_STR "1.7.6.2"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
*/
#define HWRM_NA_SIGNATURE ((__le32)(-1))
#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */
#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
@@ -571,9 +573,10 @@ struct hwrm_ver_get_output {
__le16 max_req_win_len;
__le16 max_resp_len;
__le16 def_req_timeout;
+ u8 init_pending;
+ #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
u8 unused_0;
u8 unused_1;
- u8 unused_2;
u8 valid;
};
@@ -809,6 +812,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -827,10 +832,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- u8 unused_0;
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
__le16 dflt_vnic_id;
- u8 unused_1;
- u8 unused_2;
+ u8 host_cnt;
+ #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL
+ u8 unused_0;
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
@@ -867,12 +874,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_3;
+ u8 unused_1;
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
- u8 unused_4;
+ u8 unused_2;
u8 valid;
};
@@ -888,16 +895,13 @@ struct hwrm_func_cfg_input {
u8 unused_0;
u8 unused_1;
__le32 flags;
- #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL
- #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL
- #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL
- #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1013,7 +1017,7 @@ struct hwrm_func_qstats_output {
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
+ __le64 tx_discard_pkts;
__le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
@@ -1021,7 +1025,7 @@ struct hwrm_func_qstats_output {
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
+ __le64 rx_discard_pkts;
__le64 rx_drop_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
@@ -4743,25 +4747,72 @@ struct hwrm_temp_monitor_query_output {
u8 valid;
};
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
-struct hwrm_nvm_read_input {
+/* hwrm_wol_filter_alloc */
+/* Input (64 bytes) */
+struct hwrm_wol_filter_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
+ __le32 unused_1;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
__le32 unused_2;
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
};
/* Output (16 bytes) */
-struct hwrm_nvm_read_output {
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free */
+/* Input (32 bytes) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_filter_free_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
@@ -4773,21 +4824,107 @@ struct hwrm_nvm_read_output {
u8 valid;
};
-/* hwrm_nvm_raw_dump */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_dump_input {
+/* hwrm_wol_filter_qcfg */
+/* Input (56 bytes) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ __le32 unused_0;
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3[3];
+ u8 unused_4;
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ __le16 unused_5[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg */
+/* Input (40 bytes) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ __le16 unused_4[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ u8 wol_pkt_len;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
__le32 offset;
__le32 len;
+ __le32 unused_2;
};
/* Output (16 bytes) */
-struct hwrm_nvm_raw_dump_output {
+struct hwrm_nvm_read_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
@@ -4881,6 +5018,15 @@ struct hwrm_nvm_write_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ u8 unused_0[7];
+};
+
/* hwrm_nvm_modify */
/* Input (40 bytes) */
struct hwrm_nvm_modify_input {
@@ -5112,6 +5258,100 @@ struct hwrm_nvm_install_update_cmd_err {
u8 unused_0[7];
};
+/* hwrm_selftest_qlist */
+/* Input (16 bytes) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (248 bytes) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1;
+ u8 unused_2;
+ char test0_name[32];
+ char test1_name[32];
+ char test2_name[32];
+ char test3_name[32];
+ char test4_name[32];
+ char test5_name[32];
+ char test6_name[32];
+ char test7_name[32];
+};
+
+/* hwrm_selftest_exec */
+/* Input (24 bytes) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ __le16 unused_0[3];
+};
+
+/* hwrm_selftest_irq */
+/* Input (16 bytes) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
/* Hardware Resource Manager Specification */
/* Input (16 bytes) */
struct input {
@@ -5130,6 +5370,16 @@ struct output {
__le16 resp_len;
};
+/* Short Command Structure (16 bytes) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ __le16 unused_0;
+ __le16 size;
+ __le64 req_addr;
+};
+
/* Command numbering (8 bytes) */
struct cmd_nums {
__le16 req_type;
@@ -5252,11 +5502,15 @@ struct cmd_nums {
#define HWRM_CFA_FLOW_FLUSH (0x105UL)
#define HWRM_CFA_FLOW_STATS (0x106UL)
#define HWRM_CFA_FLOW_INFO (0x107UL)
+ #define HWRM_SELFTEST_QLIST (0x200UL)
+ #define HWRM_SELFTEST_EXEC (0x201UL)
+ #define HWRM_SELFTEST_IRQ (0x202UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
#define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
#define HWRM_NVM_FLUSH (0xfff0UL)
#define HWRM_NVM_GET_VARIABLE (0xfff1UL)
@@ -5464,6 +5718,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
__le16 len;
u8 version;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0b8cd7443843..b8e7248294d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -84,6 +85,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
u32 func_flags;
int rc;
+ if (bp->hwrm_spec_code < 0x10701)
+ return -ENOTSUPP;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
@@ -96,9 +100,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
func_flags = vf->func_flags;
if (setting)
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
else
- func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
/* TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
@@ -134,8 +138,11 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
ivi->vlan = vf->vlan;
- ivi->qos = vf->flags & BNXT_VF_QOS;
- ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+ if (vf->flags & BNXT_VF_QOS)
+ ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
+ else
+ ivi->qos = 0;
+ ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->flags & BNXT_VF_LINK_UP)
@@ -300,7 +307,6 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
for (i = 0; i < num_vfs; i++) {
vf = &bp->pf.vf[i];
memset(vf, 0, sizeof(*vf));
- vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
}
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 1ab72e4820af..dbc8d977fc5a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 899c30fb5188..9dae32756767 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -19,11 +19,10 @@
#include "bnxt.h"
#include "bnxt_xdp.h"
-static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod)
{
struct bnxt_sw_tx_bd *tx_buf;
- struct tx_bd_ext *txbd1;
struct tx_bd *txbd;
u32 flags;
u16 prod;
@@ -33,23 +32,13 @@ static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
tx_buf->rx_prod = rx_prod;
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+ flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
prod = NEXT_TX(prod);
- txbd1 = (struct tx_bd_ext *)
- &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-
- txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
- txbd1->tx_bd_mss = cpu_to_le32(0);
- txbd1->tx_bd_cfa_action = cpu_to_le32(0);
- txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
-
- prod = NEXT_TX(prod);
txr->tx_prod = prod;
}
@@ -66,7 +55,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
last_tx_cons = tx_cons;
tx_cons = NEXT_TX(tx_cons);
- tx_cons = NEXT_TX(tx_cons);
}
txr->tx_cons = tx_cons;
if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
@@ -133,7 +121,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
case XDP_TX:
- if (tx_avail < 2) {
+ if (tx_avail < 1) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index b529f2c5355b..12a5ad66b564 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,6 +10,8 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 365895ed3c3e..a205a9ff9e17 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -621,7 +621,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* GENET TDMA hardware does not support a configurable timeout, but will
* always generate an interrupt either after MBDONE packets have been
- * transmitted, or when the ring is emtpy.
+ * transmitted, or when the ring is empty.
*/
if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -707,6 +707,19 @@ struct bcmgenet_stats {
.reg_offset = offset, \
}
+#define STAT_GENET_Q(num) \
+ STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
+ tx_rings[num].packets), \
+ STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
+ tx_rings[num].bytes), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].bytes), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
+ rx_rings[num].packets), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
+ rx_rings[num].errors), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].dropped)
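
The macro builds the per-queue ethtool stat names at compile time via token stringification. A standalone sketch of the same expansion, using local stand-ins for the kernel's __stringify():

#include <stdio.h>

/* the kernel's two-step expansion, so macro arguments are
 * expanded before being turned into string literals
 */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

int main(void)
{
	/* STAT_GENET_Q(0) and STAT_GENET_Q(16) generate names such as: */
	printf("%s\n", "txq" __stringify(0) "_packets");	/* txq0_packets */
	printf("%s\n", "rxq" __stringify(16) "_dropped");	/* rxq16_dropped */
	return 0;
}
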
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -801,6 +814,12 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ /* Per TX queues */
+ STAT_GENET_Q(0),
+ STAT_GENET_Q(1),
+ STAT_GENET_Q(2),
+ STAT_GENET_Q(3),
+ STAT_GENET_Q(16),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -1078,8 +1097,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
/* Power down LED */
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= (EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv))
+ reg |= EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR;
+ else
+ reg |= EXT_PWR_DOWN_PHY;
+
+ reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
bcmgenet_phy_power_set(priv->dev, false);
@@ -1104,12 +1132,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_BIAS);
- /* fallthrough */
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv)) {
+ reg &= ~(EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR);
+ reg |= EXT_PHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ mdelay(1);
+
+ reg &= ~EXT_PHY_RESET;
+ } else {
+ reg &= ~EXT_PWR_DOWN_PHY;
+ reg |= EXT_PWR_DN_EN_LD;
+ }
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_phy_power_set(priv->dev, true);
+ bcmgenet_mii_reset(priv->dev);
+ break;
+
case GENET_POWER_CABLE_SENSE:
/* enable APD */
- reg |= EXT_PWR_DN_EN_LD;
+ if (!GENET_IS_V5(priv)) {
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
break;
case GENET_POWER_WOL_MAGIC:
bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1117,39 +1167,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
default:
break;
}
-
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- if (mode == GENET_POWER_PASSIVE) {
- bcmgenet_phy_power_set(priv->dev, true);
- bcmgenet_mii_reset(priv->dev);
- }
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int val = 0;
if (!netif_running(dev))
return -EINVAL;
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- if (!priv->phydev)
- val = -ENODEV;
- else
- val = phy_mii_ioctl(priv->phydev, rq, cmd);
- break;
-
- default:
- val = -EINVAL;
- break;
- }
+ if (!priv->phydev)
+ return -ENODEV;
- return val;
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1240,14 +1271,18 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
unsigned int txbds_ready;
unsigned int txbds_processed = 0;
- /* Compute how many buffers are transmitted since last xmit call */
- c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
- c_index &= DMA_C_INDEX_MASK;
-
- if (likely(c_index >= ring->c_index))
- txbds_ready = c_index - ring->c_index;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX)
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
+ INTRL2_CPU_CLEAR);
else
- txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ INTRL2_CPU_CLEAR);
+
+ /* Compute how many buffers are transmitted since last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
+ & DMA_C_INDEX_MASK;
+ txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, tx_done, dev,
"%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
@@ -1280,15 +1315,15 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
}
ring->free_bds += txbds_processed;
- ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ ring->c_index = c_index;
- dev->stats.tx_packets += pkts_compl;
- dev->stats.tx_bytes += bytes_compl;
+ ring->packets += pkts_compl;
+ ring->bytes += bytes_compl;
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
pkts_compl, bytes_compl);
- return pkts_compl;
+ return txbds_processed;
}
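
Since TDMA_CONS_INDEX is a 16-bit counter, the branchy wraparound computation and the masked subtraction that replaces it are equivalent; the masked form is just shorter. A small check, assuming the same 0xffff mask:

#include <assert.h>
#include <stdio.h>

#define DMA_C_INDEX_MASK 0xffff

/* old, branchy form */
static unsigned int ready_old(unsigned int c, unsigned int old)
{
	if (c >= old)
		return c - old;
	return (DMA_C_INDEX_MASK + 1) - old + c;
}

/* new form: masked modular subtraction */
static unsigned int ready_new(unsigned int c, unsigned int old)
{
	return (c - old) & DMA_C_INDEX_MASK;
}

int main(void)
{
	assert(ready_old(5, 0xfffe) == ready_new(5, 0xfffe));	/* wrapped */
	assert(ready_old(100, 40) == ready_new(100, 40));	/* normal */
	printf("wrapped case: %u\n", ready_new(5, 0xfffe));	/* 7 */
	return 0;
}
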
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@ -1657,18 +1692,28 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned long dma_flag;
int len;
unsigned int rxpktprocessed = 0, rxpkttoprocess;
- unsigned int p_index;
+ unsigned int p_index, mask;
unsigned int discards;
unsigned int chksum_ok = 0;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX) {
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
+ INTRL2_CPU_CLEAR);
+ } else {
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv,
+ mask,
+ INTRL2_CPU_CLEAR);
+ }
+
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- dev->stats.rx_missed_errors += discards;
- dev->stats.rx_errors += discards;
+ ring->errors += discards;
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -1680,12 +1725,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
}
p_index &= DMA_P_INDEX_MASK;
-
- if (likely(p_index >= ring->c_index))
- rxpkttoprocess = p_index - ring->c_index;
- else
- rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
- p_index;
+ rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, rx_status, dev,
"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1696,7 +1736,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ ring->dropped++;
goto next;
}
@@ -1724,7 +1764,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- dev->stats.rx_errors++;
+ ring->errors++;
dev_kfree_skb_any(skb);
goto next;
}
@@ -1773,8 +1813,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ ring->packets++;
+ ring->bytes += len;
if (dma_flag & DMA_RX_MULT)
dev->stats.multicast++;
@@ -1912,10 +1952,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
/* Mask all interrupts.*/
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1942,8 +1980,6 @@ static int init_umac(struct bcmgenet_priv *priv)
int ret;
u32 reg;
u32 int0_enable = 0;
- u32 int1_enable = 0;
- int i;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1970,12 +2006,6 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- /* Enable Rx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_RXDMA_DONE;
-
- /* Enable Tx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_TXDMA_DONE;
-
/* Configure backpressure vectors for MoCA */
if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
reg = bcmgenet_bp_mc_get(priv);
@@ -1993,18 +2023,8 @@ static int init_umac(struct bcmgenet_priv *priv)
if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
- /* Enable Rx priority queue interrupts */
- for (i = 0; i < priv->hw_params->rx_queues; ++i)
- int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
-
- /* Enable Tx priority queue interrupts */
- for (i = 0; i < priv->hw_params->tx_queues; ++i)
- int1_enable |= (1 << i);
-
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- /* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
return 0;
@@ -2136,22 +2156,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_tx_ring *ring;
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << i);
}
ring = &priv->tx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_disable = 0xffff;
struct bcmgenet_tx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
@@ -2264,22 +2295,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_rx_ring *ring;
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
}
ring = &priv->rx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
struct bcmgenet_rx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
@@ -2634,6 +2676,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+ UMAC_IRQ_LINK_EVENT |
+ UMAC_IRQ_HFB_SM |
+ UMAC_IRQ_HFB_MM)) {
+ /* all other interrupts of interest are handled in the bottom half */
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
@@ -2921,7 +2972,7 @@ static int bcmgenet_close(struct net_device *dev)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA must be disabled before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
@@ -3101,6 +3152,48 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
+static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long tx_bytes = 0, tx_packets = 0;
+ unsigned long rx_bytes = 0, rx_packets = 0;
+ unsigned long rx_errors = 0, rx_dropped = 0;
+ struct bcmgenet_tx_ring *tx_ring;
+ struct bcmgenet_rx_ring *rx_ring;
+ unsigned int q;
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++) {
+ tx_ring = &priv->tx_rings[q];
+ tx_bytes += tx_ring->bytes;
+ tx_packets += tx_ring->packets;
+ }
+ tx_ring = &priv->tx_rings[DESC_INDEX];
+ tx_bytes += tx_ring->bytes;
+ tx_packets += tx_ring->packets;
+
+ for (q = 0; q < priv->hw_params->rx_queues; q++) {
+ rx_ring = &priv->rx_rings[q];
+
+ rx_bytes += rx_ring->bytes;
+ rx_packets += rx_ring->packets;
+ rx_errors += rx_ring->errors;
+ rx_dropped += rx_ring->dropped;
+ }
+ rx_ring = &priv->rx_rings[DESC_INDEX];
+ rx_bytes += rx_ring->bytes;
+ rx_packets += rx_ring->packets;
+ rx_errors += rx_ring->errors;
+ rx_dropped += rx_ring->dropped;
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_errors = rx_errors;
+ dev->stats.rx_missed_errors = rx_errors;
+ return &dev->stats;
+}
+
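
bcmgenet_get_stats() folds the new per-ring counters back into the classic net_device_stats, so interface-level totals stay correct alongside the per-queue ethtool stats. The aggregation is a plain sum over the rings; a trivial sketch with made-up counters:

#include <stdio.h>

struct ring_stats { unsigned long packets, bytes; };

int main(void)
{
	/* hypothetical per-queue counters: queues 0..3 plus the default ring */
	struct ring_stats tx[] = { {10, 1500}, {4, 400}, {0, 0}, {2, 128}, {7, 901} };
	unsigned long tx_packets = 0, tx_bytes = 0;
	unsigned int q;

	for (q = 0; q < sizeof(tx) / sizeof(tx[0]); q++) {
		tx_packets += tx[q].packets;
		tx_bytes += tx[q].bytes;
	}
	printf("tx_packets=%lu tx_bytes=%lu\n", tx_packets, tx_bytes);
	return 0;
}
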
static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_open = bcmgenet_open,
.ndo_stop = bcmgenet_close,
@@ -3113,6 +3206,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
#endif
+ .ndo_get_stats = bcmgenet_get_stats,
};
/* Array of GENET hardware parameters/characteristics */
@@ -3186,6 +3280,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
},
+ [GENET_V5] = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+ },
};
/* Infer hardware parameters from the detected GENET version */
@@ -3196,26 +3309,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
u8 major;
u16 gphy_rev;
- if (GENET_IS_V4(priv)) {
+ if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v4;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V4;
} else if (GENET_IS_V3(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V3;
} else if (GENET_IS_V2(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V2;
} else if (GENET_IS_V1(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V1;
}
/* enum genet_version starts at 1 */
@@ -3225,7 +3334,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 5)
+ if (major == 6)
+ major = 5;
+ else if (major == 5)
major = 4;
else if (major == 0)
major = 1;
@@ -3253,19 +3364,25 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
+ if (GENET_IS_V5(priv)) {
+ /* The EPHY revision should come from the MDIO registers of
+ * the PHY not from GENET.
+ */
+ if (gphy_rev != 0) {
+ pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
+ gphy_rev);
+ }
/* This is reserved so should require special treatment */
- if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
return;
- }
-
/* This is the good old scheme, just GPHY major, no minor nor patch */
- if ((gphy_rev & 0xf0) != 0)
+ } else if ((gphy_rev & 0xf0) != 0) {
priv->gphy_rev = gphy_rev << 8;
-
/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
- else if ((gphy_rev & 0xff00) != 0)
+ } else if ((gphy_rev & 0xff00) != 0) {
priv->gphy_rev = gphy_rev;
+ }
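
The revision decode distinguishes three encodings read from the same 16-bit field. A compact sketch of the normalization the restructured chain performs (thresholds mirror the masks above):

#include <stdio.h>

static unsigned int normalize_gphy_rev(unsigned int gphy_rev)
{
	if (gphy_rev == 0 || gphy_rev == 0x01ff)
		return 0;			/* reserved/invalid */
	if (gphy_rev & 0xf0)
		return gphy_rev << 8;		/* old scheme: major only */
	if (gphy_rev & 0xff00)
		return gphy_rev;		/* new scheme: major.minor */
	return 0;
}

int main(void)
{
	printf("0x%04x -> 0x%08x\n", 0x00a0, normalize_gphy_rev(0x00a0));
	printf("0x%04x -> 0x%08x\n", 0x1000, normalize_gphy_rev(0x1000));
	return 0;
}
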
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
@@ -3295,6 +3412,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3493,7 +3611,7 @@ static int bcmgenet_suspend(struct device *d)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA must be disabled before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index db7f289d65ae..efd07020b89f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -355,8 +355,14 @@ struct bcmgenet_mib_counters {
#define EXT_PWR_DN_EN_LD (1 << 3)
#define EXT_ENERGY_DET (1 << 4)
#define EXT_IDDQ_FROM_PHY (1 << 5)
+#define EXT_IDDQ_GLBL_PWR (1 << 7)
#define EXT_PHY_RESET (1 << 8)
#define EXT_ENERGY_DET_MASK (1 << 12)
+#define EXT_PWR_DOWN_PHY_TX (1 << 16)
+#define EXT_PWR_DOWN_PHY_RX (1 << 17)
+#define EXT_PWR_DOWN_PHY_SD (1 << 18)
+#define EXT_PWR_DOWN_PHY_RD (1 << 19)
+#define EXT_PWR_DOWN_PHY_EN (1 << 20)
#define EXT_RGMII_OOB_CTRL 0x0C
#define RGMII_LINK (1 << 4)
@@ -499,13 +505,15 @@ enum bcmgenet_version {
GENET_V1 = 1,
GENET_V2,
GENET_V3,
- GENET_V4
+ GENET_V4,
+ GENET_V5
};
#define GENET_IS_V1(p) ((p)->version == GENET_V1)
#define GENET_IS_V2(p) ((p)->version == GENET_V2)
#define GENET_IS_V3(p) ((p)->version == GENET_V3)
#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+#define GENET_IS_V5(p) ((p)->version == GENET_V5)
/* Hardware flags */
#define GENET_HAS_40BITS (1 << 0)
@@ -544,6 +552,8 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
+ unsigned long packets;
+ unsigned long bytes;
unsigned int index; /* ring index */
unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -562,6 +572,10 @@ struct bcmgenet_tx_ring {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
+ unsigned long bytes;
+ unsigned long packets;
+ unsigned long errors;
+ unsigned long dropped;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index b97122926d3a..2fbd027f0148 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -127,7 +127,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- u32 cpu_mask_clear;
int retries = 0;
u32 reg;
@@ -173,18 +172,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
- /* Enable the MPD interrupt */
- cpu_mask_clear = UMAC_IRQ_MPD_R;
-
- bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
-
return 0;
}
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
- u32 cpu_mask_set;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -201,10 +194,4 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
priv->crc_fwd_en = 0;
-
- /* Stop monitoring magic packet IRQ */
- cpu_mask_set = UMAC_IRQ_MPD_R;
-
- /* Stop monitoring magic packet IRQ */
- bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 2f9281936f0e..285676f8da6b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -195,39 +195,43 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (!GENET_IS_V4(priv))
- return;
-
- reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
- if (enable) {
- reg &= ~EXT_CK25_DIS;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(1);
-
- reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
- reg |= EXT_GPHY_RESET;
+ if (GENET_IS_V4(priv)) {
+ reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+ if (enable) {
+ reg &= ~EXT_CK25_DIS;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~EXT_GPHY_RESET;
+ } else {
+ reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
+ EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+ reg |= EXT_CK25_DIS;
+ }
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(1);
-
- reg &= ~EXT_GPHY_RESET;
+ udelay(60);
} else {
- reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
- reg |= EXT_CK25_DIS;
}
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- udelay(60);
}
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
u32 reg;
- /* Speed settings are set in bcmgenet_mii_setup() */
- reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
- reg |= LED_ACT_SOURCE_MAC;
- bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+ if (!GENET_IS_V5(priv)) {
+ /* Speed settings are set in bcmgenet_mii_setup() */
+ reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+ reg |= LED_ACT_SOURCE_MAC;
+ bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+ }
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
fixed_phy_set_link_update(priv->phydev,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 30d1eb9ebec9..f395b951f5e7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -825,6 +825,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
return timeout_us ? 0 : -EBUSY;
}
+#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
u32 i, apedata;
@@ -904,6 +905,7 @@ static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
return 0;
}
+#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
@@ -10744,6 +10746,7 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
+#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
int i;
@@ -10826,6 +10829,10 @@ static void tg3_hwmon_open(struct tg3 *tp)
dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
}
}
+#else
+static inline void tg3_hwmon_close(struct tg3 *tp) { }
+static inline void tg3_hwmon_open(struct tg3 *tp) { }
+#endif /* CONFIG_TIGON3_HWMON */
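
Providing empty static inline stubs in the #else branch is the usual way to keep call sites free of #ifdefs when a config option is off; tg3_hwmon_open()/tg3_hwmon_close() then compile away entirely. A minimal illustration of the pattern (feature name hypothetical):

#include <stdio.h>

/* build with -DHAS_FEATURE for the real body, as the hunks above
 * do with CONFIG_TIGON3_HWMON
 */
#ifdef HAS_FEATURE
static void feature_open(void) { printf("feature enabled\n"); }
#else
static inline void feature_open(void) { }	/* no-op stub */
#endif

int main(void)
{
	feature_open();		/* callers stay unconditional either way */
	return 0;
}
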
#define TG3_STAT_ADD32(PSTAT, REG) \
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 30606b11b128..91f7492623d3 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -432,15 +432,17 @@ static int macb_mii_probe(struct net_device *dev)
}
pdata = dev_get_platdata(&bp->pdev->dev);
- if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
- "phy int");
- if (!ret) {
- phy_irq = gpio_to_irq(pdata->phy_irq_pin);
- phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ if (pdata) {
+ if (gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev,
+ pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ }
+ } else {
+ phydev->irq = PHY_POLL;
}
- } else {
- phydev->irq = PHY_POLL;
}
/* attach the mac to the phy */
@@ -684,8 +686,8 @@ static void macb_tx_error_task(struct work_struct *work)
netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
- bp->stats.tx_packets++;
- bp->stats.tx_bytes += skb->len;
+ bp->dev->stats.tx_packets++;
+ bp->dev->stats.tx_bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -778,8 +780,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
- bp->stats.tx_packets++;
- bp->stats.tx_bytes += skb->len;
+ bp->dev->stats.tx_packets++;
+ bp->dev->stats.tx_bytes += skb->len;
}
/* Now we can safely release resources */
@@ -911,14 +913,14 @@ static int gem_rx(struct macb *bp, int budget)
if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
netdev_err(bp->dev,
"not whole frame pointed by descriptor\n");
- bp->stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
break;
}
skb = bp->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(bp->dev,
"inconsistent Rx descriptor chain\n");
- bp->stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
break;
}
/* now everything is ready for receiving packet */
@@ -938,8 +940,8 @@ static int gem_rx(struct macb *bp, int budget)
GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- bp->stats.rx_packets++;
- bp->stats.rx_bytes += skb->len;
+ bp->dev->stats.rx_packets++;
+ bp->dev->stats.rx_bytes += skb->len;
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
@@ -984,7 +986,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
*/
skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
if (!skb) {
- bp->stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
for (frag = first_frag; ; frag++) {
desc = macb_rx_desc(bp, frag);
desc->addr &= ~MACB_BIT(RX_USED);
@@ -1030,8 +1032,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
__skb_pull(skb, NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, bp->dev);
- bp->stats.rx_packets++;
- bp->stats.rx_bytes += skb->len;
+ bp->dev->stats.rx_packets++;
+ bp->dev->stats.rx_bytes += skb->len;
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
netif_receive_skb(skb);
@@ -2210,7 +2212,7 @@ static void gem_update_stats(struct macb *bp)
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->stats;
+ struct net_device_stats *nstat = &bp->dev->stats;
gem_update_stats(bp);
@@ -2281,7 +2283,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->stats;
+ struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
if (macb_is_gem(bp))
@@ -2993,15 +2995,15 @@ static void at91ether_rx(struct net_device *dev)
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pktlen;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pktlen;
netif_rx(skb);
} else {
- lp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
- lp->stats.multicast++;
+ dev->stats.multicast++;
/* reset ownership bit */
desc->addr &= ~MACB_BIT(RX_USED);
@@ -3036,15 +3038,15 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
if (intstatus & MACB_BIT(TCOMP)) {
/* The TCOM bit is set even if the transmission failed */
if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
- lp->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
lp->skb = NULL;
dma_unmap_single(NULL, lp->skb_physaddr,
lp->skb_length, DMA_TO_DEVICE);
- lp->stats.tx_packets++;
- lp->stats.tx_bytes += lp->skb_length;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += lp->skb_length;
}
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 234a49eaccfd..ec037b0fa2a4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -919,7 +919,6 @@ struct macb {
struct clk *rx_clk;
struct net_device *dev;
struct napi_struct napi;
- struct net_device_stats stats;
union {
struct macb_stats macb;
struct gem_stats gem;
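
With the private copy gone, both the GEM and MACB paths update the counters embedded in struct net_device, so the stats getters and the fast paths can never disagree. A toy sketch of the resulting layout (struct fields reduced to the minimum):

#include <stdio.h>

struct net_device_stats { unsigned long rx_packets, rx_bytes; };
struct net_device { struct net_device_stats stats; };

int main(void)
{
	struct net_device dev = { { 0, 0 } };

	/* the fast path increments the embedded counters directly,
	 * e.g. bp->dev->stats.rx_packets++ in gem_rx()
	 */
	dev.stats.rx_packets++;
	dev.stats.rx_bytes += 64;
	printf("rx_packets=%lu rx_bytes=%lu\n",
	       dev.stats.rx_packets, dev.stats.rx_bytes);
	return 0;
}
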
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 2fedd91f3df8..dee604651ba7 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -43,6 +43,8 @@ struct octeon_cn23xx_pf {
struct octeon_config *conf;
};
+#define CN23XX_SLI_DEF_BP 0x40
+
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
int validate_cn23xx_pf_config_info(struct octeon_device *oct,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index f629c2fe04a4..796c2cbc11f6 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -26,6 +26,9 @@
#include "octeon_main.h"
#include "octeon_network.h"
+/* OOM task polling interval */
+#define LIO_OOM_POLL_INTERVAL_MS 250
+
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
struct lio *lio = GET_LIO(netdev);
@@ -124,6 +127,17 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
struct octeon_device *oct = lio->oct_dev;
u8 *mac;
+ if (nctrl->completion && nctrl->response_code) {
+ /* Signal whoever is interested that the response code from the
+ * firmware has arrived.
+ */
+ WRITE_ONCE(*nctrl->response_code, nctrl->status);
+ complete(nctrl->completion);
+ }
+
+ if (nctrl->status)
+ return;
+
switch (nctrl->ncmd.s.cmd) {
case OCTNET_CMD_CHANGE_DEVFLAGS:
case OCTNET_CMD_SET_MULTI_LIST:
@@ -131,11 +145,20 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
case OCTNET_CMD_CHANGE_MACADDR:
mac = ((u8 *)&nctrl->udd[0]) + 2;
- netif_info(lio, probe, lio->netdev,
- "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- mac[0], mac[1],
- mac[2], mac[3],
- mac[4], mac[5]);
+ if (nctrl->ncmd.s.param1) {
+ /* vfidx is 0 based, but vf_num (param1) is 1 based */
+ int vfidx = nctrl->ncmd.s.param1 - 1;
+ bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
+
+ if (mac_is_admin_assigned)
+ netif_info(lio, probe, lio->netdev,
+ "MAC Address %pM is configured for VF %d\n",
+ mac, vfidx);
+ } else {
+ netif_info(lio, probe, lio->netdev,
+ " MACAddr changed to %pM\n",
+ mac);
+ }
break;
case OCTNET_CMD_CHANGE_MTU:
@@ -284,3 +307,56 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
* the PF did that already
*/
}
+
+static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_droq *droq;
+ int q, q_no = 0;
+
+ if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+ for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ droq = oct->droq[q_no];
+ if (!droq)
+ continue;
+ octeon_droq_check_oom(droq);
+ }
+ }
+ queue_delayed_work(lio->rxq_status_wq.wq,
+ &lio->rxq_status_wq.wk.work,
+ msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
+int setup_rx_oom_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->rxq_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
+ octnet_poll_check_rxq_oom_status);
+ lio->rxq_status_wq.wk.ctxptr = lio;
+ queue_delayed_work(lio->rxq_status_wq.wq,
+ &lio->rxq_status_wq.wk.work,
+ msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+ return 0;
+}
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->rxq_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
+ flush_workqueue(lio->rxq_status_wq.wq);
+ destroy_workqueue(lio->rxq_status_wq.wq);
+ }
+}
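
The OOM poller is a self-rearming delayed work: each run checks every active droq and queues itself again LIO_OOM_POLL_INTERVAL_MS later. A user-space analog of that cadence (the check body is a stand-in):

#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_MS 250	/* mirrors LIO_OOM_POLL_INTERVAL_MS */

/* stand-in for walking the droqs and calling octeon_droq_check_oom() */
static void check_rx_queues(void)
{
}

int main(void)
{
	for (int pass = 0; pass < 3; pass++) {	/* the real work requeues forever */
		check_rx_queues();
		usleep(POLL_INTERVAL_MS * 1000);
		printf("pass %d done\n", pass);
	}
	return 0;
}
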
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 50384cede8be..579dc7336f58 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -33,6 +33,19 @@
static int octnet_get_link_stats(struct net_device *netdev);
+struct oct_intrmod_context {
+ int octeon_id;
+ wait_queue_head_t wc;
+ int cond;
+ int status;
+};
+
+struct oct_intrmod_resp {
+ u64 rh;
+ struct oct_intrmod_cfg intrmod;
+ u64 status;
+};
+
struct oct_mdio_cmd_context {
int octeon_id;
wait_queue_head_t wc;
@@ -213,17 +226,23 @@ static int lio_get_link_ksettings(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
- u32 supported, advertising;
+ u32 supported = 0, advertising = 0;
linfo = &lio->linfo;
if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
ecmd->base.port = PORT_FIBRE;
- supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
- SUPPORTED_Pause);
- advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
+
+ if (linfo->link.s.speed == SPEED_10000) {
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
+ advertising |= ADVERTISED_Pause;
ethtool_convert_legacy_u32_to_link_mode(
ecmd->link_modes.supported, supported);
ethtool_convert_legacy_u32_to_link_mode(
@@ -1292,95 +1311,101 @@ static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
}
}
-static int lio_get_intr_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *intr_coal)
+/* Callback function for intrmod */
+static void octnet_intrmod_callback(struct octeon_device *oct_dev,
+ u32 status,
+ void *ptr)
{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- struct octeon_instr_queue *iq;
- struct oct_intrmod_cfg *intrmod_cfg;
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_intrmod_context *ctx;
- intrmod_cfg = &oct->intrmod;
+ ctx = (struct oct_intrmod_context *)sc->ctxptr;
- switch (oct->chip_id) {
- case OCTEON_CN23XX_PF_VID:
- case OCTEON_CN23XX_VF_VID:
- if (!intrmod_cfg->rx_enable) {
- intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
- intr_coal->rx_max_coalesced_frames =
- intrmod_cfg->rx_frames;
- }
- if (!intrmod_cfg->tx_enable)
- intr_coal->tx_max_coalesced_frames =
- intrmod_cfg->tx_frames;
- break;
- case OCTEON_CN68XX:
- case OCTEON_CN66XX: {
- struct octeon_cn6xxx *cn6xxx =
- (struct octeon_cn6xxx *)oct->chip;
+ ctx->status = status;
- if (!intrmod_cfg->rx_enable) {
- intr_coal->rx_coalesce_usecs =
- CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
- intr_coal->rx_max_coalesced_frames =
- CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
- }
- iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
- intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
- break;
- }
- default:
- netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ WRITE_ONCE(ctx->cond, 1);
+
+ /* This barrier is required to ensure that the response has been
+ * fully written before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/* get interrupt moderation parameters */
+static int octnet_get_intrmod_cfg(struct lio *lio,
+ struct oct_intrmod_cfg *intr_cfg)
+{
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_context *ctx;
+ struct oct_intrmod_resp *resp;
+ int retval;
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_intrmod_resp),
+ sizeof(struct oct_intrmod_context));
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_intrmod_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_intrmod_resp));
+
+ ctx = (struct oct_intrmod_context *)sc->ctxptr;
+ memset(ctx, 0, sizeof(struct oct_intrmod_context));
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct_dev);
+ init_waitqueue_head(&ctx->wc);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
+
+ sc->callback = octnet_intrmod_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 1000;
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
return -EINVAL;
}
- if (intrmod_cfg->rx_enable) {
- intr_coal->use_adaptive_rx_coalesce =
- intrmod_cfg->rx_enable;
- intr_coal->rate_sample_interval =
- intrmod_cfg->check_intrvl;
- intr_coal->pkt_rate_high =
- intrmod_cfg->maxpkt_ratethr;
- intr_coal->pkt_rate_low =
- intrmod_cfg->minpkt_ratethr;
- intr_coal->rx_max_coalesced_frames_high =
- intrmod_cfg->rx_maxcnt_trigger;
- intr_coal->rx_coalesce_usecs_high =
- intrmod_cfg->rx_maxtmr_trigger;
- intr_coal->rx_coalesce_usecs_low =
- intrmod_cfg->rx_mintmr_trigger;
- intr_coal->rx_max_coalesced_frames_low =
- intrmod_cfg->rx_mincnt_trigger;
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+ dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
+ goto intrmod_info_wait_intr;
}
- if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
- (intrmod_cfg->tx_enable)) {
- intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
- intr_coal->tx_max_coalesced_frames_high =
- intrmod_cfg->tx_maxcnt_trigger;
- intr_coal->tx_max_coalesced_frames_low =
- intrmod_cfg->tx_mincnt_trigger;
+
+ retval = ctx->status || resp->status;
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "Get interrupt moderation parameters failed\n");
+ goto intrmod_info_wait_fail;
}
- return 0;
-}
-/* Callback function for intrmod */
-static void octnet_intrmod_callback(struct octeon_device *oct_dev,
- u32 status,
- void *ptr)
-{
- struct oct_intrmod_cmd *cmd = ptr;
- struct octeon_soft_command *sc = cmd->sc;
+ octeon_swap_8B_data((u64 *)&resp->intrmod,
+ (sizeof(struct oct_intrmod_cfg)) / 8);
+ memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
+ octeon_free_soft_command(oct_dev, sc);
- oct_dev = cmd->oct_dev;
+ return 0;
- if (status)
- dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
- CVM_CAST64(status));
- else
- dev_info(&oct_dev->pci_dev->dev,
- "Rx-Adaptive Interrupt moderation enabled:%llx\n",
- oct_dev->intrmod.rx_enable);
+intrmod_info_wait_fail:
octeon_free_soft_command(oct_dev, sc);
+
+intrmod_info_wait_intr:
+
+ return -ENODEV;
}
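
The get path now mirrors the set path: issue a soft command, then sleep on a wait queue until the callback publishes the status and flips the condition. A user-space analog of that handshake using pthreads (names are illustrative; the kernel side uses wake_up_interruptible() and sleep_cond()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wc = PTHREAD_COND_INITIALIZER;
static int cond_flag, status;

static void *callback(void *arg)
{
	pthread_mutex_lock(&lock);
	status = 0;			/* response code from the "firmware" */
	cond_flag = 1;			/* the condition the waiter checks */
	pthread_cond_signal(&wc);	/* like wake_up_interruptible(&ctx->wc) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, callback, NULL);
	pthread_mutex_lock(&lock);
	while (!cond_flag)		/* like sleep_cond(&ctx->wc, &ctx->cond) */
		pthread_cond_wait(&wc, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("status=%d\n", status);
	return 0;
}
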
/* Configure interrupt moderation parameters */
@@ -1388,7 +1413,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
struct oct_intrmod_cfg *intr_cfg)
{
struct octeon_soft_command *sc;
- struct oct_intrmod_cmd *cmd;
+ struct oct_intrmod_context *ctx;
struct oct_intrmod_cfg *cfg;
int retval;
struct octeon_device *oct_dev = lio->oct_dev;
@@ -1398,19 +1423,21 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
octeon_alloc_soft_command(oct_dev,
sizeof(struct oct_intrmod_cfg),
0,
- sizeof(struct oct_intrmod_cmd));
+ sizeof(struct oct_intrmod_context));
if (!sc)
return -ENOMEM;
- cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
+ ctx = (struct oct_intrmod_context *)sc->ctxptr;
+
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct_dev);
+ init_waitqueue_head(&ctx->wc);
+
cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
- cmd->sc = sc;
- cmd->cfg = cfg;
- cmd->oct_dev = oct_dev;
sc->iq_no = lio->linfo.txpciq[0].s.q_no;
@@ -1418,7 +1445,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
sc->callback = octnet_intrmod_callback;
- sc->callback_arg = cmd;
+ sc->callback_arg = sc;
sc->wait_time = 1000;
retval = octeon_send_soft_command(oct_dev, sc);
@@ -1427,7 +1454,29 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
return -EINVAL;
}
- return 0;
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
+ retval = ctx->status;
+ if (retval)
+ dev_err(&oct_dev->pci_dev->dev,
+ "intrmod config failed. Status: %llx\n",
+ CVM_CAST64(retval));
+ else
+ dev_info(&oct_dev->pci_dev->dev,
+ "Rx-Adaptive Interrupt moderation %s\n",
+ (intr_cfg->rx_enable) ?
+ "enabled" : "disabled");
+
+ octeon_free_soft_command(oct_dev, sc);
+
+ return ((retval) ? -ENODEV : 0);
+ }
+
+ dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");
+
+ return -EINTR;
}
static void
@@ -1584,80 +1633,106 @@ static int octnet_get_link_stats(struct net_device *netdev)
return 0;
}
-/* Enable/Disable auto interrupt Moderation */
-static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
- *intr_coal)
+static int lio_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal)
{
- int ret = 0;
+ struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct oct_intrmod_cfg *intrmod_cfg;
-
- intrmod_cfg = &oct->intrmod;
-
- if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
- if (intr_coal->rate_sample_interval)
- intrmod_cfg->check_intrvl =
- intr_coal->rate_sample_interval;
- else
- intrmod_cfg->check_intrvl =
- LIO_INTRMOD_CHECK_INTERVAL;
+ struct octeon_instr_queue *iq;
+ struct oct_intrmod_cfg intrmod_cfg;
- if (intr_coal->pkt_rate_high)
- intrmod_cfg->maxpkt_ratethr =
- intr_coal->pkt_rate_high;
- else
- intrmod_cfg->maxpkt_ratethr =
- LIO_INTRMOD_MAXPKT_RATETHR;
+ if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
+ return -ENODEV;
- if (intr_coal->pkt_rate_low)
- intrmod_cfg->minpkt_ratethr =
- intr_coal->pkt_rate_low;
- else
- intrmod_cfg->minpkt_ratethr =
- LIO_INTRMOD_MINPKT_RATETHR;
+ switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID: {
+ if (!intrmod_cfg.rx_enable) {
+ intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
+ intr_coal->rx_max_coalesced_frames =
+ oct->rx_max_coalesced_frames;
+ }
+ if (!intrmod_cfg.tx_enable)
+ intr_coal->tx_max_coalesced_frames =
+ oct->tx_max_coalesced_frames;
+ break;
}
- if (oct->intrmod.rx_enable) {
- if (intr_coal->rx_max_coalesced_frames_high)
- intrmod_cfg->rx_maxcnt_trigger =
- intr_coal->rx_max_coalesced_frames_high;
- else
- intrmod_cfg->rx_maxcnt_trigger =
- LIO_INTRMOD_RXMAXCNT_TRIGGER;
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
- if (intr_coal->rx_coalesce_usecs_high)
- intrmod_cfg->rx_maxtmr_trigger =
- intr_coal->rx_coalesce_usecs_high;
- else
- intrmod_cfg->rx_maxtmr_trigger =
- LIO_INTRMOD_RXMAXTMR_TRIGGER;
+ if (!intrmod_cfg.rx_enable) {
+ intr_coal->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
+ intr_coal->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
+ }
+ iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
+ intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
+ break;
+ }
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EINVAL;
+ }
+ if (intrmod_cfg.rx_enable) {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg.rx_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg.check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg.maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg.minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg.rx_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg.rx_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg.rx_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg.rx_mincnt_trigger;
+ }
+ if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
+ (intrmod_cfg.tx_enable)) {
+ intr_coal->use_adaptive_tx_coalesce =
+ intrmod_cfg.tx_enable;
+ intr_coal->tx_max_coalesced_frames_high =
+ intrmod_cfg.tx_maxcnt_trigger;
+ intr_coal->tx_max_coalesced_frames_low =
+ intrmod_cfg.tx_mincnt_trigger;
+ }
+ return 0;
+}
- if (intr_coal->rx_coalesce_usecs_low)
- intrmod_cfg->rx_mintmr_trigger =
- intr_coal->rx_coalesce_usecs_low;
- else
- intrmod_cfg->rx_mintmr_trigger =
- LIO_INTRMOD_RXMINTMR_TRIGGER;
+/* Enable/Disable auto interrupt Moderation */
+static int oct_cfg_adaptive_intr(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod_cfg,
+ struct ethtool_coalesce *intr_coal)
+{
+ int ret = 0;
- if (intr_coal->rx_max_coalesced_frames_low)
- intrmod_cfg->rx_mincnt_trigger =
- intr_coal->rx_max_coalesced_frames_low;
- else
- intrmod_cfg->rx_mincnt_trigger =
- LIO_INTRMOD_RXMINCNT_TRIGGER;
+ if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
+ intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
+ intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
+ intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
}
- if (oct->intrmod.tx_enable) {
- if (intr_coal->tx_max_coalesced_frames_high)
- intrmod_cfg->tx_maxcnt_trigger =
- intr_coal->tx_max_coalesced_frames_high;
- else
- intrmod_cfg->tx_maxcnt_trigger =
- LIO_INTRMOD_TXMAXCNT_TRIGGER;
- if (intr_coal->tx_max_coalesced_frames_low)
- intrmod_cfg->tx_mincnt_trigger =
- intr_coal->tx_max_coalesced_frames_low;
- else
- intrmod_cfg->tx_mincnt_trigger =
- LIO_INTRMOD_TXMINCNT_TRIGGER;
+ if (intrmod_cfg->rx_enable) {
+ intrmod_cfg->rx_maxcnt_trigger =
+ intr_coal->rx_max_coalesced_frames_high;
+ intrmod_cfg->rx_maxtmr_trigger =
+ intr_coal->rx_coalesce_usecs_high;
+ intrmod_cfg->rx_mintmr_trigger =
+ intr_coal->rx_coalesce_usecs_low;
+ intrmod_cfg->rx_mincnt_trigger =
+ intr_coal->rx_max_coalesced_frames_low;
+ }
+ if (intrmod_cfg->tx_enable) {
+ intrmod_cfg->tx_maxcnt_trigger =
+ intr_coal->tx_max_coalesced_frames_high;
+ intrmod_cfg->tx_mincnt_trigger =
+ intr_coal->tx_max_coalesced_frames_low;
}
ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
@@ -1666,7 +1741,9 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
}
static int
-oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
+oct_cfg_rx_intrcnt(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
+ struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
u32 rx_max_coalesced_frames;
@@ -1692,7 +1769,7 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
int q_no;
if (!intr_coal->rx_max_coalesced_frames)
- rx_max_coalesced_frames = oct->intrmod.rx_frames;
+ rx_max_coalesced_frames = intrmod->rx_frames;
else
rx_max_coalesced_frames =
intr_coal->rx_max_coalesced_frames;
@@ -1703,17 +1780,18 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
(octeon_read_csr64(
oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
(0x3fffff00000000UL)) |
- rx_max_coalesced_frames);
+ (rx_max_coalesced_frames - 1));
/*consider setting resend bit*/
}
- oct->intrmod.rx_frames = rx_max_coalesced_frames;
+ intrmod->rx_frames = rx_max_coalesced_frames;
+ oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
break;
}
case OCTEON_CN23XX_VF_VID: {
int q_no;
if (!intr_coal->rx_max_coalesced_frames)
- rx_max_coalesced_frames = oct->intrmod.rx_frames;
+ rx_max_coalesced_frames = intrmod->rx_frames;
else
rx_max_coalesced_frames =
intr_coal->rx_max_coalesced_frames;
@@ -1724,9 +1802,10 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
(0x3fffff00000000UL)) |
rx_max_coalesced_frames);
- /* consider writing to resend bit here */
+ /*consider writing to resend bit here*/
}
- oct->intrmod.rx_frames = rx_max_coalesced_frames;
+ intrmod->rx_frames = rx_max_coalesced_frames;
+ oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
break;
}
default:
@@ -1736,6 +1815,7 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
}
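
Two details in the CN23XX count hunks are worth noting: the read-modify-write keeps the time-threshold field (the 0x3fffff00000000 mask) intact, and the PF path now programs N-1 because the hardware raises the interrupt once the packet count exceeds the programmed level. A small sketch of that register update, assuming the same field layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reg = 0x00123400000000ffULL;	/* pretend PKT_INT_LEVELS value */
	uint64_t frames = 16;			/* desired frames per interrupt */

	/* preserve the time-threshold bits, replace the packet count;
	 * N-1 so the interrupt fires after N frames
	 */
	reg = (reg & 0x3fffff00000000ULL) | (frames - 1);
	printf("reg=0x%016llx\n", (unsigned long long)reg);
	return 0;
}
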
static int oct_cfg_rx_intrtime(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
@@ -1766,7 +1846,7 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
int q_no;
if (!intr_coal->rx_coalesce_usecs)
- rx_coalesce_usecs = oct->intrmod.rx_usecs;
+ rx_coalesce_usecs = intrmod->rx_usecs;
else
rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
time_threshold =
@@ -1775,11 +1855,12 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
q_no += oct->sriov_info.pf_srn;
octeon_write_csr64(oct,
CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
- (oct->intrmod.rx_frames |
- (time_threshold << 32)));
+ (intrmod->rx_frames |
+ ((u64)time_threshold << 32)));
/*consider writing to resend bit here*/
}
- oct->intrmod.rx_usecs = rx_coalesce_usecs;
+ intrmod->rx_usecs = rx_coalesce_usecs;
+ oct->rx_coalesce_usecs = rx_coalesce_usecs;
break;
}
case OCTEON_CN23XX_VF_VID: {
@@ -1787,7 +1868,7 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
int q_no;
if (!intr_coal->rx_coalesce_usecs)
- rx_coalesce_usecs = oct->intrmod.rx_usecs;
+ rx_coalesce_usecs = intrmod->rx_usecs;
else
rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
@@ -1796,11 +1877,12 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
for (q_no = 0; q_no < oct->num_oqs; q_no++) {
octeon_write_csr64(
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
- (oct->intrmod.rx_frames |
- (time_threshold << 32)));
- /* consider setting resend bit */
+ (intrmod->rx_frames |
+ ((u64)time_threshold << 32)));
+ /*consider setting resend bit*/
}
- oct->intrmod.rx_usecs = rx_coalesce_usecs;
+ intrmod->rx_usecs = rx_coalesce_usecs;
+ oct->rx_coalesce_usecs = rx_coalesce_usecs;
break;
}
default:
@@ -1811,8 +1893,9 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
}
static int
-oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
- __attribute__((unused)))
+oct_cfg_tx_intrcnt(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
+ struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
u32 iq_intr_pkt;
@@ -1839,12 +1922,13 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
val = readq(inst_cnt_reg);
/*clear wmark and count.dont want to write count back*/
val = (val & 0xFFFF000000000000ULL) |
- ((u64)iq_intr_pkt
+ ((u64)(iq_intr_pkt - 1)
<< CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
writeq(val, inst_cnt_reg);
/*consider setting resend bit*/
}
- oct->intrmod.tx_frames = iq_intr_pkt;
+ intrmod->tx_frames = iq_intr_pkt;
+ oct->tx_max_coalesced_frames = iq_intr_pkt;
break;
}
default:
@@ -1859,6 +1943,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
int ret;
struct octeon_device *oct = lio->oct_dev;
+ struct oct_intrmod_cfg intrmod = {0};
u32 j, q_no;
int db_max, db_min;
@@ -1877,8 +1962,8 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
} else {
dev_err(&oct->pci_dev->dev,
"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
- intr_coal->tx_max_coalesced_frames, db_min,
- db_max);
+ intr_coal->tx_max_coalesced_frames,
+ db_min, db_max);
return -EINVAL;
}
break;
@@ -1889,24 +1974,36 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
return -EINVAL;
}
- oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
- oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
+ intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
+ intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
+ intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+ intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
- ret = oct_cfg_adaptive_intr(lio, intr_coal);
+ ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
if (!intr_coal->use_adaptive_rx_coalesce) {
- ret = oct_cfg_rx_intrtime(lio, intr_coal);
+ ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
if (ret)
goto ret_intrmod;
- ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
if (ret)
goto ret_intrmod;
+ } else {
+ oct->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ oct->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
}
+
if (!intr_coal->use_adaptive_tx_coalesce) {
- ret = oct_cfg_tx_intrcnt(lio, intr_coal);
+ ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
if (ret)
goto ret_intrmod;
+ } else {
+ oct->tx_max_coalesced_frames =
+ CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
}
return 0;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 92f46b1375c3..927617cbf6a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -16,6 +16,7 @@
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
@@ -60,12 +61,6 @@ MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
static int ptp_enable = 1;
-/* Bit mask values for lio->ifstate */
-#define LIO_IFSTATE_DROQ_OPS 0x01
-#define LIO_IFSTATE_REGISTERED 0x02
-#define LIO_IFSTATE_RUNNING 0x04
-#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
-
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
@@ -178,6 +173,8 @@ static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
+static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
+ int linkstate);
static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
@@ -531,36 +528,6 @@ static void liquidio_deinit_pci(void)
}
/**
- * \brief check interface state
- * @param lio per-network private data
- * @param state_flag flag state to check
- */
-static inline int ifstate_check(struct lio *lio, int state_flag)
-{
- return atomic_read(&lio->ifstate) & state_flag;
-}
-
-/**
- * \brief set interface state
- * @param lio per-network private data
- * @param state_flag flag state to set
- */
-static inline void ifstate_set(struct lio *lio, int state_flag)
-{
- atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
-}
-
-/**
- * \brief clear interface state
- * @param lio per-network private data
- * @param state_flag flag state to clear
- */
-static inline void ifstate_reset(struct lio *lio, int state_flag)
-{
- atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
-}
-
-/**
* \brief Stop Tx queues
* @param netdev network device
*/
@@ -748,7 +715,8 @@ static void delete_glists(struct lio *lio)
kfree(g);
} while (g);
- if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+ lio->glists_dma_base && lio->glists_dma_base[i]) {
lio_dma_free(lio->oct_dev,
lio->glist_entry_size * lio->tx_qsize,
lio->glists_virt_base[i],
@@ -805,7 +773,7 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
}
for (i = 0; i < num_iqs; i++) {
- int numa_node = cpu_to_node(i % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
spin_lock_init(&lio->glist_lock[i]);
@@ -967,14 +935,13 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
tx_restart, 1);
netif_wake_subqueue(netdev, iq->q_index);
- } else {
- if (!octnet_iq_is_full(oct, lio->txq)) {
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
- lio->txq,
- tx_restart, 1);
- wake_q(netdev, lio->txq);
- }
}
+ } else if (netif_queue_stopped(netdev) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, lio->txq))) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+ lio->txq, tx_restart, 1);
+ netif_wake_queue(netdev);
}
}
@@ -1084,16 +1051,35 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
int i;
int num_ioq_vectors;
int num_alloc_ioq_vectors;
+ char *queue_irq_names = NULL;
+ char *aux_irq_name = NULL;
if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
/* one non ioq interrupt for handling sli_mac_pf_int_sum */
oct->num_msix_irqs += 1;
+ /* allocate storage for the names assigned to each irq */
+ oct->irq_name_storage =
+ kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ,
+ GFP_KERNEL);
+ if (!oct->irq_name_storage) {
+ dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+ return -ENOMEM;
+ }
+
+ queue_irq_names = oct->irq_name_storage;
+ aux_irq_name = &queue_irq_names
+ [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
+
oct->msix_entries = kcalloc(
oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!oct->msix_entries)
- return 1;
+ if (!oct->msix_entries) {
+ dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return -ENOMEM;
+ }
msix_entries = (struct msix_entry *)oct->msix_entries;
/*Assumption is that pf msix vectors start from pf srn to pf to
@@ -1111,7 +1097,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return num_alloc_ioq_vectors;
}
dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
@@ -1119,9 +1107,12 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
/** For PF, there is one non-ioq interrupt handler */
num_ioq_vectors -= 1;
+
+ snprintf(aux_irq_name, INTRNAMSIZ,
+ "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num);
irqret = request_irq(msix_entries[num_ioq_vectors].vector,
- liquidio_legacy_intr_handler, 0, "octeon",
- oct);
+ liquidio_legacy_intr_handler, 0,
+ aux_irq_name, oct);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -1129,13 +1120,20 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
}
for (i = 0; i < num_ioq_vectors; i++) {
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
+ "LiquidIO%u-pf%u-rxtx-%u",
+ oct->octeon_id, oct->pf_num, i);
+
irqret = request_irq(msix_entries[i].vector,
liquidio_msix_intr_handler, 0,
- "octeon", &oct->ioq_vector[i]);
+ &queue_irq_names[IRQ_NAME_OFF(i)],
+ &oct->ioq_vector[i]);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -1155,7 +1153,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
}
oct->ioq_vector[i].vector = msix_entries[i].vector;
/* assign the cpu mask for this msix interrupt vector */
@@ -1173,111 +1173,150 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
else
oct->flags |= LIO_FLAG_MSI_ENABLED;
+ /* allocate storage for the names assigned to the irq */
+ oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+ if (!oct->irq_name_storage)
+ return -ENOMEM;
+
+ queue_irq_names = oct->irq_name_storage;
+
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+ "LiquidIO%u-pf%u-rxtx-%u",
+ oct->octeon_id, oct->pf_num, 0);
+
irqret = request_irq(oct->pci_dev->irq,
- liquidio_legacy_intr_handler, IRQF_SHARED,
- "octeon", oct);
+ liquidio_legacy_intr_handler,
+ IRQF_SHARED,
+ &queue_irq_names[IRQ_NAME_OFF(0)], oct);
if (irqret) {
if (oct->flags & LIO_FLAG_MSI_ENABLED)
pci_disable_msi(oct->pci_dev);
dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
irqret);
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
}
}
return 0;
}
+static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_device *other_oct;
+
+ other_oct = lio_get_device(oct->octeon_id + 1);
+
+ if (other_oct && other_oct->pci_dev) {
+ int oct_busnum, other_oct_busnum;
+
+ oct_busnum = oct->pci_dev->bus->number;
+ other_oct_busnum = other_oct->pci_dev->bus->number;
+
+ if (oct_busnum == other_oct_busnum) {
+ int oct_slot, other_oct_slot;
+
+ oct_slot = PCI_SLOT(oct->pci_dev->devfn);
+ other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
+
+ if (oct_slot == other_oct_slot)
+ return other_oct;
+ }
+ }
+
+ return NULL;
+}
+
+static void disable_all_vf_links(struct octeon_device *oct)
+{
+ struct net_device *netdev;
+ int max_vfs, vf, i;
+
+ if (!oct)
+ return;
+
+ max_vfs = oct->sriov_info.max_vfs;
+
+ for (i = 0; i < oct->ifcount; i++) {
+ netdev = oct->props[i].netdev;
+ if (!netdev)
+ continue;
+
+ for (vf = 0; vf < max_vfs; vf++)
+ liquidio_set_vf_link_state(netdev, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ }
+}
+
static int liquidio_watchdog(void *param)
{
- u64 wdog;
- u16 mask_of_stuck_cores = 0;
- u16 mask_of_crashed_cores = 0;
- int core_num;
- u8 core_is_stuck[LIO_MAX_CORES];
- u8 core_crashed[LIO_MAX_CORES];
+ bool err_msg_was_printed[LIO_MAX_CORES];
+ u16 mask_of_crashed_or_stuck_cores = 0;
+ bool all_vf_links_are_disabled = false;
struct octeon_device *oct = param;
+ struct octeon_device *other_oct;
+#ifdef CONFIG_MODULE_UNLOAD
+ long refcount, vfs_referencing_pf;
+ u64 vfs_mask1, vfs_mask2;
+#endif
+ int core;
- memset(core_is_stuck, 0, sizeof(core_is_stuck));
- memset(core_crashed, 0, sizeof(core_crashed));
+ memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
while (!kthread_should_stop()) {
- mask_of_crashed_cores =
+ /* sleep for a couple of seconds so that we don't hog the CPU */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(2000));
+
+ mask_of_crashed_or_stuck_cores =
(u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
- for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
- if (!core_is_stuck[core_num]) {
- wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));
-
- /* look at watchdog state field */
- wdog &= CIU3_WDOG_MASK;
- if (wdog) {
- /* this watchdog timer has expired */
- core_is_stuck[core_num] =
- LIO_MONITOR_WDOG_EXPIRE;
- mask_of_stuck_cores |= (1 << core_num);
- }
- }
+ if (!mask_of_crashed_or_stuck_cores)
+ continue;
- if (!core_crashed[core_num])
- core_crashed[core_num] =
- (mask_of_crashed_cores >> core_num) & 1;
- }
+ WRITE_ONCE(oct->cores_crashed, true);
+ other_oct = get_other_octeon_device(oct);
+ if (other_oct)
+ WRITE_ONCE(other_oct->cores_crashed, true);
- if (mask_of_stuck_cores) {
- for (core_num = 0; core_num < LIO_MAX_CORES;
- core_num++) {
- if (core_is_stuck[core_num] == 1) {
- dev_err(&oct->pci_dev->dev,
- "ERROR: Octeon core %d is stuck!\n",
- core_num);
- /* 2 means we have printk'd an error
- * so no need to repeat the same printk
- */
- core_is_stuck[core_num] =
- LIO_MONITOR_CORE_STUCK_MSGD;
- }
- }
- }
+ for (core = 0; core < LIO_MAX_CORES; core++) {
+ bool core_crashed_or_got_stuck;
- if (mask_of_crashed_cores) {
- for (core_num = 0; core_num < LIO_MAX_CORES;
- core_num++) {
- if (core_crashed[core_num] == 1) {
- dev_err(&oct->pci_dev->dev,
- "ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
- core_num);
- /* 2 means we have printk'd an error
- * so no need to repeat the same printk
- */
- core_crashed[core_num] =
- LIO_MONITOR_CORE_STUCK_MSGD;
- }
+ core_crashed_or_got_stuck =
+ (mask_of_crashed_or_stuck_cores
+ >> core) & 1;
+
+ if (core_crashed_or_got_stuck &&
+ !err_msg_was_printed[core]) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
+ core);
+ err_msg_was_printed[core] = true;
}
}
+
+ if (all_vf_links_are_disabled)
+ continue;
+
+ disable_all_vf_links(oct);
+ disable_all_vf_links(other_oct);
+ all_vf_links_are_disabled = true;
+
#ifdef CONFIG_MODULE_UNLOAD
- if (mask_of_stuck_cores || mask_of_crashed_cores) {
- /* make module refcount=0 so that rmmod will work */
- long refcount;
+ vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
+ vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
- refcount = module_refcount(THIS_MODULE);
+ vfs_referencing_pf = hweight64(vfs_mask1);
+ vfs_referencing_pf += hweight64(vfs_mask2);
- while (refcount > 0) {
+ refcount = module_refcount(THIS_MODULE);
+ if (refcount >= vfs_referencing_pf) {
+ while (vfs_referencing_pf) {
module_put(THIS_MODULE);
- refcount = module_refcount(THIS_MODULE);
- }
-
- /* compensate for and withstand an unlikely (but still
- * possible) race condition
- */
- while (refcount < 0) {
- try_module_get(THIS_MODULE);
- refcount = module_refcount(THIS_MODULE);
+ vfs_referencing_pf--;
}
}
#endif
- /* sleep for two seconds */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(2 * HZ);
}
return 0;
@@ -1369,6 +1408,12 @@ liquidio_probe(struct pci_dev *pdev,
return 0;
}
+static bool fw_type_is_none(void)
+{
+ return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
+ sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
+}
+
/**
* \brief Destroy resources associated with octeon device
* @param pdev PCI device structure
@@ -1449,6 +1494,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
pci_disable_msi(oct->pci_dev);
}
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+
/* fallthrough */
case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
if (OCTEON_CN23XX_PF(oct))
@@ -1508,9 +1556,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
- /* Soft reset the octeon device before exiting */
- if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
- oct->fn_list.soft_reset(oct);
+ if (!fw_type_is_none()) {
+ /* Soft reset the octeon device before exiting */
+ if (!OCTEON_CN23XX_PF(oct) ||
+ (OCTEON_CN23XX_PF(oct) && !oct->octeon_id))
+ oct->fn_list.soft_reset(oct);
+ }
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
@@ -1643,6 +1694,15 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
liquidio_stop(netdev);
+ if (fw_type_is_none()) {
+ struct octnic_ctrl_pkt nctrl;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ nctrl.ncmd.s.cmd = OCTNET_CMD_RESET_PF;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ }
+
if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
@@ -1658,6 +1718,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
cleanup_link_status_change_wq(netdev);
+ cleanup_rx_oom_poll_fn(netdev);
+
delete_glists(lio);
free_netdev(netdev);
@@ -2126,8 +2188,7 @@ static int load_firmware(struct octeon_device *oct)
char fw_name[LIO_MAX_FW_FILENAME_LEN];
char *tmp_fw_type;
- if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
- sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
+ if (fw_type_is_none()) {
dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
return ret;
}
@@ -2211,8 +2272,8 @@ static void if_cfg_callback(struct octeon_device *oct,
oct = lio_get_device(ctx->octeon_id);
if (resp->status)
- dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
- CVM_CAST64(resp->status));
+ dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
+ CVM_CAST64(resp->status), status);
WRITE_ONCE(ctx->cond, 1);
snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
@@ -2437,8 +2498,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
/* Flush the instruction queue */
iq = oct->instr_queue[iq_no];
if (iq) {
- /* Process iq buffers with in the budget limits */
- tx_done = octeon_flush_iq(oct, iq, budget);
+ if (atomic_read(&iq->instr_pending))
+ /* Process iq buffers within the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, budget);
+ else
+ tx_done = 1;
/* Update iq read-index rather than waiting for next interrupt.
* Return back if tx_done is false.
*/
@@ -2555,6 +2619,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
__func__);
return 1;
}
+
+ if (octeon_dev->ioq_vector) {
+ struct octeon_ioq_vector *ioq_vector;
+
+ ioq_vector = &octeon_dev->ioq_vector[q];
+ netif_set_xps_queue(netdev,
+ &ioq_vector->affinity_mask,
+ ioq_vector->iq_index);
+ }
}
return 0;
@@ -3426,6 +3499,8 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
struct octnic_ctrl_pkt nctrl;
int ret = 0;
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.param1 = rx_cmd;
@@ -3459,6 +3534,8 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
struct octnic_ctrl_pkt nctrl;
int ret = 0;
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.more = vxlan_cmd_bit;
@@ -3596,7 +3673,8 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
nctrl.ncmd.s.more = 1;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
nctrl.wait_time = LIO_CMD_WAIT_TM;
nctrl.udd[0] = 0;
@@ -4122,6 +4200,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (setup_link_status_change_wq(netdev))
goto setup_nic_dev_fail;
+ if (setup_rx_oom_poll_fn(netdev))
+ goto setup_nic_dev_fail;
+
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -4271,7 +4352,6 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
*/
static int liquidio_init_nic_module(struct octeon_device *oct)
{
- struct oct_intrmod_cfg *intrmod_cfg;
int i, retval = 0;
int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
@@ -4296,22 +4376,6 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
liquidio_ptp_init(oct);
- /* Initialize interrupt moderation params */
- intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
- intrmod_cfg->rx_enable = 1;
- intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
- intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
- intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
- intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
- intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
- intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
- intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
- intrmod_cfg->tx_enable = 1;
- intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
- intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
- intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
- intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
- intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
@@ -4373,6 +4437,7 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
struct octeon_device *oct = (struct octeon_device *)buf;
struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
int i, notice, vf_idx;
+ bool cores_crashed;
u64 *data, vf_num;
notice = recv_pkt->rh.r.ossp;
@@ -4383,19 +4448,23 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
octeon_swap_8B_data(&vf_num, 1);
vf_idx = (int)vf_num - 1;
+ cores_crashed = READ_ONCE(oct->cores_crashed);
+
if (notice == VF_DRV_LOADED) {
if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
dev_info(&oct->pci_dev->dev,
"driver for VF%d was loaded\n", vf_idx);
- try_module_get(THIS_MODULE);
+ if (!cores_crashed)
+ try_module_get(THIS_MODULE);
}
} else if (notice == VF_DRV_REMOVED) {
if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
dev_info(&oct->pci_dev->dev,
"driver for VF%d was removed\n", vf_idx);
- module_put(THIS_MODULE);
+ if (!cores_crashed)
+ module_put(THIS_MODULE);
}
} else if (notice == VF_DRV_MACADDR_CHANGED) {
u8 *b = (u8 *)&data[1];
@@ -4447,14 +4516,16 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
if (OCTEON_CN23XX_PF(octeon_dev)) {
if (!cn23xx_fw_loaded(octeon_dev)) {
fw_loaded = 0;
- /* Do a soft reset of the Octeon device. */
- if (octeon_dev->fn_list.soft_reset(octeon_dev))
- return 1;
- /* things might have changed */
- if (!cn23xx_fw_loaded(octeon_dev))
- fw_loaded = 0;
- else
- fw_loaded = 1;
+ if (!fw_type_is_none()) {
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+ /* things might have changed */
+ if (!cn23xx_fw_loaded(octeon_dev))
+ fw_loaded = 0;
+ else
+ fw_loaded = 1;
+ }
} else {
fw_loaded = 1;
}
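
A recurring theme in the lio_main.c hunks is per-queue IRQ naming: the driver kcalloc()s one INTRNAMSIZ-sized slot per vector and hands request_irq() a pointer into that buffer. request_irq() stores the name pointer rather than copying the string, so the storage must stay alive until free_irq() — which is why it lives in oct->irq_name_storage and is freed on every unwind path. A condensed sketch (my_* names are hypothetical):

    static int my_request_queue_irqs(struct my_dev *d, int nq)
    {
            int i, err;

            d->irq_names = kcalloc(nq, INTRNAMSIZ, GFP_KERNEL);
            if (!d->irq_names)
                    return -ENOMEM;

            for (i = 0; i < nq; i++) {
                    char *name = &d->irq_names[IRQ_NAME_OFF(i)];

                    snprintf(name, INTRNAMSIZ, "mydrv%u-rxtx-%d", d->id, i);
                    err = request_irq(d->vec[i], my_irq_handler, 0,
                                      name, &d->qvec[i]);
                    if (err)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)
                    free_irq(d->vec[i], &d->qvec[i]);
            kfree(d->irq_names);
            d->irq_names = NULL;
            return err;
    }
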
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7b83be4ce1fe..34c77821fad9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -16,6 +16,7 @@
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
@@ -39,12 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-/* Bit mask values for lio->ifstate */
-#define LIO_IFSTATE_DROQ_OPS 0x01
-#define LIO_IFSTATE_REGISTERED 0x02
-#define LIO_IFSTATE_RUNNING 0x04
-#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
-
struct liquidio_if_cfg_context {
int octeon_id;
@@ -336,36 +331,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
};
/**
- * \brief check interface state
- * @param lio per-network private data
- * @param state_flag flag state to check
- */
-static int ifstate_check(struct lio *lio, int state_flag)
-{
- return atomic_read(&lio->ifstate) & state_flag;
-}
-
-/**
- * \brief set interface state
- * @param lio per-network private data
- * @param state_flag flag state to set
- */
-static void ifstate_set(struct lio *lio, int state_flag)
-{
- atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
-}
-
-/**
- * \brief clear interface state
- * @param lio per-network private data
- * @param state_flag flag state to clear
- */
-static void ifstate_reset(struct lio *lio, int state_flag)
-{
- atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
-}
-
-/**
* \brief Stop Tx queues
* @param netdev network device
*/
@@ -506,7 +471,8 @@ static void delete_glists(struct lio *lio)
kfree(g);
} while (g);
- if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+ lio->glists_dma_base && lio->glists_dma_base[i]) {
lio_dma_free(lio->oct_dev,
lio->glist_entry_size * lio->tx_qsize,
lio->glists_virt_base[i],
@@ -722,13 +688,12 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
netif_wake_subqueue(netdev, iq->q_index);
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
tx_restart, 1);
- } else {
- if (!octnet_iq_is_full(oct, lio->txq)) {
- INCR_INSTRQUEUE_PKT_COUNT(
- lio->oct_dev, lio->txq, tx_restart, 1);
- wake_q(netdev, lio->txq);
- }
}
+ } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, lio->txq))) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+ lio->txq, tx_restart, 1);
+ netif_wake_queue(netdev);
}
}
@@ -780,6 +745,7 @@ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
static int octeon_setup_interrupt(struct octeon_device *oct)
{
struct msix_entry *msix_entries;
+ char *queue_irq_names = NULL;
int num_alloc_ioq_vectors;
int num_ioq_vectors;
int irqret;
@@ -788,10 +754,25 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
if (oct->msix_on) {
oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
+ /* allocate storage for the names assigned to each irq */
+ oct->irq_name_storage =
+ kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ,
+ GFP_KERNEL);
+ if (!oct->irq_name_storage) {
+ dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+ return -ENOMEM;
+ }
+
+ queue_irq_names = oct->irq_name_storage;
+
oct->msix_entries = kcalloc(
oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!oct->msix_entries)
- return 1;
+ if (!oct->msix_entries) {
+ dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return -ENOMEM;
+ }
msix_entries = (struct msix_entry *)oct->msix_entries;
@@ -805,16 +786,23 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return num_alloc_ioq_vectors;
}
dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
num_ioq_vectors = oct->num_msix_irqs;
for (i = 0; i < num_ioq_vectors; i++) {
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
+ "LiquidIO%u-vf%u-rxtx-%u",
+ oct->octeon_id, oct->vf_num, i);
+
irqret = request_irq(msix_entries[i].vector,
liquidio_msix_intr_handler, 0,
- "octeon", &oct->ioq_vector[i]);
+ &queue_irq_names[IRQ_NAME_OFF(i)],
+ &oct->ioq_vector[i]);
if (irqret) {
dev_err(&oct->pci_dev->dev,
"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
@@ -830,7 +818,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
- return 1;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
}
oct->ioq_vector[i].vector = msix_entries[i].vector;
/* assign the cpu mask for this msix interrupt vector */
@@ -975,6 +965,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
}
/* Soft reset the octeon device before exiting */
if (oct->pci_dev->reset_fn)
@@ -1163,6 +1155,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
+ cleanup_rx_oom_poll_fn(netdev);
+
cleanup_link_status_change_wq(netdev);
delete_glists(lio);
@@ -1642,8 +1636,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
/* Flush the instruction queue */
iq = oct->instr_queue[iq_no];
if (iq) {
- /* Process iq buffers with in the budget limits */
- tx_done = octeon_flush_iq(oct, iq, budget);
+ if (atomic_read(&iq->instr_pending))
+ /* Process iq buffers within the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, budget);
+ else
+ tx_done = 1;
+
/* Update iq read-index rather than waiting for next interrupt.
* Return back if tx_done is false.
*/
@@ -2486,6 +2484,8 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
+ struct completion compl;
+ u16 response_code;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
@@ -2497,14 +2497,25 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev,
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ init_completion(&compl);
+ nctrl.completion = &compl;
+ nctrl.response_code = &response_code;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
ret);
+ return -EIO;
}
- return ret;
+ if (!wait_for_completion_timeout(&compl,
+ msecs_to_jiffies(nctrl.wait_time)))
+ return -EPERM;
+
+ if (READ_ONCE(response_code))
+ return -EPERM;
+
+ return 0;
}
static int
@@ -2549,6 +2560,8 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
struct octnic_ctrl_pkt nctrl;
int ret = 0;
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.param1 = rx_cmd;
@@ -2581,6 +2594,8 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
struct octnic_ctrl_pkt nctrl;
int ret = 0;
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.more = vxlan_cmd_bit;
@@ -3003,6 +3018,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (setup_link_status_change_wq(netdev))
goto setup_nic_dev_fail;
+ if (setup_rx_oom_poll_fn(netdev))
+ goto setup_nic_dev_fail;
+
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -3057,7 +3075,6 @@ setup_nic_wait_intr:
*/
static int liquidio_init_nic_module(struct octeon_device *oct)
{
- struct oct_intrmod_cfg *intrmod_cfg;
int num_nic_ports = 1;
int i, retval = 0;
@@ -3079,22 +3096,6 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
goto octnet_init_failure;
}
- /* Initialize interrupt moderation params */
- intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
- intrmod_cfg->rx_enable = 1;
- intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
- intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
- intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
- intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
- intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
- intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
- intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
- intrmod_cfg->tx_enable = 1;
- intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
- intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
- intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
- intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
- intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
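
The VLAN-filter hunk above turns a fire-and-forget control command into a synchronous one: the caller parks on a completion, and the command callback is expected to store the firmware status and wake it. The callback side is not part of these hunks; a plausible sketch of it, using the fields the patch adds to octnic_ctrl_pkt:

    /* Hypothetical completion side of the handshake. */
    static void my_ctrl_cmd_done(struct octnic_ctrl_pkt *nctrl)
    {
            if (nctrl->response_code)
                    WRITE_ONCE(*nctrl->response_code,
                               nctrl->status & 0xffff);
            if (nctrl->completion)
                    complete(nctrl->completion);
    }
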
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 294c6f3c6b48..8ea2323d8d67 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -27,7 +27,7 @@
#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
-#define LIQUIDIO_BASE_MINOR_VERSION 4
+#define LIQUIDIO_BASE_MINOR_VERSION 5
#define LIQUIDIO_BASE_MICRO_VERSION 1
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
@@ -83,6 +83,7 @@ enum octeon_tag_type {
#define OPCODE_NIC_INTRMOD_CFG 0x08
#define OPCODE_NIC_IF_CFG 0x09
#define OPCODE_NIC_VF_DRV_NOTICE 0x0A
+#define OPCODE_NIC_INTRMOD_PARAMS 0x0B
#define VF_DRV_LOADED 1
#define VF_DRV_REMOVED -1
#define VF_DRV_MACADDR_CHANGED 2
@@ -100,6 +101,11 @@ enum octeon_tag_type {
#define BYTES_PER_DHLEN_UNIT 8
#define MAX_REG_CNT 2000000U
+#define INTRNAMSIZ 32
+#define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ)
+#define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2)
+#define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2)
+
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
@@ -181,6 +187,7 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_Q 0
/* NIC Command types */
+#define OCTNET_CMD_RESET_PF 0x0
#define OCTNET_CMD_CHANGE_MTU 0x1
#define OCTNET_CMD_CHANGE_MACADDR 0x2
#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3
@@ -845,29 +852,6 @@ struct oct_mdio_cmd {
#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
-/* intrmod: max. packet rate threshold */
-#define LIO_INTRMOD_MAXPKT_RATETHR 196608
-/* intrmod: min. packet rate threshold */
-#define LIO_INTRMOD_MINPKT_RATETHR 9216
-/* intrmod: max. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
-/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMINCNT_TRIGGER 0
-/* intrmod: max. time to trigger interrupt */
-#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
-/* 66xx:intrmod: min. time to trigger interrupt
- * (value of 1 is optimum for TCP_RR)
- */
-#define LIO_INTRMOD_RXMINTMR_TRIGGER 1
-
-/* intrmod: max. packets to trigger interrupt */
-#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64
-/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_TXMINCNT_TRIGGER 0
-
-/* intrmod: poll interval in seconds */
-#define LIO_INTRMOD_CHECK_INTERVAL 1
-
struct oct_intrmod_cfg {
u64 rx_enable;
u64 tx_enable;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 9675ffbf25e6..e21b477d0159 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -793,7 +793,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
u32 num_descs = 0;
u32 iq_no = 0;
union oct_txpciq txpciq;
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct))
num_descs =
@@ -837,7 +837,7 @@ int octeon_setup_output_queues(struct octeon_device *oct)
u32 num_descs = 0;
u32 desc_size = 0;
u32 oq_no = 0;
- int numa_node = cpu_to_node(oq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct)) {
num_descs =
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index c301a3852482..92f67de111aa 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -453,9 +453,6 @@ struct octeon_device {
/** List of dispatch functions */
struct octeon_dispatch_list dispatch;
- /* Interrupt Moderation */
- struct oct_intrmod_cfg intrmod;
-
u32 int_status;
u64 droq_intr;
@@ -517,6 +514,9 @@ struct octeon_device {
void *msix_entries;
+ /* when requesting IRQs, the names are stored here */
+ void *irq_name_storage;
+
struct octeon_sriov_info sriov_info;
struct octeon_pf_vf_hs_word pfvf_hsword;
@@ -538,6 +538,12 @@ struct octeon_device {
u32 priv_flags;
void *watchdog_task;
+
+ u32 rx_coalesce_usecs;
+ u32 rx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames;
+
+ bool cores_crashed;
};
#define OCT_DRV_ONLINE 1
@@ -551,12 +557,6 @@ struct octeon_device {
#define CHIP_CONF(oct, TYPE) \
(((struct octeon_ ## TYPE *)((oct)->chip))->conf)
-struct oct_intrmod_cmd {
- struct octeon_device *oct_dev;
- struct octeon_soft_command *sc;
- struct oct_intrmod_cfg *cfg;
-};
-
/*------------------ Function Prototypes ----------------------*/
/** Initialize device list memory */
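
The new cores_crashed flag is a plain bool shared between the watchdog kthread, the VF driver-notice handler, and a second PF on the same card; the code pairs every access with READ_ONCE()/WRITE_ONCE() so the compiler cannot cache, reorder, or tear it. The pattern, reduced to its two halves:

    /* writer: watchdog thread, on any crashed or stuck core */
    WRITE_ONCE(oct->cores_crashed, true);

    /* reader: VF load/unload notices stop touching the module
     * refcount once the firmware cores are gone
     */
    if (!READ_ONCE(oct->cores_crashed))
            try_module_get(THIS_MODULE);
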
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 79f809479af6..286be5539cef 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -226,8 +226,7 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_droq *droq;
u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
- int orig_node = dev_to_node(&oct->pci_dev->dev);
- int numa_node = cpu_to_node(q_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
@@ -267,13 +266,8 @@ int octeon_init_droq(struct octeon_device *oct,
droq->buffer_size = c_buf_size;
desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
- set_dev_node(&oct->pci_dev->dev, numa_node);
droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
(dma_addr_t *)&droq->desc_ring_dma);
- set_dev_node(&oct->pci_dev->dev, orig_node);
- if (!droq->desc_ring)
- droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
- (dma_addr_t *)&droq->desc_ring_dma);
if (!droq->desc_ring) {
dev_err(&oct->pci_dev->dev,
@@ -519,6 +513,32 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
return desc_refilled;
}
+/** Check whether we can allocate packets to get out of OOM.
+ * @param droq - Droq being checked.
+ * @return nothing
+ */
+void octeon_droq_check_oom(struct octeon_droq *droq)
+{
+ int desc_refilled;
+ struct octeon_device *oct = droq->oct_dev;
+
+ if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
+ spin_lock_bh(&droq->lock);
+ desc_refilled = octeon_droq_refill(oct, droq);
+ if (desc_refilled) {
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
+ wmb();
+ writel(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
+ }
+ spin_unlock_bh(&droq->lock);
+ }
+}
+
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
@@ -970,7 +990,7 @@ int octeon_create_droq(struct octeon_device *oct,
u32 desc_size, void *app_ctx)
{
struct octeon_droq *droq;
- int numa_node = cpu_to_node(q_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (oct->droq[q_no]) {
dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
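
octeon_droq_check_oom() above supplies the refill half of the new RX-OOM machinery; the poller that calls it (set up via setup_rx_oom_poll_fn(), whose body is not in these hunks) presumably runs from the new rxq_status_wq workqueue. A speculative sketch of such a worker, with my_rxq_ctx as a hypothetical per-queue context:

    static void my_rx_oom_work(struct work_struct *work)
    {
            struct delayed_work *dw = to_delayed_work(work);
            struct my_rxq_ctx *ctx =
                    container_of(dw, struct my_rxq_ctx, work);

            /* top up the ring if we are at/below the backpressure mark */
            octeon_droq_check_oom(ctx->droq);

            /* re-arm; the interval is a guess, not from the patch */
            schedule_delayed_work(&ctx->work, msecs_to_jiffies(50));
    }
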
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 6982c0af5ecc..9781577115e7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -426,4 +426,6 @@ int octeon_droq_process_packets(struct octeon_device *oct,
int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
int cmd, u32 arg);
+void octeon_droq_check_oom(struct octeon_droq *droq);
+
#endif /*__OCTEON_DROQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 4608a5af35a3..5063a12613e5 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -152,7 +152,7 @@ struct octeon_instr_queue {
struct oct_iq_stats stats;
/** DMA mapped base address of the input descriptor ring. */
- u64 base_addr_dma;
+ dma_addr_t base_addr_dma;
/** Application context */
void *app_ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 201b9875f9bb..5cca73b8880b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -313,6 +313,7 @@ int octeon_mbox_process_message(struct octeon_mbox *mbox)
return 0;
}
+ spin_unlock_irqrestore(&mbox->lock, flags);
WARN_ON(1);
return 0;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index eef2a1e8a7e3..bf483932ff25 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -28,6 +28,12 @@
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
+/* Bit mask values for lio->ifstate */
+#define LIO_IFSTATE_DROQ_OPS 0x01
+#define LIO_IFSTATE_REGISTERED 0x02
+#define LIO_IFSTATE_RUNNING 0x04
+#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+
struct oct_nic_stats_resp {
u64 rh;
struct oct_link_stats stats;
@@ -123,6 +129,9 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
+ /* work queue for rxq oom status */
+ struct cavium_wq rxq_status_wq;
+
/* work queue for link status */
struct cavium_wq link_status_wq;
@@ -132,10 +141,6 @@ struct lio {
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
-#define CIU3_WDOG(c) (0x1010000020000ULL + ((c) << 3))
-#define CIU3_WDOG_MASK 12ULL
-#define LIO_MONITOR_WDOG_EXPIRE 1
-#define LIO_MONITOR_CORE_STUCK_MSGD 2
#define LIO_MAX_CORES 12
/**
@@ -146,6 +151,10 @@ struct lio {
*/
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
+int setup_rx_oom_poll_fn(struct net_device *netdev);
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev);
+
/**
* \brief Link control command completion callback
* @param nctrl_ptr pointer to control packet structure
@@ -438,4 +447,34 @@ static inline void octeon_fast_packet_next(struct octeon_droq *droq,
get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
}
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static inline int ifstate_check(struct lio *lio, int state_flag)
+{
+ return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static inline void ifstate_set(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static inline void ifstate_reset(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
+
#endif
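
With the ifstate helpers hoisted into this header, the PF and VF drivers share one definition instead of carrying duplicates. Note they are atomic_read()/atomic_set() pairs, not atomic read-modify-writes, so concurrent setters of different flags still rely on external serialization. Typical usage:

    ifstate_set(lio, LIO_IFSTATE_RUNNING);
    if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
            netdev_info(lio->netdev, "interface is running\n");
    ifstate_reset(lio, LIO_IFSTATE_RUNNING);
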
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 0243be8dd56f..b457cf23fce6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -100,14 +100,16 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,
nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr;
- /* Call the callback function if status is OK.
- * Status is OK only if a response was expected and core returned
- * success.
+ /* Call the callback function if status is zero (meaning OK) or status
+ * contains a firmware status code bigger than zero (meaning the
+ * firmware is reporting an error).
* If no response was expected, status is OK if the command was posted
* successfully.
*/
- if (!status && nctrl->cb_fn)
+ if ((!status || status > FIRMWARE_STATUS_CODE(0)) && nctrl->cb_fn) {
+ nctrl->status = status;
nctrl->cb_fn(nctrl);
+ }
octeon_free_soft_command(oct, sc);
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 0c7a5c9b2932..6480ef863441 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -62,6 +62,10 @@ struct octnic_ctrl_pkt {
/** Callback function called when the command has been fetched */
octnic_ctrl_pkt_cb_fn_t cb_fn;
+
+ u32 status;
+ u16 *response_code;
+ struct completion *completion;
};
#define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd))
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 707bc15adec6..261f448f9de2 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -62,8 +62,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
u32 iq_no = (u32)txpciq.s.q_no;
u32 q_size;
struct cavium_wq *db_wq;
- int orig_node = dev_to_node(&oct->pci_dev->dev);
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
@@ -91,13 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
iq->oct_dev = oct;
- set_dev_node(&oct->pci_dev->dev, numa_node);
- iq->base_addr = lio_dma_alloc(oct, q_size,
- (dma_addr_t *)&iq->base_addr_dma);
- set_dev_node(&oct->pci_dev->dev, orig_node);
- if (!iq->base_addr)
- iq->base_addr = lio_dma_alloc(oct, q_size,
- (dma_addr_t *)&iq->base_addr_dma);
+ iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
if (!iq->base_addr) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
iq_no);
@@ -211,7 +204,7 @@ int octeon_setup_iq(struct octeon_device *oct,
void *app_ctx)
{
u32 iq_no = (u32)txpciq.s.q_no;
- int numa_node = cpu_to_node(iq_no % num_online_cpus());
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
if (oct->instr_queue[iq_no]) {
dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
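
Several hunks in this series replace cpu_to_node(i % num_online_cpus()) with dev_to_node(&oct->pci_dev->dev): queue memory now lands on the NUMA node the NIC is attached to, and the old set_dev_node() retry dance around lio_dma_alloc() goes away because dma_alloc_coherent() already allocates near the device. For plain kernel memory the same idea looks like:

    int node = dev_to_node(&oct->pci_dev->dev); /* NUMA home of the NIC */
    struct octeon_instr_queue *iq;

    iq = kzalloc_node(sizeof(*iq), GFP_KERNEL, node);
    if (!iq)                        /* fall back to any node */
            iq = kzalloc(sizeof(*iq), GFP_KERNEL);
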
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 2fbaae96b505..3d691c69f74d 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -69,50 +69,53 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
u32 status;
u64 status64;
- struct octeon_instr_rdp *rdp;
- u64 rptr;
ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
do {
spin_lock_bh(&ordered_sc_list->lock);
- if (ordered_sc_list->head.next == &ordered_sc_list->head) {
+ if (list_empty(&ordered_sc_list->head)) {
spin_unlock_bh(&ordered_sc_list->lock);
return 1;
}
- sc = (struct octeon_soft_command *)ordered_sc_list->
- head.next;
- if (OCTEON_CN23XX_PF(octeon_dev) ||
- OCTEON_CN23XX_VF(octeon_dev)) {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
- rptr = sc->cmd.cmd3.rptr;
- } else {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- rptr = sc->cmd.cmd2.rptr;
- }
+ sc = list_first_entry(&ordered_sc_list->head,
+ struct octeon_soft_command, node);
status = OCTEON_REQUEST_PENDING;
/* check if octeon has finished DMA'ing a response
* to where rptr is pointing to
*/
- dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
- rptr, rdp->rlen,
- DMA_FROM_DEVICE);
status64 = *sc->status_word;
if (status64 != COMPLETION_WORD_INIT) {
+ /* This logic ensures that all 64b have been written.
+ * 1. check byte 0 for non-FF
+ * 2. if non-FF, then swap result from BE to host order
+ * 3. check byte 7 (swapped to 0) for non-FF
+ * 4. if non-FF, use the low 16-bit status code
+ * 5. if either byte 0 or byte 7 is FF, don't use status
+ */
if ((status64 & 0xff) != 0xff) {
octeon_swap_8B_data(&status64, 1);
if (((status64 & 0xff) != 0xff)) {
- status = (u32)(status64 &
- 0xffffffffULL);
+ /* retrieve 16-bit firmware status */
+ status = (u32)(status64 & 0xffffULL);
+ if (status) {
+ status =
+ FIRMWARE_STATUS_CODE(status);
+ } else {
+ /* i.e. no error */
+ status = OCTEON_REQUEST_DONE;
+ }
}
}
} else if (force_quit || (sc->timeout &&
time_after(jiffies, (unsigned long)sc->timeout))) {
+ dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
+ __func__, (long)jiffies, (long)sc->timeout);
status = OCTEON_REQUEST_TIMEOUT;
}
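
A worked example of the completion-word decode above: the firmware writes the 64-bit status big-endian, and a 0xff byte at either end means the DMA has not fully landed yet.

    u64 w = *sc->status_word;
    u32 status = OCTEON_REQUEST_PENDING;

    if (w != COMPLETION_WORD_INIT && (w & 0xff) != 0xff) {
            octeon_swap_8B_data(&w, 1);     /* big-endian -> host order */
            if ((w & 0xff) != 0xff) {
                    u16 fw = w & 0xffff;    /* low 16 bits: fw status */
                    status = fw ? FIRMWARE_STATUS_CODE(fw)
                                : OCTEON_REQUEST_DONE;
            }
    }

So a firmware code of 0x0012 becomes 0x00010012: the 0x0001 major code marks it as a firmware error, distinct from host-driver error codes under major 0x0000.
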
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
index cbb2d84e8932..9169c2815dba 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.h
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -78,6 +78,8 @@ enum {
/*------------ Error codes used by host driver -----------------*/
#define DRIVER_MAJOR_ERROR_CODE 0x0000
+/*------ Error codes used by firmware (bits 15..0 set by firmware) ------*/
+#define FIRMWARE_MAJOR_ERROR_CODE 0x0001
/** A value of 0x00000000 indicates no error i.e. success */
#define DRIVER_ERROR_NONE 0x00000000
@@ -116,6 +118,9 @@ enum {
};
+#define FIRMWARE_STATUS_CODE(status) \
+ ((FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
+
/** Initialize the response lists. The number of response lists to create is
* given by count.
* @param octeon_dev - the octeon device structure.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 2269ff562d95..4a02e618e318 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -252,12 +252,14 @@ struct nicvf_drv_stats {
u64 tx_csum_overflow;
/* driver debug stats */
- u64 rcv_buffer_alloc_failures;
u64 tx_tso;
u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;
+ u64 rcv_buffer_alloc_failures;
+ u64 page_alloc;
+
struct u64_stats_sync syncp;
};
@@ -266,9 +268,9 @@ struct nicvf {
struct net_device *netdev;
struct pci_dev *pdev;
void __iomem *reg_base;
+ struct bpf_prog *xdp_prog;
#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
- struct nicvf_cq_poll *napi[8];
void *iommu_domain;
u8 vf_id;
u8 sqs_id;
@@ -294,6 +296,7 @@ struct nicvf {
/* Queue count */
u8 rx_queues;
u8 tx_queues;
+ u8 xdp_tx_queues;
u8 max_queues;
u8 node;
@@ -318,10 +321,11 @@ struct nicvf {
struct nicvf_drv_stats __percpu *drv_stats;
struct bgx_stats bgx_stats;
+ /* Napi */
+ struct nicvf_cq_poll *napi[8];
+
/* MSI-X */
- bool msix_enabled;
u8 num_vec;
- struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 767234e2e8f9..fb770b0182d3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -65,9 +65,7 @@ struct nicpf {
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
/* MSI-X */
- bool msix_enabled;
u8 num_vec;
- struct msix_entry *msix_entries;
bool irq_allocated[NIC_PF_MSIX_VECTORS];
char irq_name[NIC_PF_MSIX_VECTORS][20];
};
@@ -1088,7 +1086,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
u64 intr;
u8 vf, vf_per_mbx_reg = 64;
- if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
+ if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
mbx = 0;
else
mbx = 1;
@@ -1107,51 +1105,13 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
return IRQ_HANDLED;
}
-static int nic_enable_msix(struct nicpf *nic)
-{
- int i, ret;
-
- nic->num_vec = pci_msix_vec_count(nic->pdev);
-
- nic->msix_entries = kmalloc_array(nic->num_vec,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!nic->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < nic->num_vec; i++)
- nic->msix_entries[i].entry = i;
-
- ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
- if (ret) {
- dev_err(&nic->pdev->dev,
- "Request for #%d msix vectors failed, returned %d\n",
- nic->num_vec, ret);
- kfree(nic->msix_entries);
- return ret;
- }
-
- nic->msix_enabled = 1;
- return 0;
-}
-
-static void nic_disable_msix(struct nicpf *nic)
-{
- if (nic->msix_enabled) {
- pci_disable_msix(nic->pdev);
- kfree(nic->msix_entries);
- nic->msix_enabled = 0;
- nic->num_vec = 0;
- }
-}
-
static void nic_free_all_interrupts(struct nicpf *nic)
{
int irq;
for (irq = 0; irq < nic->num_vec; irq++) {
if (nic->irq_allocated[irq])
- free_irq(nic->msix_entries[irq].vector, nic);
+ free_irq(pci_irq_vector(nic->pdev, irq), nic);
nic->irq_allocated[irq] = false;
}
}
@@ -1159,18 +1119,24 @@ static void nic_free_all_interrupts(struct nicpf *nic)
static int nic_register_interrupts(struct nicpf *nic)
{
int i, ret;
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
/* Enable MSI-X */
- ret = nic_enable_msix(nic);
- if (ret)
- return ret;
+ ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&nic->pdev->dev,
+ "Request for #%d msix vectors failed, returned %d\n",
+ nic->num_vec, ret);
+ return 1;
+ }
/* Register mailbox interrupt handler */
for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
sprintf(nic->irq_name[i],
"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(nic->msix_entries[i].vector,
+ ret = request_irq(pci_irq_vector(nic->pdev, i),
nic_mbx_intr_handler, 0,
nic->irq_name[i], nic);
if (ret)
@@ -1186,14 +1152,16 @@ static int nic_register_interrupts(struct nicpf *nic)
fail:
dev_err(&nic->pdev->dev, "Request irq failed\n");
nic_free_all_interrupts(nic);
- nic_disable_msix(nic);
+ pci_free_irq_vectors(nic->pdev);
+ nic->num_vec = 0;
return ret;
}
static void nic_unregister_interrupts(struct nicpf *nic)
{
nic_free_all_interrupts(nic);
- nic_disable_msix(nic);
+ pci_free_irq_vectors(nic->pdev);
+ nic->num_vec = 0;
}
static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
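
The nic_main.c hunks retire the open-coded msix_entry bookkeeping in favor of the managed pci_alloc_irq_vectors()/pci_irq_vector() API, which allocates and frees the vector table internally. A condensed sketch of the resulting shape (my_handler is hypothetical):

    static int my_setup_irqs(struct pci_dev *pdev, void *ctx)
    {
            int i, nvec, ret;

            nvec = pci_msix_vec_count(pdev);
            ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
            if (ret < 0)
                    return ret;

            for (i = 0; i < nvec; i++) {
                    ret = request_irq(pci_irq_vector(pdev, i),
                                      my_handler, 0, "mydrv", ctx);
                    if (ret)
                            goto err;
            }
            return 0;

    err:
            while (--i >= 0)
                    free_irq(pci_irq_vector(pdev, i), ctx);
            pci_free_irq_vectors(pdev);
            return ret;
    }
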
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 02a986cdbb39..b9ece9cbf98b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -100,11 +100,12 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
NICVF_DRV_STAT(tx_csum_overlap),
NICVF_DRV_STAT(tx_csum_overflow),
- NICVF_DRV_STAT(rcv_buffer_alloc_failures),
NICVF_DRV_STAT(tx_tso),
NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
+ NICVF_DRV_STAT(rcv_buffer_alloc_failures),
+ NICVF_DRV_STAT(page_alloc),
};
static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -720,7 +721,7 @@ static int nicvf_set_channels(struct net_device *dev,
struct nicvf *nic = netdev_priv(dev);
int err = 0;
bool if_up = netif_running(dev);
- int cqcount;
+ u8 cqcount, txq_count;
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
@@ -729,10 +730,26 @@ static int nicvf_set_channels(struct net_device *dev,
if (channel->tx_count > nic->max_queues)
return -EINVAL;
+ if (nic->xdp_prog &&
+ ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
+ netdev_err(nic->netdev,
+ "XDP mode, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -EINVAL;
+ }
+
if (if_up)
nicvf_stop(dev);
- cqcount = max(channel->rx_count, channel->tx_count);
+ nic->rx_queues = channel->rx_count;
+ nic->tx_queues = channel->tx_count;
+ if (!nic->xdp_prog)
+ nic->xdp_tx_queues = 0;
+ else
+ nic->xdp_tx_queues = channel->rx_count;
+
+ txq_count = nic->xdp_tx_queues + nic->tx_queues;
+ cqcount = max(nic->rx_queues, txq_count);
if (cqcount > MAX_CMP_QUEUES_PER_QS) {
nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
@@ -741,12 +758,10 @@ static int nicvf_set_channels(struct net_device *dev,
nic->sqs_count = 0;
}
- nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
- nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
+ nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
+ nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
- nic->rx_queues = channel->rx_count;
- nic->tx_queues = channel->tx_count;
err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
if (err)
return err;
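
The queue accounting above is easiest to see with numbers. With an XDP program loaded, every RX queue gets a dedicated XDP TX queue, and completion queues must cover whichever side is larger; a worked example with hypothetical values:

    u8 max_queues = 8, rx_count = 4, tx_count = 2;

    u8 xdp_tx_queues = rx_count;              /* 4: one XDP TXQ per RXQ */
    u8 txq_count = xdp_tx_queues + tx_count;  /* 6 */
    u8 cqcount = max(rx_count, txq_count);    /* 6 CQs needed */

    /* rx_count + tx_count = 6 <= max_queues, so this is accepted;
     * rx=5/tx=4 would trip the XDP check (5 + 4 > 8) and return -EINVAL.
     */
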
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 24017588f531..d6477af88085 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -17,6 +17,9 @@
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
#include "nic_reg.h"
#include "nic.h"
@@ -397,8 +400,10 @@ static void nicvf_request_sqs(struct nicvf *nic)
if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
- if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
- tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;
+
+ tx_queues = nic->tx_queues + nic->xdp_tx_queues;
+ if (tx_queues > MAX_SND_QUEUES_PER_QS)
+ tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;
/* Set no of Rx/Tx queues in each of the SQsets */
for (sqs = 0; sqs < nic->sqs_count; sqs++) {
@@ -496,12 +501,99 @@ static int nicvf_init_resources(struct nicvf *nic)
return 0;
}
+static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
+ struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
+ struct sk_buff **skb)
+{
+ struct xdp_buff xdp;
+ struct page *page;
+ u32 action;
+ u16 len, offset = 0;
+ u64 dma_addr, cpu_addr;
+ void *orig_data;
+
+ /* Retrieve packet buffer's DMA address and length */
+ len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
+ dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));
+
+ cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
+ if (!cpu_addr)
+ return false;
+ cpu_addr = (u64)phys_to_virt(cpu_addr);
+ page = virt_to_page((void *)cpu_addr);
+
+ xdp.data_hard_start = page_address(page);
+ xdp.data = (void *)cpu_addr;
+ xdp.data_end = xdp.data + len;
+ orig_data = xdp.data;
+
+ rcu_read_lock();
+ action = bpf_prog_run_xdp(prog, &xdp);
+ rcu_read_unlock();
+
+ /* Check if XDP program has changed headers */
+ if (orig_data != xdp.data) {
+ len = xdp.data_end - xdp.data;
+ offset = orig_data - xdp.data;
+ dma_addr -= offset;
+ }
+
+ switch (action) {
+ case XDP_PASS:
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+
+ /* Build SKB and pass on packet to network stack */
+ *skb = build_skb(xdp.data,
+ RCV_FRAG_LEN - cqe_rx->align_pad + offset);
+ if (!*skb)
+ put_page(page);
+ else
+ skb_put(*skb, len);
+ return false;
+ case XDP_TX:
+ nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(action);
+ case XDP_ABORTED:
+ trace_xdp_exception(nic->netdev, prog, action);
+ case XDP_DROP:
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ put_page(page);
+ return true;
+ }
+ return false;
+}
+
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cqe_send_t *cqe_tx,
- int cqe_type, int budget,
+ int budget, int *subdesc_cnt,
unsigned int *tx_pkts, unsigned int *tx_bytes)
{
struct sk_buff *skb = NULL;
+ struct page *page;
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
@@ -513,12 +605,26 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
return;
- netdev_dbg(nic->netdev,
- "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
- __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
- cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+ /* Check for errors */
+ if (cqe_tx->send_status)
+ nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);
+
+ /* Is this a XDP designated Tx queue */
+ if (sq->is_xdp) {
+ page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
+ /* Check if it's recycled page or else unmap DMA mapping */
+ if (page && (page_ref_count(page) == 1))
+ nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+ hdr->subdesc_cnt);
+
+ /* Release page reference for recycling */
+ if (page)
+ put_page(page);
+ sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
+ return;
+ }
- nicvf_check_cqe_tx_errs(nic, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
if (skb) {
/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -528,12 +634,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
tso_sqe->subdesc_cnt);
- nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ *subdesc_cnt += tso_sqe->subdesc_cnt + 1;
} else {
nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
hdr->subdesc_cnt);
}
- nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
prefetch(skb);
(*tx_pkts)++;
*tx_bytes += skb->len;
@@ -544,7 +650,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
* a SKB attached, so just free SQEs here.
*/
if (!nic->hw_tso)
- nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
}
}
@@ -578,9 +684,9 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cqe_rx_t *cqe_rx)
+ struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
struct nicvf *snic = nic;
int err = 0;
@@ -595,16 +701,25 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
}
/* Check for errors */
- err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
- if (err && !cqe_rx->rb_cnt)
- return;
+ if (cqe_rx->err_level || cqe_rx->err_opcode) {
+ err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
+ if (err && !cqe_rx->rb_cnt)
+ return;
+ }
- skb = nicvf_get_rcv_skb(snic, cqe_rx);
- if (!skb) {
- netdev_dbg(nic->netdev, "Packet not received\n");
- return;
+ /* For XDP, ignore pkts spanning multiple pages */
+ if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
+ /* Packet consumed by XDP */
+ if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+ return;
+ } else {
+ skb = nicvf_get_rcv_skb(snic, cqe_rx,
+ nic->xdp_prog ? true : false);
}
+ if (!skb)
+ return;
+
if (netif_msg_pktdata(nic)) {
netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
skb, skb->len);
@@ -646,13 +761,14 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
{
int processed_cqe, work_done = 0, tx_done = 0;
int cqe_count, cqe_head;
+ int subdesc_cnt = 0;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
- struct snd_queue *sq;
- unsigned int tx_pkts = 0, tx_bytes = 0;
+ struct snd_queue *sq = &qs->sq[cq_idx];
+ unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
spin_lock_bh(&cq->lock);
loop:
@@ -667,8 +783,6 @@ loop:
cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
cqe_head &= 0xFFFF;
- netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
- __func__, cq_idx, cqe_count, cqe_head);
while (processed_cqe < cqe_count) {
/* Get the CQ descriptor */
cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -682,17 +796,15 @@ loop:
break;
}
- netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
- cq_idx, cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
work_done++;
break;
case CQE_TYPE_SEND:
- nicvf_snd_pkt_handler(netdev,
- (void *)cq_desc, CQE_TYPE_SEND,
- budget, &tx_pkts, &tx_bytes);
+ nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
+ budget, &subdesc_cnt,
+ &tx_pkts, &tx_bytes);
tx_done++;
break;
case CQE_TYPE_INVALID:
@@ -704,9 +816,6 @@ loop:
}
processed_cqe++;
}
- netdev_dbg(nic->netdev,
- "%s CQ%d processed_cqe %d work_done %d budget %d\n",
- __func__, cq_idx, processed_cqe, work_done, budget);
/* Ring doorbell to inform H/W to reuse processed CQEs */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -716,13 +825,26 @@ loop:
goto loop;
done:
+ /* Update SQ's descriptor free count */
+ if (subdesc_cnt)
+ nicvf_put_sq_desc(sq, subdesc_cnt);
+
+ txq_idx = nicvf_netdev_qidx(nic, cq_idx);
+ /* Handle XDP TX queues */
+ if (nic->pnicvf->xdp_prog) {
+ if (txq_idx < nic->pnicvf->xdp_tx_queues) {
+ nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
+ goto out;
+ }
+ nic = nic->pnicvf;
+ txq_idx -= nic->pnicvf->xdp_tx_queues;
+ }
+
/* Wakeup TXQ if its stopped earlier due to SQ full */
- sq = &nic->qs->sq[cq_idx];
if (tx_done ||
(atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
netdev = nic->pnicvf->netdev;
- txq = netdev_get_tx_queue(netdev,
- nicvf_netdev_qidx(nic, cq_idx));
+ txq = netdev_get_tx_queue(netdev, txq_idx);
if (tx_pkts)
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
@@ -735,10 +857,11 @@ done:
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit queue wakeup SQ%d\n",
- netdev->name, cq_idx);
+ netdev->name, txq_idx);
}
}
+out:
spin_unlock_bh(&cq->lock);
return work_done;
}
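The wakeup path above pairs with byte queue limits (BQL): bytes are accounted when queued and again when completed, and the stack sizes the in-flight window from the difference. A sketch of the standard pairing, with a driver-defined sq_has_room() as an assumption:

	/* xmit path */
	netdev_tx_sent_queue(txq, skb->len);

	/* completion path (as in the handler above) */
	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
	if (netif_tx_queue_stopped(txq) && sq_has_room(sq))
		netif_tx_wake_queue(txq);
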
@@ -882,38 +1005,9 @@ static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
return IRQ_HANDLED;
}
-static int nicvf_enable_msix(struct nicvf *nic)
-{
- int ret, vec;
-
- nic->num_vec = NIC_VF_MSIX_VECTORS;
-
- for (vec = 0; vec < nic->num_vec; vec++)
- nic->msix_entries[vec].entry = vec;
-
- ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
- if (ret) {
- netdev_err(nic->netdev,
- "Req for #%d msix vectors failed\n", nic->num_vec);
- return 0;
- }
- nic->msix_enabled = 1;
- return 1;
-}
-
-static void nicvf_disable_msix(struct nicvf *nic)
-{
- if (nic->msix_enabled) {
- pci_disable_msix(nic->pdev);
- nic->msix_enabled = 0;
- nic->num_vec = 0;
- }
-}
-
static void nicvf_set_irq_affinity(struct nicvf *nic)
{
int vec, cpu;
- int irqnum;
for (vec = 0; vec < nic->num_vec; vec++) {
if (!nic->irq_allocated[vec])
@@ -930,15 +1024,14 @@ static void nicvf_set_irq_affinity(struct nicvf *nic)
cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
nic->affinity_mask[vec]);
- irqnum = nic->msix_entries[vec].vector;
- irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
+ irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
+ nic->affinity_mask[vec]);
}
}
static int nicvf_register_interrupts(struct nicvf *nic)
{
int irq, ret = 0;
- int vector;
for_each_cq_irq(irq)
sprintf(nic->irq_name[irq], "%s-rxtx-%d",
@@ -957,8 +1050,8 @@ static int nicvf_register_interrupts(struct nicvf *nic)
/* Register CQ interrupts */
for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
- vector = nic->msix_entries[irq].vector;
- ret = request_irq(vector, nicvf_intr_handler,
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
+ nicvf_intr_handler,
0, nic->irq_name[irq], nic->napi[irq]);
if (ret)
goto err;
@@ -968,8 +1061,8 @@ static int nicvf_register_interrupts(struct nicvf *nic)
/* Register RBDR interrupt */
for (irq = NICVF_INTR_ID_RBDR;
irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
- vector = nic->msix_entries[irq].vector;
- ret = request_irq(vector, nicvf_rbdr_intr_handler,
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
+ nicvf_rbdr_intr_handler,
0, nic->irq_name[irq], nic);
if (ret)
goto err;
@@ -981,7 +1074,7 @@ static int nicvf_register_interrupts(struct nicvf *nic)
nic->pnicvf->netdev->name,
nic->sqs_mode ? (nic->sqs_id + 1) : 0);
irq = NICVF_INTR_ID_QS_ERR;
- ret = request_irq(nic->msix_entries[irq].vector,
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
nicvf_qs_err_intr_handler,
0, nic->irq_name[irq], nic);
if (ret)
@@ -1001,6 +1094,7 @@ err:
static void nicvf_unregister_interrupts(struct nicvf *nic)
{
+ struct pci_dev *pdev = nic->pdev;
int irq;
/* Free registered interrupts */
@@ -1008,19 +1102,20 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
if (!nic->irq_allocated[irq])
continue;
- irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
+ irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
free_cpumask_var(nic->affinity_mask[irq]);
if (irq < NICVF_INTR_ID_SQ)
- free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
+ free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
else
- free_irq(nic->msix_entries[irq].vector, nic);
+ free_irq(pci_irq_vector(pdev, irq), nic);
nic->irq_allocated[irq] = false;
}
/* Disable MSI-X */
- nicvf_disable_msix(nic);
+ pci_free_irq_vectors(pdev);
+ nic->num_vec = 0;
}
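These hunks complete the move from the legacy msix_entries[] array to the managed pci_alloc_irq_vectors() API: vectors are allocated in one call and individual Linux IRQ numbers are obtained with pci_irq_vector(). A minimal sketch of the idiom (names and vector counts are illustrative):

static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
			     void *data)
{
	int nvec, err;

	/* allocate between 1 and 8 MSI-X vectors in one call */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* vector 0's Linux IRQ number comes from pci_irq_vector() */
	err = request_irq(pci_irq_vector(pdev, 0), handler, 0, "example", data);
	if (err)
		pci_free_irq_vectors(pdev);
	return err;
}
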
/* Initialize MSIX vectors and register MISC interrupt.
@@ -1032,16 +1127,22 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
int irq = NICVF_INTR_ID_MISC;
/* Return if mailbox interrupt is already registered */
- if (nic->msix_enabled)
+ if (nic->pdev->msix_enabled)
return 0;
/* Enable MSI-X */
- if (!nicvf_enable_msix(nic))
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
+ ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ netdev_err(nic->netdev,
+ "Req for #%d msix vectors failed\n", nic->num_vec);
return 1;
+ }
sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
/* Register Misc interrupt */
- ret = request_irq(nic->msix_entries[irq].vector,
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
if (ret)
@@ -1076,6 +1177,13 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+ /* In the XDP case, the initial HW Tx queues are used for XDP,
+ * but the stack's queue mapping starts at '0', so skip past the
+ * Tx queues attached to Rx queues for XDP.
+ */
+ if (nic->xdp_prog)
+ qid += nic->xdp_tx_queues;
+
snic = nic;
/* Get secondary Qset's SQ structure */
if (qid >= MAX_SND_QUEUES_PER_QS) {
@@ -1164,7 +1272,7 @@ int nicvf_stop(struct net_device *netdev)
/* Wait for pending IRQ handlers to finish */
for (irq = 0; irq < nic->num_vec; irq++)
- synchronize_irq(nic->msix_entries[irq].vector);
+ synchronize_irq(pci_irq_vector(nic->pdev, irq));
tasklet_kill(&nic->rbdr_task);
tasklet_kill(&nic->qs_err_task);
@@ -1365,7 +1473,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- if (nic->msix_enabled) {
+ if (nic->pdev->msix_enabled) {
if (nicvf_hw_set_mac_addr(nic, netdev))
return -EBUSY;
} else {
@@ -1553,6 +1661,114 @@ static int nicvf_set_features(struct net_device *netdev,
return 0;
}
+static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
+{
+ u8 cq_count, txq_count;
+
+ /* Set XDP Tx queue count same as Rx queue count */
+ if (!bpf_attached)
+ nic->xdp_tx_queues = 0;
+ else
+ nic->xdp_tx_queues = nic->rx_queues;
+
+ /* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
+ * need to be allocated; check how many.
+ */
+ txq_count = nic->xdp_tx_queues + nic->tx_queues;
+ cq_count = max(nic->rx_queues, txq_count);
+ if (cq_count > MAX_CMP_QUEUES_PER_QS) {
+ nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
+ nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
+ } else {
+ nic->sqs_count = 0;
+ }
+
+ /* Set primary Qset's resources */
+ nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
+ nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
+ nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+ /* Update stack */
+ nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
+}
+
+static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
+{
+ struct net_device *dev = nic->netdev;
+ bool if_up = netif_running(nic->netdev);
+ struct bpf_prog *old_prog;
+ bool bpf_attached = false;
+
+ /* For now just support only the usual MTU sized frames */
+ if (prog && (dev->mtu > 1500)) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ dev->mtu);
+ return -EOPNOTSUPP;
+ }
+
+ /* All SQs attached to CQs (i.e. same as RQs) are treated as
+ * XDP Tx queues, and additional Tx queues are allocated for
+ * the network stack to send packets out.
+ *
+ * The number of stack Tx queues is either the same as the Rx
+ * queue count or whatever is left of the maximum possible.
+ */
+ if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
+ netdev_warn(dev,
+ "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -ENOMEM;
+ }
+
+ if (if_up)
+ nicvf_stop(nic->netdev);
+
+ old_prog = xchg(&nic->xdp_prog, prog);
+ /* Detach old prog, if any */
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (nic->xdp_prog) {
+ /* Attach BPF program */
+ nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
+ if (!IS_ERR(nic->xdp_prog))
+ bpf_attached = true;
+ }
+
+ /* Calculate Tx queues needed for XDP and network stack */
+ nicvf_set_xdp_queues(nic, bpf_attached);
+
+ if (if_up) {
+ /* Reinitialize interface, clean slate */
+ nicvf_open(nic->netdev);
+ netif_trans_update(nic->netdev);
+ }
+
+ return 0;
+}
+
+static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ /* To avoid checks while retrieving buffer address from CQE_RX,
+ * do not support XDP for T88 pass1.x silicon, which anyway is
+ * not widely used.
+ */
+ if (pass1_silicon(nic->pdev))
+ return -EOPNOTSUPP;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return nicvf_xdp_setup(nic, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!nic->xdp_prog;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
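For exercising the new .ndo_xdp hook, any XDP object can be attached from userspace. A minimal program that drives the XDP_DROP path (built with clang -O2 -target bpf -c xdp_drop.c -o xdp_drop.o; the "prog" section name is what older iproute2 expects by default):

/* xdp_drop.c - drops every received packet */
#include <linux/bpf.h>

__attribute__((section("prog"), used))
int xdp_drop(struct xdp_md *ctx)
{
	return XDP_DROP;
}

char _license[] __attribute__((section("license"), used)) = "GPL";

Attach and detach with iproute2, e.g. "ip link set dev eth0 xdp obj xdp_drop.o" and "ip link set dev eth0 xdp off".
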
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1563,6 +1779,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_tx_timeout = nicvf_tx_timeout,
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
+ .ndo_xdp = nicvf_xdp,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1665,8 +1882,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_unregister_interrupts;
- netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_GRO |
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_RX);
netdev->hw_features |= NETIF_F_RXHASH;
@@ -1674,7 +1892,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_LOOPBACK;
- netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index f13289f0d238..2b181762ad49 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -19,16 +19,8 @@
#include "q_struct.h"
#include "nicvf_queues.h"
-#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)
-
-static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
-{
- /* Translation is installed only when IOMMU is present */
- if (nic->iommu_domain)
- return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
- return dma_addr;
-}
-
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ int size, u64 data);
static void nicvf_get_page(struct nicvf *nic)
{
if (!nic->rb_pageref || !nic->rb_page)
@@ -90,46 +82,152 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
dmem->base = NULL;
}
-/* Allocate buffer for packet reception
- * HW returns memory address where packet is DMA'ed but not a pointer
- * into RBDR ring, so save buffer address at the start of fragment and
- * align the start address to a cache aligned address
+#define XDP_PAGE_REFCNT_REFILL 256
+
+/* Allocate a new page or recycle one if possible.
+ *
+ * We cannot optimize DMA mapping here, since
+ * 1. There is only one RBDR ring for 8 Rx queues.
+ * 2. CQE_RX gives the address of the buffer a packet was DMA'ed to,
+ * not an index into the RBDR ring, so saved info can't be looked up.
+ * 3. There are multiple receive buffers per page.
*/
-static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
- u32 buf_len, u64 **rbuf)
+static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
+ struct rbdr *rbdr, gfp_t gfp)
+{
+ int ref_count;
+ struct page *page = NULL;
+ struct pgcache *pgcache, *next;
+
+ /* Check if page is already allocated */
+ pgcache = &rbdr->pgcache[rbdr->pgidx];
+ page = pgcache->page;
+ /* Check if page can be recycled */
+ if (page) {
+ ref_count = page_ref_count(page);
+ /* Check if this page has been used exactly once, i.e. 'put_page'
+ * was called after packet transmission, so the internal ref_count
+ * and the page's ref_count are equal and the page can be recycled.
+ */
+ if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
+ pgcache->ref_count--;
+ else
+ page = NULL;
+
+ /* In non-XDP mode, page's ref_count needs to be '1' for it
+ * to be recycled.
+ */
+ if (!rbdr->is_xdp && (ref_count != 1))
+ page = NULL;
+ }
+
+ if (!page) {
+ page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
+ if (!page)
+ return NULL;
+
+ this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);
+
+ /* Check for space */
+ if (rbdr->pgalloc >= rbdr->pgcnt) {
+ /* Page can still be used */
+ nic->rb_page = page;
+ return NULL;
+ }
+
+ /* Save the page in page cache */
+ pgcache->page = page;
+ pgcache->dma_addr = 0;
+ pgcache->ref_count = 0;
+ rbdr->pgalloc++;
+ }
+
+ /* Take additional page references for recycling */
+ if (rbdr->is_xdp) {
+ /* Since there is a single RBDR (i.e. a single core doing
+ * page recycling) per 8 Rx queues, in XDP mode atomically
+ * adjusting page references is the biggest bottleneck, so
+ * take a bunch of references at a time.
+ *
+ * Hence the reference counts below differ by '1'.
+ */
+ if (!pgcache->ref_count) {
+ pgcache->ref_count = XDP_PAGE_REFCNT_REFILL;
+ page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
+ }
+ } else {
+ /* In the non-XDP case, a single 64K page is divided across
+ * multiple receive buffers, so the cost of recycling is lower
+ * anyway, and a single extra reference suffices.
+ */
+ page_ref_add(page, 1);
+ }
+
+ rbdr->pgidx++;
+ rbdr->pgidx &= (rbdr->pgcnt - 1);
+
+ /* Prefetch refcount of next page in page cache */
+ next = &rbdr->pgcache[rbdr->pgidx];
+ page = next->page;
+ if (page)
+ prefetch(&page->_refcount);
+
+ return pgcache;
+}
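The recycling test hinges on the driver holding one extra reference per page: once the stack's put_page() brings the count back down to the driver's own reference, the page is reusable. A sketch of the non-XDP invariant, with reuse_page() as a hypothetical stand-in:

	/* one extra ref was taken at alloc time: page_ref_add(page, 1) */
	if (page_ref_count(page) == 1) {
		/* stack is done with it; re-arm the extra ref and reuse */
		page_ref_add(page, 1);
		reuse_page(page);		/* hypothetical */
	} else {
		/* still in flight somewhere; allocate a fresh page */
		page = alloc_pages(GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN, 0);
	}
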
+
+/* Allocate buffer for packet reception */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
+ gfp_t gfp, u32 buf_len, u64 *rbuf)
{
- int order = NICVF_PAGE_ORDER;
+ struct pgcache *pgcache = NULL;
- /* Check if request can be accomodated in previous allocated page */
- if (nic->rb_page &&
- ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
+ /* Check if the request can be accommodated in the previously
+ * allocated page. But in XDP mode only one buffer per page is
+ * permitted.
+ */
+ if (!rbdr->is_xdp && nic->rb_page &&
+ ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
nic->rb_pageref++;
goto ret;
}
nicvf_get_page(nic);
+ nic->rb_page = NULL;
- /* Allocate a new page */
- nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- order);
- if (!nic->rb_page) {
+ /* Get new page, either recycled or new one */
+ pgcache = nicvf_alloc_page(nic, rbdr, gfp);
+ if (!pgcache && !nic->rb_page) {
this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
return -ENOMEM;
}
+
nic->rb_page_offset = 0;
+
+ /* Reserve space for header modifications by BPF program */
+ if (rbdr->is_xdp)
+ buf_len += XDP_PACKET_HEADROOM;
+
+ /* Check if it's recycled */
+ if (pgcache)
+ nic->rb_page = pgcache->page;
ret:
- /* HW will ensure data coherency, CPU sync not required */
- *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+ if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
+ *rbuf = pgcache->dma_addr;
+ } else {
+ /* HW will ensure data coherency, CPU sync not required */
+ *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
nic->rb_page_offset, buf_len,
DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC));
- if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
- if (!nic->rb_page_offset)
- __free_pages(nic->rb_page, order);
- nic->rb_page = NULL;
- return -ENOMEM;
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+ if (!nic->rb_page_offset)
+ __free_pages(nic->rb_page, 0);
+ nic->rb_page = NULL;
+ return -ENOMEM;
+ }
+ if (pgcache)
+ pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+ nic->rb_page_offset += buf_len;
}
- nic->rb_page_offset += buf_len;
return 0;
}
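In XDP mode each page holds exactly one receive buffer, mapped from the page start so BPF programs get headroom to prepend headers. The resulting layout, using the constants above:

	/*
	 * page start                                          page end
	 * |<-- XDP_PACKET_HEADROOM -->|<----- RCV_FRAG_LEN ----->|
	 * ^ dma_addr (mapping and unmapping start here)
	 *                             ^ pgcache->dma_addr = dma_addr +
	 *                               XDP_PACKET_HEADROOM (given to HW)
	 */
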
@@ -159,7 +257,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
int ring_len, int buf_size)
{
int idx;
- u64 *rbuf;
+ u64 rbuf;
struct rbdr_entry_t *desc;
int err;
@@ -177,10 +275,34 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
rbdr->head = 0;
rbdr->tail = 0;
+ /* Initialize page recycling state.
+ *
+ * A single buffer per page can't be used, especially with 64K
+ * pages: on embedded platforms (i.e. 81xx/83xx) available memory
+ * is low, and with a minimum RBDR ring size of 8K that would take
+ * away lots of memory.
+ *
+ * But for XDP it has to be a single buffer per page.
+ */
+ if (!nic->pnicvf->xdp_prog) {
+ rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
+ rbdr->is_xdp = false;
+ } else {
+ rbdr->pgcnt = ring_len;
+ rbdr->is_xdp = true;
+ }
+ rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
+ rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
+ rbdr->pgcnt, GFP_KERNEL);
+ if (!rbdr->pgcache)
+ return -ENOMEM;
+ rbdr->pgidx = 0;
+ rbdr->pgalloc = 0;
+
nic->rb_page = NULL;
for (idx = 0; idx < ring_len; idx++) {
- err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
- &rbuf);
+ err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
+ RCV_FRAG_LEN, &rbuf);
if (err) {
/* To free already allocated and mapped ones */
rbdr->tail = idx - 1;
@@ -188,7 +310,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
}
desc = GET_RBDR_DESC(rbdr, idx);
- desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
}
nicvf_get_page(nic);
@@ -201,6 +323,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
int head, tail;
u64 buf_addr, phys_addr;
+ struct pgcache *pgcache;
struct rbdr_entry_t *desc;
if (!rbdr)
@@ -216,7 +339,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
/* Release page references */
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
- buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ buf_addr = desc->buf_addr;
phys_addr = nicvf_iova_to_phys(nic, buf_addr);
dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
@@ -227,13 +350,31 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
}
/* Release buffer of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
- buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ buf_addr = desc->buf_addr;
phys_addr = nicvf_iova_to_phys(nic, buf_addr);
dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (phys_addr)
put_page(virt_to_page(phys_to_virt(phys_addr)));
+ /* Sync page cache info */
+ smp_rmb();
+
+ /* Release additional page references held for recycling */
+ head = 0;
+ while (head < rbdr->pgcnt) {
+ pgcache = &rbdr->pgcache[head];
+ if (pgcache->page && page_ref_count(pgcache->page) != 0) {
+ if (!rbdr->is_xdp) {
+ put_page(pgcache->page);
+ continue;
+ }
+ page_ref_sub(pgcache->page, pgcache->ref_count - 1);
+ put_page(pgcache->page);
+ }
+ head++;
+ }
+
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
@@ -248,7 +389,7 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
int refill_rb_cnt;
struct rbdr *rbdr;
struct rbdr_entry_t *desc;
- u64 *rbuf;
+ u64 rbuf;
int new_rb = 0;
refill:
@@ -269,17 +410,20 @@ refill:
else
refill_rb_cnt = qs->rbdr_len - qcount - 1;
+ /* Sync page cache info */
+ smp_rmb();
+
/* Start filling descs from tail */
tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
while (refill_rb_cnt) {
tail++;
tail &= (rbdr->dmem.q_len - 1);
- if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
+ if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
break;
desc = GET_RBDR_DESC(rbdr, tail);
- desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
refill_rb_cnt--;
new_rb++;
}
@@ -362,7 +506,7 @@ static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
- struct snd_queue *sq, int q_len)
+ struct snd_queue *sq, int q_len, int qidx)
{
int err;
@@ -375,17 +519,38 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
if (!sq->skbuff)
return -ENOMEM;
+
sq->head = 0;
sq->tail = 0;
- atomic_set(&sq->free_cnt, q_len - 1);
sq->thresh = SND_QUEUE_THRESH;
- /* Preallocate memory for TSO segment's header */
- sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
- q_len * TSO_HEADER_SIZE,
- &sq->tso_hdrs_phys, GFP_KERNEL);
- if (!sq->tso_hdrs)
- return -ENOMEM;
+ /* Check if this SQ is an XDP TX queue */
+ if (nic->sqs_mode)
+ qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
+ if (qidx < nic->pnicvf->xdp_tx_queues) {
+ /* Alloc memory to save page pointers for XDP_TX */
+ sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+ if (!sq->xdp_page)
+ return -ENOMEM;
+ sq->xdp_desc_cnt = 0;
+ sq->xdp_free_cnt = q_len - 1;
+ sq->is_xdp = true;
+ } else {
+ sq->xdp_page = NULL;
+ sq->xdp_desc_cnt = 0;
+ sq->xdp_free_cnt = 0;
+ sq->is_xdp = false;
+
+ atomic_set(&sq->free_cnt, q_len - 1);
+
+ /* Preallocate memory for TSO segment's header */
+ sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+ q_len * TSO_HEADER_SIZE,
+ &sq->tso_hdrs_phys,
+ GFP_KERNEL);
+ if (!sq->tso_hdrs)
+ return -ENOMEM;
+ }
return 0;
}
@@ -411,6 +576,7 @@ void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct sk_buff *skb;
+ struct page *page;
struct sq_hdr_subdesc *hdr;
struct sq_hdr_subdesc *tso_sqe;
@@ -428,8 +594,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
smp_rmb();
while (sq->head != sq->tail) {
skb = (struct sk_buff *)sq->skbuff[sq->head];
- if (!skb)
+ if (!skb || !sq->xdp_page)
+ goto next;
+
+ page = (struct page *)sq->xdp_page[sq->head];
+ if (!page)
goto next;
+ else
+ put_page(page);
+
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
/* Check for dummy descriptor used for HW TSO offload on 88xx */
if (hdr->dont_send) {
@@ -442,12 +615,14 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
nicvf_unmap_sndq_buffers(nic, sq, sq->head,
hdr->subdesc_cnt);
}
- dev_kfree_skb_any(skb);
+ if (skb)
+ dev_kfree_skb_any(skb);
next:
sq->head++;
sq->head &= (sq->dmem.q_len - 1);
}
kfree(sq->skbuff);
+ kfree(sq->xdp_page);
nicvf_free_q_desc_mem(nic, &sq->dmem);
}
@@ -838,7 +1013,7 @@ static int nicvf_alloc_resources(struct nicvf *nic)
/* Alloc send queue */
for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
- if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+ if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
goto alloc_fail;
}
@@ -876,6 +1051,7 @@ int nicvf_set_qset_resources(struct nicvf *nic)
nic->rx_queues = qs->rq_cnt;
nic->tx_queues = qs->sq_cnt;
+ nic->xdp_tx_queues = 0;
return 0;
}
@@ -940,7 +1116,10 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
int qentry;
qentry = sq->tail;
- atomic_sub(desc_cnt, &sq->free_cnt);
+ if (!sq->is_xdp)
+ atomic_sub(desc_cnt, &sq->free_cnt);
+ else
+ sq->xdp_free_cnt -= desc_cnt;
sq->tail += desc_cnt;
sq->tail &= (sq->dmem.q_len - 1);
@@ -958,7 +1137,10 @@ static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
- atomic_add(desc_cnt, &sq->free_cnt);
+ if (!sq->is_xdp)
+ atomic_add(desc_cnt, &sq->free_cnt);
+ else
+ sq->xdp_free_cnt += desc_cnt;
sq->head += desc_cnt;
sq->head &= (sq->dmem.q_len - 1);
}
@@ -1016,6 +1198,58 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
}
}
+/* XDP Transmit APIs */
+void nicvf_xdp_sq_doorbell(struct nicvf *nic,
+ struct snd_queue *sq, int sq_num)
+{
+ if (!sq->xdp_desc_cnt)
+ return;
+
+ /* make sure all memory stores are done before ringing doorbell */
+ wmb();
+
+ /* Inform HW to xmit all queued XDP descriptors */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, sq->xdp_desc_cnt);
+ sq->xdp_desc_cnt = 0;
+}
+
+static inline void
+nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+ int subdesc_cnt, u64 data, int len)
+{
+ struct sq_hdr_subdesc *hdr;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ hdr->subdesc_cnt = subdesc_cnt;
+ hdr->tot_len = len;
+ hdr->post_cqe = 1;
+ sq->xdp_page[qentry] = (u64)virt_to_page((void *)data);
+}
+
+int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
+ u64 bufaddr, u64 dma_addr, u16 len)
+{
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+ int qentry;
+
+ if (subdesc_cnt > sq->xdp_free_cnt)
+ return 0;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len);
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr);
+
+ sq->xdp_desc_cnt += subdesc_cnt;
+
+ return 1;
+}
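XDP_TX descriptors are not kicked per packet: nicvf_xdp_sq_append_pkt() only fills subdescriptors and bumps xdp_desc_cnt, while nicvf_xdp_sq_doorbell() flushes the whole batch with one MMIO write at the end of the NAPI poll. The usage pattern, in sketch form:

	/* per packet, from the Rx handler */
	nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);

	/* once per poll, before re-enabling interrupts */
	nicvf_xdp_sq_doorbell(nic, sq, sq_idx);	/* single doorbell write */
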
+
/* Calculate no of SQ subdescriptors needed to transmit all
* segments of this TSO packet.
* Taken from 'Tilera network driver' with a minor modification.
@@ -1094,7 +1328,13 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
{
int proto;
struct sq_hdr_subdesc *hdr;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ ip.hdr = skb_network_header(skb);
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
@@ -1119,7 +1359,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb);
- proto = ip_hdr(skb)->protocol;
+ proto = (ip.v4->version == 4) ? ip.v4->protocol :
+ ip.v6->nexthdr;
+
switch (proto) {
case IPPROTO_TCP:
hdr->csum_l4 = SEND_L4_CSUM_TCP;
@@ -1366,8 +1608,33 @@ static inline unsigned frag_num(unsigned i)
#endif
}
+static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
+ u64 buf_addr, bool xdp)
+{
+ struct page *page = NULL;
+ int len = RCV_FRAG_LEN;
+
+ if (xdp) {
+ page = virt_to_page(phys_to_virt(buf_addr));
+ /* Check if it's a recycled page; if not,
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) != 1)
+ return;
+
+ len += XDP_PACKET_HEADROOM;
+ /* Receive buffers in XDP mode are mapped from page start */
+ dma_addr &= PAGE_MASK;
+ }
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
/* Returns SKB for a received packet */
-struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+ struct cqe_rx_t *cqe_rx, bool xdp)
{
int frag;
int payload_len = 0;
@@ -1402,10 +1669,9 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
if (!frag) {
/* First fragment */
- dma_unmap_page_attrs(&nic->pdev->dev,
- *rb_ptrs - cqe_rx->align_pad,
- RCV_FRAG_LEN, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ nicvf_unmap_rcv_buffer(nic,
+ *rb_ptrs - cqe_rx->align_pad,
+ phys_addr, xdp);
skb = nicvf_rb_ptr_to_skb(nic,
phys_addr - cqe_rx->align_pad,
payload_len);
@@ -1415,9 +1681,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
- dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
- RCV_FRAG_LEN, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
page = virt_to_page(phys_to_virt(phys_addr));
offset = phys_to_virt(phys_addr) - page_address(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
@@ -1547,9 +1811,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
- if (!cqe_rx->err_level && !cqe_rx->err_opcode)
- return 0;
-
if (netif_msg_rx_err(nic))
netdev_err(nic->netdev,
"%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
@@ -1638,8 +1899,6 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
switch (cqe_tx->send_status) {
- case CQ_TX_ERROP_GOOD:
- return 0;
case CQ_TX_ERROP_DESC_FAULT:
this_cpu_inc(nic->drv_stats->tx_desc_fault);
break;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 10cb4b84625b..57858522c33c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -10,6 +10,7 @@
#define NICVF_QUEUES_H
#include <linux/netdevice.h>
+#include <linux/iommu.h>
#include "q_struct.h"
#define MAX_QUEUE_SET 128
@@ -213,6 +214,12 @@ struct q_desc_mem {
void *unalign_base;
};
+struct pgcache {
+ struct page *page;
+ int ref_count;
+ u64 dma_addr;
+};
+
struct rbdr {
bool enable;
u32 dma_size;
@@ -222,6 +229,13 @@ struct rbdr {
u32 head;
u32 tail;
struct q_desc_mem dmem;
+ bool is_xdp;
+
+ /* For page recycling */
+ int pgidx;
+ int pgcnt;
+ int pgalloc;
+ struct pgcache *pgcache;
} ____cacheline_aligned_in_smp;
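pgcnt is rounded up to a power of two (see nicvf_init_rbdr()) precisely so pgidx can wrap with a mask instead of a divide, which matters on this hot path:

	/* requires pgcnt == 2^n; '&' replaces the costlier '%' */
	rbdr->pgidx++;
	rbdr->pgidx &= (rbdr->pgcnt - 1);
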
struct rcv_queue {
@@ -258,6 +272,10 @@ struct snd_queue {
u32 tail;
u64 *skbuff;
void *desc;
+ u64 *xdp_page;
+ u16 xdp_desc_cnt;
+ u16 xdp_free_cnt;
+ bool is_xdp;
#define TSO_HEADER_SIZE 128
/* For TSO segment's header */
@@ -301,6 +319,14 @@ struct queue_set {
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (nic->iommu_domain)
+ return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+ return dma_addr;
+}
+
void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
@@ -318,8 +344,12 @@ void nicvf_sq_free_used_descs(struct net_device *netdev,
struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
struct sk_buff *skb, u8 sq_num);
+int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
+ u64 bufaddr, u64 dma_addr, u16 len);
+void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
-struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+ struct cqe_rx_t *cqe_rx, bool xdp);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
index f36347237a54..e47205aa87ea 100644
--- a/drivers/net/ethernet/cavium/thunder/q_struct.h
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -359,15 +359,7 @@ union cq_desc_t {
};
struct rbdr_entry_t {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 rsvd0:15;
- u64 buf_addr:42;
- u64 cache_align:7;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 cache_align:7;
- u64 buf_addr:42;
- u64 rsvd0:15;
-#endif
+ u64 buf_addr;
};
/* TCP reassembly context */
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 6916c62f2487..94b9482f14a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -223,7 +223,6 @@ struct port_info {
struct cmac *mac;
struct cphy *phy;
struct link_config link_config;
- struct net_device_stats netstats;
};
struct sge;
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d8aff7a4b3c7..8623be13bf86 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -296,7 +296,7 @@ static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
- struct net_device_stats *ns = &p->netstats;
+ struct net_device_stats *ns = &dev->stats;
const struct cmac_statistics *pstats;
/* Do a full update of the MAC stats */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index 8b395b537330..087ff0ffb597 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -72,7 +72,6 @@ struct port_info {
struct cphy phy;
struct cmac mac;
struct link_config link_config;
- struct net_device_stats netstats;
int activity;
__be32 iscsi_ipv4addr;
struct iscsi_config iscsic;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index d76491676b51..2ff6bd139c96 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1489,7 +1489,7 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- struct net_device_stats *ns = &pi->netstats;
+ struct net_device_stats *ns = &dev->stats;
const struct mac_stats *pstats;
spin_lock(&adapter->stats_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6faaca1b48b3..c12c4a3b82b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2338,6 +2338,10 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->locked = 1;
f->fs.rpttid = 1;
+ /* Save the actual tid. We need this to get the corresponding
+ * filter entry structure in filter_rpl.
+ */
+ f->tid = stid + adap->tids.ftid_base;
ret = set_filter_wr(adap, stid);
if (ret) {
clear_filter(adap, f);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 87000cd39737..0de8eb72325c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6369,7 +6369,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
- unsigned int ingpad;
t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
HOSTPAGESIZEPF0_V(sge_hps) |
@@ -6389,6 +6388,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
INGPADBOUNDARY_SHIFT_X) |
EGRSTATUSPAGESIZE_V(stat_len != 64));
} else {
+ unsigned int pack_align;
+ unsigned int ingpad, ingpack;
+ unsigned int pcie_cap;
+
/* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
* Boundary to avoid uselessly chewing up PCIe Link and Memory
@@ -6401,27 +6404,62 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* Size (the minimum unit of transfer to/from Memory). If we
* have a Padding Boundary which is smaller than the Memory
* Line Size, that'll involve a Read-Modify-Write cycle on the
- * Memory Controller which is never good. For T5 the smallest
- * Padding Boundary which we can select is 32 bytes which is
- * larger than any known Memory Controller Line Size so we'll
- * use that.
- *
- * T5 has a different interpretation of the "0" value for the
- * Packing Boundary. This corresponds to 16 bytes instead of
- * the expected 32 bytes. We never have a Packing Boundary
- * less than 32 bytes so we can't use that special value but
- * on the other hand, if we wanted 32 bytes, the best we can
- * really do is 64 bytes.
- */
- if (fl_align <= 32) {
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
+ */
+ pack_align = fl_align;
+ pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ unsigned int mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ pci_read_config_word(adap->pdev,
+ pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
+
+ /* N.B. T5/T6 have a crazy special interpretation of the "0"
+ * value for the Packing Boundary. This corresponds to 16
+ * bytes instead of the expected 32 bytes. So if we want 32
+ * bytes, the best we can really do is 64 bytes ...
+ */
+ if (pack_align <= 16) {
+ ingpack = INGPACKBOUNDARY_16B_X;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = INGPACKBOUNDARY_64B_X;
fl_align = 64;
- fl_align_log = 6;
+ } else {
+ unsigned int pack_align_log = fls(pack_align) - 1;
+
+ ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
+ fl_align = pack_align;
}
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
if (is_t5(adap->params.chip))
- ingpad = INGPCIEBOUNDARY_32B_X;
+ ingpad = INGPADBOUNDARY_32B_X;
else
- ingpad = T6_INGPADBOUNDARY_32B_X;
+ ingpad = T6_INGPADBOUNDARY_8B_X;
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
@@ -6430,8 +6468,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
EGRSTATUSPAGESIZE_V(stat_len != 64));
t4_set_reg_field(adap, SGE_CONTROL2_A,
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
- INGPACKBOUNDARY_V(fl_align_log -
- INGPACKBOUNDARY_SHIFT_X));
+ INGPACKBOUNDARY_V(ingpack));
}
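The Maximum Payload Size decode above is worth a worked example: the PCIe Device Control PAYLOAD field (bits 7:5) encodes 128 << n, so a field value of 2 gives mps_log = 9 and mps = 512 bytes; with a 64-byte cache line, pack_align is then raised from 64 to 512. In sketch form, with an illustrative register value:

	u16 devctl = 0x2050;	/* illustrative; PAYLOAD field = 2 */
	unsigned int mps_log, mps;

	mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;	/* 9 */
	mps = 1 << mps_log;					/* 512 bytes */
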
/*
* Adjust various SGE Free List Host Buffer Sizes.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 36cf3073ca37..f6558cbfc54e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -54,11 +54,15 @@
#define INGPADBOUNDARY_SHIFT_X 5
#define T6_INGPADBOUNDARY_SHIFT_X 3
+#define T6_INGPADBOUNDARY_8B_X 0
#define T6_INGPADBOUNDARY_32B_X 2
+#define INGPADBOUNDARY_32B_X 0
+
/* CONTROL2 register */
#define INGPACKBOUNDARY_SHIFT_X 5
#define INGPACKBOUNDARY_16B_X 0
+#define INGPACKBOUNDARY_64B_X 1
/* GTS register */
#define SGE_TIMERREGS 6
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 3647b28e8de0..47384f7323ac 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1896,7 +1896,7 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
return 0;
}
-static const struct __maybe_unused of_device_id cs89x0_match[] = {
+static const struct of_device_id __maybe_unused cs89x0_match[] = {
{ .compatible = "cirrus,cs8900", },
{ .compatible = "cirrus,cs8920", },
{ },
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 127ce9707378..91b8f6f5a765 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -312,8 +312,6 @@ struct de_private {
u32 msg_enable;
- struct net_device_stats net_stats;
-
struct pci_dev *pdev;
u16 setup_frame[DE_SETUP_FRAME_WORDS];
@@ -388,14 +386,14 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
netif_warn(de, rx_err, de->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
- de->net_stats.rx_length_errors++;
+ de->dev->stats.rx_length_errors++;
}
} else if (status & RxError) {
/* There was a fatal error. */
- de->net_stats.rx_errors++; /* end of a packet.*/
- if (status & 0x0890) de->net_stats.rx_length_errors++;
- if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
- if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
+ de->dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) de->dev->stats.rx_length_errors++;
+ if (status & RxErrCRC) de->dev->stats.rx_crc_errors++;
+ if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++;
}
}
@@ -423,7 +421,7 @@ static void de_rx (struct de_private *de)
mapping = de->rx_skb[rx_tail].mapping;
if (unlikely(drop)) {
- de->net_stats.rx_dropped++;
+ de->dev->stats.rx_dropped++;
goto rx_next;
}
@@ -441,7 +439,7 @@ static void de_rx (struct de_private *de)
buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
copy_skb = netdev_alloc_skb(de->dev, buflen);
if (unlikely(!copy_skb)) {
- de->net_stats.rx_dropped++;
+ de->dev->stats.rx_dropped++;
drop = 1;
rx_work = 100;
goto rx_next;
@@ -470,8 +468,8 @@ static void de_rx (struct de_private *de)
skb->protocol = eth_type_trans (skb, de->dev);
- de->net_stats.rx_packets++;
- de->net_stats.rx_bytes += skb->len;
+ de->dev->stats.rx_packets++;
+ de->dev->stats.rx_bytes += skb->len;
rc = netif_rx (skb);
if (rc == NET_RX_DROP)
drop = 1;
@@ -572,18 +570,18 @@ static void de_tx (struct de_private *de)
netif_dbg(de, tx_err, de->dev,
"tx err, status 0x%x\n",
status);
- de->net_stats.tx_errors++;
+ de->dev->stats.tx_errors++;
if (status & TxOWC)
- de->net_stats.tx_window_errors++;
+ de->dev->stats.tx_window_errors++;
if (status & TxMaxCol)
- de->net_stats.tx_aborted_errors++;
+ de->dev->stats.tx_aborted_errors++;
if (status & TxLinkFail)
- de->net_stats.tx_carrier_errors++;
+ de->dev->stats.tx_carrier_errors++;
if (status & TxFIFOUnder)
- de->net_stats.tx_fifo_errors++;
+ de->dev->stats.tx_fifo_errors++;
} else {
- de->net_stats.tx_packets++;
- de->net_stats.tx_bytes += skb->len;
+ de->dev->stats.tx_packets++;
+ de->dev->stats.tx_bytes += skb->len;
netif_dbg(de, tx_done, de->dev,
"tx done, slot %d\n", tx_tail);
}
@@ -814,9 +812,9 @@ static void de_set_rx_mode (struct net_device *dev)
static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
if (unlikely(rx_missed & RxMissedOver))
- de->net_stats.rx_missed_errors += RxMissedMask;
+ de->dev->stats.rx_missed_errors += RxMissedMask;
else
- de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
+ de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask);
}
static void __de_get_stats(struct de_private *de)
@@ -836,7 +834,7 @@ static struct net_device_stats *de_get_stats(struct net_device *dev)
__de_get_stats(de);
spin_unlock_irq(&de->lock);
- return &de->net_stats;
+ return &dev->stats;
}
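Several conversions in this series (cxgb, cxgb3, de2104x, dl2k) drop a private net_device_stats copy in favor of the core-provided dev->stats, so ndo_get_stats reduces to returning it. The minimal shape:

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	/* counters were bumped in the hot paths via dev->stats.xxx++ */
	return &dev->stats;
}
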
static inline int de_is_running (struct de_private *de)
@@ -1348,7 +1346,7 @@ static void de_clean_rings (struct de_private *de)
struct sk_buff *skb = de->tx_skb[i].skb;
if ((skb) && (skb != DE_DUMMY_SKB)) {
if (skb != DE_SETUP_SKB) {
- de->net_stats.tx_dropped++;
+ de->dev->stats.tx_dropped++;
pci_unmap_single(de->pdev,
de->tx_skb[i].mapping,
skb->len, PCI_DMA_TODEVICE);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1e350135f11d..778f974e2928 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -878,10 +878,10 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
/* Ttransmit Underrun */
if (tx_status & 0x10) {
- np->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
/* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
dw16(ASICCtrl + 2,
@@ -903,7 +903,7 @@ tx_error (struct net_device *dev, int tx_status)
}
/* Late Collision */
if (tx_status & 0x04) {
- np->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
/* TxReset and clear FIFO */
dw16(ASICCtrl + 2, TxReset | FIFOReset);
/* Wait reset done */
@@ -916,13 +916,8 @@ tx_error (struct net_device *dev, int tx_status)
/* Let TxStartThresh stay default value */
}
/* Maximum Collisions */
-#ifdef ETHER_STATS
if (tx_status & 0x08)
- np->stats.collisions16++;
-#else
- if (tx_status & 0x08)
- np->stats.collisions++;
-#endif
+ dev->stats.collisions++;
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -952,15 +947,15 @@ receive_packet (struct net_device *dev)
break;
/* Update rx error statistics, drop packet. */
if (frame_status & RFS_Errors) {
- np->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (frame_status & (RxRuntFrame | RxLengthError))
- np->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (frame_status & RxFCSError)
- np->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
if (frame_status & RxAlignmentError && np->speed != 1000)
- np->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
if (frame_status & RxFIFOOverrun)
- np->stats.rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
} else {
struct sk_buff *skb;
@@ -1096,23 +1091,23 @@ get_stats (struct net_device *dev)
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
- np->stats.rx_packets += dr32(FramesRcvOk);
- np->stats.tx_packets += dr32(FramesXmtOk);
- np->stats.rx_bytes += dr32(OctetRcvOk);
- np->stats.tx_bytes += dr32(OctetXmtOk);
+ dev->stats.rx_packets += dr32(FramesRcvOk);
+ dev->stats.tx_packets += dr32(FramesXmtOk);
+ dev->stats.rx_bytes += dr32(OctetRcvOk);
+ dev->stats.tx_bytes += dr32(OctetXmtOk);
- np->stats.multicast = dr32(McstFramesRcvdOk);
- np->stats.collisions += dr32(SingleColFrames)
+ dev->stats.multicast = dr32(McstFramesRcvdOk);
+ dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
/* detailed tx errors */
stat_reg = dr16(FramesAbortXSColls);
- np->stats.tx_aborted_errors += stat_reg;
- np->stats.tx_errors += stat_reg;
+ dev->stats.tx_aborted_errors += stat_reg;
+ dev->stats.tx_errors += stat_reg;
stat_reg = dr16(CarrierSenseErrors);
- np->stats.tx_carrier_errors += stat_reg;
- np->stats.tx_errors += stat_reg;
+ dev->stats.tx_carrier_errors += stat_reg;
+ dev->stats.tx_errors += stat_reg;
/* Clear all other statistic register. */
dr32(McstOctetXmtOk);
@@ -1142,7 +1137,7 @@ get_stats (struct net_device *dev)
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
- return &np->stats;
+ return &dev->stats;
}
static int
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 5d8ae5320242..10e98ba33ebf 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -377,7 +377,6 @@ struct netdev_private {
void __iomem *eeprom_addr;
spinlock_t tx_lock;
spinlock_t rx_lock;
- struct net_device_stats stats;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
unsigned int speed; /* Operating speed */
unsigned int vlan; /* VLAN Id */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d49528ad7821..50566243e6fa 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -567,6 +567,12 @@ struct be_error_recovery {
/* Ethtool priv_flags */
#define BE_DISABLE_TPE_RECOVERY 0x1
+struct be_vxlan_port {
+ struct list_head list;
+ __be16 port; /* VxLAN UDP dst port */
+ int port_aliases; /* alias count */
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -671,9 +677,9 @@ struct be_adapter {
u32 sli_family;
u8 hba_port_num;
u16 pvid;
- __be16 vxlan_port;
- int vxlan_port_count;
- int vxlan_port_aliases;
+ __be16 vxlan_port; /* offloaded vxlan port num */
+ int vxlan_port_count; /* active vxlan port count */
+ struct list_head vxlan_port_list; /* vxlan port list */
struct phy_info phy;
u8 wol_cap;
bool wol_en;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6be3b9aba8ed..f3a09ab55900 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3857,6 +3857,44 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
+static int be_enable_vxlan_offloads(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->pdev->dev;
+ struct be_vxlan_port *vxlan_port;
+ __be16 port;
+ int status;
+
+ vxlan_port = list_first_entry(&adapter->vxlan_port_list,
+ struct be_vxlan_port, list);
+ port = vxlan_port->port;
+
+ status = be_cmd_manage_iface(adapter, adapter->if_handle,
+ OP_CONVERT_NORMAL_TO_TUNNEL);
+ if (status) {
+ dev_warn(dev, "Failed to convert normal interface to tunnel\n");
+ return status;
+ }
+ adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
+
+ status = be_cmd_set_vxlan_port(adapter, port);
+ if (status) {
+ dev_warn(dev, "Failed to add VxLAN port\n");
+ return status;
+ }
+ adapter->vxlan_port = port;
+
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+ dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(port));
+ return 0;
+}
+
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -4903,63 +4941,59 @@ static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
* those other tunnels are unexported on the fly through ndo_features_check().
*
* Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
- * adds more than one port, disable offloads and don't re-enable them again
- * until after all the tunnels are removed.
+ * adds more than one port, disable offloads and re-enable them again when
+ * there's only one port left. We maintain a list of ports for this purpose.
*/
static void be_work_add_vxlan_port(struct work_struct *work)
{
struct be_cmd_work *cmd_work =
container_of(work, struct be_cmd_work, work);
struct be_adapter *adapter = cmd_work->adapter;
- struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
__be16 port = cmd_work->info.vxlan_port;
+ struct be_vxlan_port *vxlan_port;
int status;
- if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
- adapter->vxlan_port_aliases++;
- goto done;
+ /* Bump up the alias count if it is an existing port */
+ list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
+ if (vxlan_port->port == port) {
+ vxlan_port->port_aliases++;
+ goto done;
+ }
}
+ /* Add a new port to our list. We don't need a lock here since port
+ * add/delete are done only in the context of a single-threaded work
+ * queue (be_wq).
+ */
+ vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
+ if (!vxlan_port)
+ goto done;
+
+ vxlan_port->port = port;
+ INIT_LIST_HEAD(&vxlan_port->list);
+ list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
+ adapter->vxlan_port_count++;
+
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
dev_info(dev,
"Only one UDP port supported for VxLAN offloads\n");
dev_info(dev, "Disabling VxLAN offloads\n");
- adapter->vxlan_port_count++;
goto err;
}
- if (adapter->vxlan_port_count++ >= 1)
+ if (adapter->vxlan_port_count > 1)
goto done;
- status = be_cmd_manage_iface(adapter, adapter->if_handle,
- OP_CONVERT_NORMAL_TO_TUNNEL);
- if (status) {
- dev_warn(dev, "Failed to convert normal interface to tunnel\n");
- goto err;
- }
-
- status = be_cmd_set_vxlan_port(adapter, port);
- if (status) {
- dev_warn(dev, "Failed to add VxLAN port\n");
- goto err;
- }
- adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
- adapter->vxlan_port = port;
-
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ status = be_enable_vxlan_offloads(adapter);
+ if (!status)
+ goto done;
- dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
- be16_to_cpu(port));
- goto done;
err:
be_disable_vxlan_offloads(adapter);
done:
kfree(cmd_work);
+ return;
}
static void be_work_del_vxlan_port(struct work_struct *work)
@@ -4968,23 +5002,40 @@ static void be_work_del_vxlan_port(struct work_struct *work)
container_of(work, struct be_cmd_work, work);
struct be_adapter *adapter = cmd_work->adapter;
__be16 port = cmd_work->info.vxlan_port;
+ struct be_vxlan_port *vxlan_port;
- if (adapter->vxlan_port != port)
- goto done;
+ /* Nothing to be done if a port alias is being deleted */
+ list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
+ if (vxlan_port->port == port) {
+ if (vxlan_port->port_aliases) {
+ vxlan_port->port_aliases--;
+ goto done;
+ }
+ break;
+ }
+ }
+
+ /* No port aliases left; delete the port from the list */
+ list_del(&vxlan_port->list);
+ adapter->vxlan_port_count--;
- if (adapter->vxlan_port_aliases) {
- adapter->vxlan_port_aliases--;
+ /* Disable VxLAN offload if this is the offloaded port */
+ if (adapter->vxlan_port == vxlan_port->port) {
+ WARN_ON(adapter->vxlan_port_count);
+ be_disable_vxlan_offloads(adapter);
+ dev_info(&adapter->pdev->dev,
+ "Disabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(port));
goto out;
}
- be_disable_vxlan_offloads(adapter);
+ /* If only 1 port is left, re-enable VxLAN offload */
+ if (adapter->vxlan_port_count == 1)
+ be_enable_vxlan_offloads(adapter);
- dev_info(&adapter->pdev->dev,
- "Disabled VxLAN offloads for UDP port %d\n",
- be16_to_cpu(port));
-done:
- adapter->vxlan_port_count--;
out:
+ kfree(vxlan_port);
+done:
kfree(cmd_work);
}
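The alias-counted port list replaces the single vxlan_port_aliases counter so offloads can be re-enabled when exactly one distinct port remains. A sketch of the lookup this pattern implies (illustrative helper, not the driver's code; both work handlers above open-code the walk):

static struct be_vxlan_port *be_vxlan_port_find(struct be_adapter *adapter,
						__be16 port)
{
	struct be_vxlan_port *vxlan_port;

	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list)
		if (vxlan_port->port == port)
			return vxlan_port;
	return NULL;
}
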
@@ -5217,15 +5268,15 @@ static bool be_err_is_recoverable(struct be_adapter *adapter)
dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
ue_err_code);
- if (jiffies - err_rec->probe_time <= initial_idle_time) {
+ if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
dev_err(&adapter->pdev->dev,
"Cannot recover within %lu sec from driver load\n",
jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
return false;
}
- if (err_rec->last_recovery_time &&
- (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
+ if (err_rec->last_recovery_time && time_before_eq(
+ jiffies - err_rec->last_recovery_time, recovery_interval)) {
dev_err(&adapter->pdev->dev,
"Cannot recover within %lu sec from last recovery\n",
jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
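
The switch from a plain "<=" comparison to time_before_eq() matters because the jiffies counter wraps; the macro performs the comparison in signed arithmetic so it stays correct across the wrap. A minimal sketch of the same pattern (illustrative names, not be2net code):

#include <linux/jiffies.h>

/* Wrap-safe "has no more than @interval elapsed since @start?" check,
 * mirroring the conversion above. Illustrative helper, not driver API.
 */
static bool within_interval(unsigned long start, unsigned long interval)
{
	return time_before_eq(jiffies - start, interval);
}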
@@ -5626,6 +5677,7 @@ static int be_drv_init(struct be_adapter *adapter)
/* Must be a power of 2 or else MODULO will BUG_ON */
adapter->be_get_temp_freq = 64;
+ INIT_LIST_HEAD(&adapter->vxlan_port_list);
return 0;
free_rx_filter:
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 23d82748f52b..e863ba74d005 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1148,14 +1148,14 @@ static int ethoc_probe(struct platform_device *pdev)
/* Allow the platform setup code to pass in a MAC address. */
if (pdata) {
- memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
+ ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
priv->phy_id = pdata->phy_id;
} else {
const void *mac;
mac = of_get_mac_address(pdev->dev.of_node);
if (mac)
- memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+ ether_addr_copy(netdev->dev_addr, mac);
priv->phy_id = -1;
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f819843e2bae..659f1ad37e96 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index ade6b3e4ed13..95bf5e89cfd1 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -32,6 +32,9 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/of_net.h>
#include <net/ip.h>
#include <net/ncsi.h>
@@ -40,103 +43,137 @@
#define DRV_NAME "ftgmac100"
#define DRV_VERSION "0.7"
-#define RX_QUEUE_ENTRIES 256 /* must be power of 2 */
-#define TX_QUEUE_ENTRIES 512 /* must be power of 2 */
+/* Arbitrary values; I am not sure the HW has limits */
+#define MAX_RX_QUEUE_ENTRIES 1024
+#define MAX_TX_QUEUE_ENTRIES 1024
+#define MIN_RX_QUEUE_ENTRIES 32
+#define MIN_TX_QUEUE_ENTRIES 32
-#define MAX_PKT_SIZE 1518
-#define RX_BUF_SIZE PAGE_SIZE /* must be smaller than 0x3fff */
+/* Defaults */
+#define DEF_RX_QUEUE_ENTRIES 128
+#define DEF_TX_QUEUE_ENTRIES 128
-/******************************************************************************
- * private data
- *****************************************************************************/
-struct ftgmac100_descs {
- struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
- struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
-};
+#define MAX_PKT_SIZE 1536
+#define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */
+
+/* Min number of tx ring entries before stopping queue */
+#define TX_THRESHOLD (MAX_SKB_FRAGS + 1)
struct ftgmac100 {
+ /* Registers */
struct resource *res;
void __iomem *base;
- int irq;
-
- struct ftgmac100_descs *descs;
- dma_addr_t descs_dma_addr;
-
- struct page *rx_pages[RX_QUEUE_ENTRIES];
+ /* Rx ring */
+ unsigned int rx_q_entries;
+ struct ftgmac100_rxdes *rxdes;
+ dma_addr_t rxdes_dma;
+ struct sk_buff **rx_skbs;
unsigned int rx_pointer;
+ u32 rxdes0_edorr_mask;
+
+ /* Tx ring */
+ unsigned int tx_q_entries;
+ struct ftgmac100_txdes *txdes;
+ dma_addr_t txdes_dma;
+ struct sk_buff **tx_skbs;
unsigned int tx_clean_pointer;
unsigned int tx_pointer;
- unsigned int tx_pending;
+ u32 txdes0_edotr_mask;
+
+ /* Used to signal a ring size change request to the reset task */
+ unsigned int new_rx_q_entries;
+ unsigned int new_tx_q_entries;
- spinlock_t tx_lock;
+ /* Scratch page to use when rx skb alloc fails */
+ void *rx_scratch;
+ dma_addr_t rx_scratch_dma;
+ /* Component structures */
struct net_device *netdev;
struct device *dev;
struct ncsi_dev *ndev;
struct napi_struct napi;
-
+ struct work_struct reset_task;
struct mii_bus *mii_bus;
- int old_speed;
- int int_mask_all;
- bool use_ncsi;
- bool enabled;
-
- u32 rxdes0_edorr_mask;
- u32 txdes0_edotr_mask;
-};
-static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes, gfp_t gfp);
-
-/******************************************************************************
- * internal functions (hardware register access)
- *****************************************************************************/
-static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
-{
- iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
-}
+ /* Link management */
+ int cur_speed;
+ int cur_duplex;
+ bool use_ncsi;
-static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv,
- unsigned int size)
-{
- size = FTGMAC100_RBSR_SIZE(size);
- iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR);
-}
+ /* Multicast filter settings */
+ u32 maht0;
+ u32 maht1;
-static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv,
- dma_addr_t addr)
-{
- iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
-}
+ /* Flow control settings */
+ bool tx_pause;
+ bool rx_pause;
+ bool aneg_pause;
-static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv)
-{
- iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
-}
+ /* Misc */
+ bool need_mac_restart;
+ bool is_aspeed;
+};
-static int ftgmac100_reset_hw(struct ftgmac100 *priv)
+static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
{
struct net_device *netdev = priv->netdev;
int i;
/* NOTE: reset clears all registers */
- iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR);
- for (i = 0; i < 5; i++) {
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+ iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
+ priv->base + FTGMAC100_OFFSET_MACCR);
+ for (i = 0; i < 50; i++) {
unsigned int maccr;
maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
if (!(maccr & FTGMAC100_MACCR_SW_RST))
return 0;
- udelay(1000);
+ udelay(1);
}
- netdev_err(netdev, "software reset failed\n");
+ netdev_err(netdev, "Hardware reset failed\n");
return -EIO;
}
-static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
+static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
+{
+ u32 maccr = 0;
+
+ switch (priv->cur_speed) {
+ case SPEED_10:
+ case 0: /* no link */
+ break;
+
+ case SPEED_100:
+ maccr |= FTGMAC100_MACCR_FAST_MODE;
+ break;
+
+ case SPEED_1000:
+ maccr |= FTGMAC100_MACCR_GIGA_MODE;
+ break;
+ default:
+ netdev_err(priv->netdev, "Unknown speed %d!\n",
+ priv->cur_speed);
+ break;
+ }
+
+ /* (Re)initialize the queue pointers */
+ priv->rx_pointer = 0;
+ priv->tx_clean_pointer = 0;
+ priv->tx_pointer = 0;
+
+ /* The doc says reset twice with 10us interval */
+ if (ftgmac100_reset_mac(priv, maccr))
+ return -EIO;
+ usleep_range(10, 1000);
+ return ftgmac100_reset_mac(priv, maccr);
+}
+
+static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
{
unsigned int maddr = mac[0] << 8 | mac[1];
unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
@@ -145,7 +182,7 @@ static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}
-static void ftgmac100_setup_mac(struct ftgmac100 *priv)
+static void ftgmac100_initial_mac(struct ftgmac100 *priv)
{
u8 mac[ETH_ALEN];
unsigned int m;
@@ -189,716 +226,833 @@ static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
return ret;
eth_commit_mac_addr_change(dev, p);
- ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr);
+ ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
return 0;
}
-static void ftgmac100_init_hw(struct ftgmac100 *priv)
-{
- /* setup ring buffer base registers */
- ftgmac100_set_rx_ring_base(priv,
- priv->descs_dma_addr +
- offsetof(struct ftgmac100_descs, rxdes));
- ftgmac100_set_normal_prio_tx_ring_base(priv,
- priv->descs_dma_addr +
- offsetof(struct ftgmac100_descs, txdes));
-
- ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE);
-
- iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC);
-
- ftgmac100_set_mac(priv, priv->netdev->dev_addr);
-}
-
-#define MACCR_ENABLE_ALL (FTGMAC100_MACCR_TXDMA_EN | \
- FTGMAC100_MACCR_RXDMA_EN | \
- FTGMAC100_MACCR_TXMAC_EN | \
- FTGMAC100_MACCR_RXMAC_EN | \
- FTGMAC100_MACCR_FULLDUP | \
- FTGMAC100_MACCR_CRC_APD | \
- FTGMAC100_MACCR_RX_RUNT | \
- FTGMAC100_MACCR_RX_BROADPKT)
-
-static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed)
+static void ftgmac100_config_pause(struct ftgmac100 *priv)
{
- int maccr = MACCR_ENABLE_ALL;
+ u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);
- switch (speed) {
- default:
- case 10:
- break;
-
- case 100:
- maccr |= FTGMAC100_MACCR_FAST_MODE;
- break;
+ /* Throttle tx queue when receiving pause frames */
+ if (priv->rx_pause)
+ fcr |= FTGMAC100_FCR_FC_EN;
- case 1000:
- maccr |= FTGMAC100_MACCR_GIGA_MODE;
- break;
- }
+ /* Enables sending pause frames when the RX queue is past a
+ * certain threshold.
+ */
+ if (priv->tx_pause)
+ fcr |= FTGMAC100_FCR_FCTHR_EN;
- iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+ iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
}
-static void ftgmac100_stop_hw(struct ftgmac100 *priv)
+static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
- iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
-}
+ u32 reg, rfifo_sz, tfifo_sz;
-/******************************************************************************
- * internal functions (receive descriptor)
- *****************************************************************************/
-static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS);
-}
+ /* Clear stale interrupts */
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);
-static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS);
-}
+ /* Setup RX ring buffer base */
+ iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);
-static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
-}
+ /* Setup TX ring buffer base */
+ iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
-static void ftgmac100_rxdes_set_dma_own(const struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- /* clear status bits */
- rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
-}
+ /* Configure RX buffer size */
+ iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
+ priv->base + FTGMAC100_OFFSET_RBSR);
-static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR);
-}
+ /* Set RX descriptor autopoll */
+ iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
+ priv->base + FTGMAC100_OFFSET_APTC);
-static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR);
-}
+ /* Write MAC address */
+ ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);
-static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL);
-}
+ /* Write multicast filter */
+ iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
+ iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
-static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT);
+ /* Configure descriptor sizes and increase burst sizes according
+ * to values in Aspeed SDK. The FIFO arbitration is enabled and
+ * the thresholds set based on the recommended values in the
+ * AST2400 specification.
+ */
+ iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */
+ FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */
+ FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
+ FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
+ FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */
+ FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */
+ FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */
+ priv->base + FTGMAC100_OFFSET_DBLAC);
+
+ /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
+ * mitigation doesn't seem to provide any benefit with NAPI so leave
+ * it at that.
+ */
+ iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
+ FTGMAC100_ITC_TXINT_THR(1),
+ priv->base + FTGMAC100_OFFSET_ITC);
+
+ /* Configure FIFO sizes in the TPAFCR register */
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
+ rfifo_sz = reg & 0x00000007;
+ tfifo_sz = (reg >> 3) & 0x00000007;
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
+ reg &= ~0x3f000000;
+ reg |= (tfifo_sz << 27);
+ reg |= (rfifo_sz << 24);
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
+}
+
+static void ftgmac100_start_hw(struct ftgmac100 *priv)
+{
+ u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
+
+ /* Keep the original GMAC and FAST bits */
+ maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
+
+ /* Add all the main enable bits */
+ maccr |= FTGMAC100_MACCR_TXDMA_EN |
+ FTGMAC100_MACCR_RXDMA_EN |
+ FTGMAC100_MACCR_TXMAC_EN |
+ FTGMAC100_MACCR_RXMAC_EN |
+ FTGMAC100_MACCR_CRC_APD |
+ FTGMAC100_MACCR_PHY_LINK_LEVEL |
+ FTGMAC100_MACCR_RX_RUNT |
+ FTGMAC100_MACCR_RX_BROADPKT;
+
+ /* Add other bits as needed */
+ if (priv->cur_duplex == DUPLEX_FULL)
+ maccr |= FTGMAC100_MACCR_FULLDUP;
+ if (priv->netdev->flags & IFF_PROMISC)
+ maccr |= FTGMAC100_MACCR_RX_ALL;
+ if (priv->netdev->flags & IFF_ALLMULTI)
+ maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
+ else if (netdev_mc_count(priv->netdev))
+ maccr |= FTGMAC100_MACCR_HT_MULTI_EN;
+
+ /* Vlan filtering enabled */
+ if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ maccr |= FTGMAC100_MACCR_RM_VLAN;
+
+ /* Hit the HW */
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}
-static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB);
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}
-static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
{
- return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC;
-}
+ struct netdev_hw_addr *ha;
-static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
-}
+ priv->maht1 = 0;
+ priv->maht0 = 0;
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);
-static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
+ crc_val = (~(crc_val >> 2)) & 0x3f;
+ if (crc_val >= 32)
+ priv->maht1 |= 1ul << (crc_val - 32);
+ else
+ priv->maht0 |= 1ul << (crc_val);
+ }
}
-static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
- dma_addr_t addr)
+static void ftgmac100_set_rx_mode(struct net_device *netdev)
{
- rxdes->rxdes3 = cpu_to_le32(addr);
-}
+ struct ftgmac100 *priv = netdev_priv(netdev);
-static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes)
-{
- return le32_to_cpu(rxdes->rxdes3);
-}
+ /* Setup the hash filter */
+ ftgmac100_calc_mc_hash(priv);
-static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes)
-{
- return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
- cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP);
-}
+ /* Interface down? That's all there is to do */
+ if (!netif_running(netdev))
+ return;
-static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes)
-{
- return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
- cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP);
-}
+ /* Update the HW */
+ iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
+ iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
-static bool ftgmac100_rxdes_tcpcs_err(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR);
+ /* Reconfigure MACCR */
+ ftgmac100_start_hw(priv);
}
-static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes)
+static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
+ struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
- return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR);
-}
+ struct net_device *netdev = priv->netdev;
+ struct sk_buff *skb;
+ dma_addr_t map;
+ int err;
-static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
-{
- return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
-}
+ skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_warn(netdev, "failed to allocate rx skb\n");
+ err = -ENOMEM;
+ map = priv->rx_scratch_dma;
+ } else {
+ map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to map rx page\n");
+ dev_kfree_skb_any(skb);
+ map = priv->rx_scratch_dma;
+ skb = NULL;
+ err = -ENOMEM;
+ }
+ }
-static inline struct page **ftgmac100_rxdes_page_slot(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- return &priv->rx_pages[rxdes - priv->descs->rxdes];
-}
+ /* Store skb */
+ priv->rx_skbs[entry] = skb;
-/*
- * rxdes2 is not used by hardware. We use it to keep track of page.
- * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
- */
-static void ftgmac100_rxdes_set_page(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes,
- struct page *page)
-{
- *ftgmac100_rxdes_page_slot(priv, rxdes) = page;
-}
+ /* Store DMA address into RX desc */
+ rxdes->rxdes3 = cpu_to_le32(map);
-static struct page *ftgmac100_rxdes_get_page(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
-{
- return *ftgmac100_rxdes_page_slot(priv, rxdes);
-}
+ /* Ensure the above is ordered vs clearing the OWN bit */
+ dma_wmb();
-/******************************************************************************
- * internal functions (receive)
- *****************************************************************************/
-static int ftgmac100_next_rx_pointer(int pointer)
-{
- return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
-}
+ /* Clean status (which resets own bit) */
+ if (entry == (priv->rx_q_entries - 1))
+ rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
+ else
+ rxdes->rxdes0 = 0;
-static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv)
-{
- priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer);
-}
-
-static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv)
-{
- return &priv->descs->rxdes[priv->rx_pointer];
+ return 0;
}
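
The dma_wmb() above is the producer half of the descriptor ownership protocol: the buffer address must be visible to the device before the status word that hands the descriptor over (the consumer side pairs this with the dma_rmb() after testing RXPKT_RDY further down). A condensed sketch of that ordering, not an actual driver function:

/* Publish a buffer to the hardware: payload fields first, then a
 * dma_wmb(), then the status word that transfers ownership.
 */
static void rxdes_publish(struct ftgmac100_rxdes *rxdes, dma_addr_t buf,
			  u32 status)
{
	rxdes->rxdes3 = cpu_to_le32(buf);	/* buffer address first */
	dma_wmb();				/* order vs. ownership write */
	rxdes->rxdes0 = cpu_to_le32(status);	/* hand over to HW last */
}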
-static struct ftgmac100_rxdes *
-ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
+static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
+ unsigned int pointer)
{
- struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
-
- while (ftgmac100_rxdes_packet_ready(rxdes)) {
- if (ftgmac100_rxdes_first_segment(rxdes))
- return rxdes;
-
- ftgmac100_rxdes_set_dma_own(priv, rxdes);
- ftgmac100_rx_pointer_advance(priv);
- rxdes = ftgmac100_current_rxdes(priv);
- }
-
- return NULL;
+ return (pointer + 1) & (priv->rx_q_entries - 1);
}
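
Ring indices advance with an AND mask rather than a modulo; this only works because ftgmac100_set_ringparam() below rejects ring sizes that are not powers of two. A sketch of the invariant:

/* Valid only when entries == 2^n: (x % entries) == (x & (entries - 1)).
 * e.g. entries = 128: pointer 127 wraps to (127 + 1) & 127 == 0.
 */
static unsigned int ring_advance(unsigned int pointer, unsigned int entries)
{
	return (pointer + 1) & (entries - 1);
}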
-static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
{
struct net_device *netdev = priv->netdev;
- bool error = false;
-
- if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx err\n");
+ if (status & FTGMAC100_RXDES0_RX_ERR)
netdev->stats.rx_errors++;
- error = true;
- }
-
- if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx crc err\n");
+ if (status & FTGMAC100_RXDES0_CRC_ERR)
netdev->stats.rx_crc_errors++;
- error = true;
- } else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx IP checksum err\n");
-
- error = true;
- }
-
- if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx frame too long\n");
-
- netdev->stats.rx_length_errors++;
- error = true;
- } else if (unlikely(ftgmac100_rxdes_runt(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx runt\n");
-
- netdev->stats.rx_length_errors++;
- error = true;
- } else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx odd nibble\n");
+ if (status & (FTGMAC100_RXDES0_FTL |
+ FTGMAC100_RXDES0_RUNT |
+ FTGMAC100_RXDES0_RX_ODD_NB))
netdev->stats.rx_length_errors++;
- error = true;
- }
-
- return error;
}
-static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
+static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
{
struct net_device *netdev = priv->netdev;
- struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
- bool done = false;
+ struct ftgmac100_rxdes *rxdes;
+ struct sk_buff *skb;
+ unsigned int pointer, size;
+ u32 status, csum_vlan;
+ dma_addr_t map;
- if (net_ratelimit())
- netdev_dbg(netdev, "drop packet %p\n", rxdes);
+ /* Grab next RX descriptor */
+ pointer = priv->rx_pointer;
+ rxdes = &priv->rxdes[pointer];
- do {
- if (ftgmac100_rxdes_last_segment(rxdes))
- done = true;
+ /* Grab descriptor status */
+ status = le32_to_cpu(rxdes->rxdes0);
- ftgmac100_rxdes_set_dma_own(priv, rxdes);
- ftgmac100_rx_pointer_advance(priv);
- rxdes = ftgmac100_current_rxdes(priv);
- } while (!done && ftgmac100_rxdes_packet_ready(rxdes));
+ /* Do we have a packet? */
+ if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
+ return false;
- netdev->stats.rx_dropped++;
-}
+ /* Order subsequent reads with the test for the ready bit */
+ dma_rmb();
-static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
-{
- struct net_device *netdev = priv->netdev;
- struct ftgmac100_rxdes *rxdes;
- struct sk_buff *skb;
- bool done = false;
+ /* We don't cope with fragmented RX packets */
+ if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
+ !(status & FTGMAC100_RXDES0_LRS)))
+ goto drop;
- rxdes = ftgmac100_rx_locate_first_segment(priv);
- if (!rxdes)
- return false;
+ /* Grab received size and csum vlan field in the descriptor */
+ size = status & FTGMAC100_RXDES0_VDBC;
+ csum_vlan = le32_to_cpu(rxdes->rxdes1);
- if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) {
- ftgmac100_rx_drop_packet(priv);
- return true;
+ /* Any error (other than csum offload) flagged? */
+ if (unlikely(status & RXDES0_ANY_ERROR)) {
+ /* Correct for incorrect flagging of runt packets
+ * with vlan tags... Just accept a runt packet that
+ * has been flagged as vlan and whose size is at
+ * least 60 bytes.
+ */
+ if ((status & FTGMAC100_RXDES0_RUNT) &&
+ (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
+ (size >= 60))
+ status &= ~FTGMAC100_RXDES0_RUNT;
+
+ /* Any error still in there? */
+ if (status & RXDES0_ANY_ERROR) {
+ ftgmac100_rx_packet_error(priv, status);
+ goto drop;
+ }
}
- /* start processing */
- skb = netdev_alloc_skb_ip_align(netdev, 128);
- if (unlikely(!skb)) {
- if (net_ratelimit())
- netdev_err(netdev, "rx skb alloc failed\n");
-
- ftgmac100_rx_drop_packet(priv);
- return true;
+ /* If the packet had no skb (failed to allocate earlier)
+ * then try to allocate one and skip
+ */
+ skb = priv->rx_skbs[pointer];
+ if (unlikely(!skb)) {
+ ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
+ goto drop;
}
- if (unlikely(ftgmac100_rxdes_multicast(rxdes)))
+ if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
netdev->stats.multicast++;
- /*
- * It seems that HW does checksum incorrectly with fragmented packets,
- * so we are conservative here - if HW checksum error, let software do
- * the checksum again.
+ /* If the HW found checksum errors, bounce it to software.
+ *
+ * If we didn't, we need to see if the packet was recognized
+ * by HW as one of the supported checksummed protocols before
+ * we accept the HW test results.
*/
- if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) ||
- (ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- do {
- dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
- struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
- unsigned int size;
+ if (netdev->features & NETIF_F_RXCSUM) {
+ u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
+ FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
+ FTGMAC100_RXDES1_IP_CHKSUM_ERR;
+ if ((csum_vlan & err_bits) ||
+ !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
- dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ /* Transfer received size to skb */
+ skb_put(skb, size);
- size = ftgmac100_rxdes_data_length(rxdes);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
+ /* Extract vlan tag */
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ csum_vlan & 0xffff);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += PAGE_SIZE;
+ /* Tear down DMA mapping, do necessary cache management */
+ map = le32_to_cpu(rxdes->rxdes3);
- if (ftgmac100_rxdes_last_segment(rxdes))
- done = true;
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
+ /* When we don't have an iommu, we can save cycles by not
+ * invalidating the cache for the part of the packet that
+ * wasn't received.
+ */
+ dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
+#else
+ dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+#endif
- ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
- ftgmac100_rx_pointer_advance(priv);
- rxdes = ftgmac100_current_rxdes(priv);
- } while (!done);
+ /* Replenish rx ring */
+ ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
- /* Small frames are copied into linear part of skb to free one page */
- if (skb->len <= 128) {
- skb->truesize -= PAGE_SIZE;
- __pskb_pull_tail(skb, skb->len);
- } else {
- /* We pull the minimum amount into linear part */
- __pskb_pull_tail(skb, ETH_HLEN);
- }
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += skb->len;
+ netdev->stats.rx_bytes += size;
/* push packet to protocol stack */
- napi_gro_receive(&priv->napi, skb);
+ if (skb->ip_summed == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
(*processed)++;
return true;
+
+ drop:
+ /* Clean rxdes0 (which resets own bit) */
+ rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
+ netdev->stats.rx_dropped++;
+ return true;
}
-/******************************************************************************
- * internal functions (transmit descriptor)
- *****************************************************************************/
-static void ftgmac100_txdes_reset(const struct ftgmac100 *priv,
- struct ftgmac100_txdes *txdes)
+static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
+ unsigned int index)
{
- /* clear all except end of ring bit */
- txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask);
- txdes->txdes1 = 0;
- txdes->txdes2 = 0;
- txdes->txdes3 = 0;
+ if (index == (priv->tx_q_entries - 1))
+ return priv->txdes0_edotr_mask;
+ else
+ return 0;
}
-static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes)
+static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
+ unsigned int pointer)
{
- return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+ return (pointer + 1) & (priv->tx_q_entries - 1);
}
-static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
+static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
{
- /*
- * Make sure dma own bit will not be set before any other
- * descriptor fields.
+ /* Returns the number of available slots in the TX queue
+ *
+ * This always leaves one free slot so we don't have to
+ * worry about empty vs. full, and this simplifies the
+ * test for ftgmac100_tx_buf_cleanable() below
*/
- wmb();
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+ return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
+ (priv->tx_q_entries - 1);
}
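
A worked example of the reserved-slot arithmetic above, assuming 128 entries: when clean == tx the ring is empty and the formula reports 127 free slots, while a full ring reports 0; without the "- 1" both states would yield the same value.

static u32 tx_slots_free(u32 clean, u32 tx, u32 entries)
{
	return (clean - tx - 1) & (entries - 1);
}
/* entries = 128: tx_slots_free(5, 5, 128) == 127 (empty ring),
 * tx_slots_free(5, 4, 128) == 0 (full, one slot held back). */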
-static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv,
- struct ftgmac100_txdes *txdes)
+static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
{
- txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
+ return priv->tx_pointer != priv->tx_clean_pointer;
}
-static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
+static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
+ unsigned int pointer,
+ struct sk_buff *skb,
+ struct ftgmac100_txdes *txdes,
+ u32 ctl_stat)
{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS);
-}
+ dma_addr_t map = le32_to_cpu(txdes->txdes3);
+ size_t len;
-static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes)
-{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS);
-}
+ if (ctl_stat & FTGMAC100_TXDES0_FTS) {
+ len = skb_headlen(skb);
+ dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
+ } else {
+ len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
+ dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
+ }
-static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes,
- unsigned int len)
-{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len));
+ /* Free SKB on last segment */
+ if (ctl_stat & FTGMAC100_TXDES0_LTS)
+ dev_kfree_skb(skb);
+ priv->tx_skbs[pointer] = NULL;
}
-static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes)
+static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
{
- txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC);
-}
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_txdes *txdes;
+ struct sk_buff *skb;
+ unsigned int pointer;
+ u32 ctl_stat;
-static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes)
-{
- txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM);
-}
+ pointer = priv->tx_clean_pointer;
+ txdes = &priv->txdes[pointer];
-static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes)
-{
- txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM);
-}
+ ctl_stat = le32_to_cpu(txdes->txdes0);
+ if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
+ return false;
-static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes)
-{
- txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM);
-}
+ skb = priv->tx_skbs[pointer];
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+ txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
-static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes,
- dma_addr_t addr)
-{
- txdes->txdes3 = cpu_to_le32(addr);
-}
+ priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
-static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes)
-{
- return le32_to_cpu(txdes->txdes3);
+ return true;
}
-/*
- * txdes2 is not used by hardware. We use it to keep track of socket buffer.
- * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
- */
-static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes,
- struct sk_buff *skb)
+static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
- txdes->txdes2 = (unsigned int)skb;
-}
+ struct net_device *netdev = priv->netdev;
-static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes)
-{
- return (struct sk_buff *)txdes->txdes2;
-}
+ /* Process all completed packets */
+ while (ftgmac100_tx_buf_cleanable(priv) &&
+ ftgmac100_tx_complete_packet(priv))
+ ;
-/******************************************************************************
- * internal functions (transmit)
- *****************************************************************************/
-static int ftgmac100_next_tx_pointer(int pointer)
-{
- return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+ /* Restart queue if needed */
+ smp_mb();
+ if (unlikely(netif_queue_stopped(netdev) &&
+ ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(netdev, 0);
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_queue_stopped(netdev) &&
+ ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
+ netif_wake_queue(netdev);
+ __netif_tx_unlock(txq);
+ }
}
-static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv)
+static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
{
- priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer);
-}
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ u8 ip_proto = ip_hdr(skb)->protocol;
-static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv)
-{
- priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer);
+ *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
+ return true;
+ case IPPROTO_UDP:
+ *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
+ return true;
+ case IPPROTO_IP:
+ return true;
+ }
+ }
+ return skb_checksum_help(skb) == 0;
}
-static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv)
+static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
- return &priv->descs->txdes[priv->tx_pointer];
-}
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct ftgmac100_txdes *txdes, *first;
+ unsigned int pointer, nfrags, len, i, j;
+ u32 f_ctl_stat, ctl_stat, csum_vlan;
+ dma_addr_t map;
-static struct ftgmac100_txdes *
-ftgmac100_current_clean_txdes(struct ftgmac100 *priv)
-{
- return &priv->descs->txdes[priv->tx_clean_pointer];
-}
+ /* The HW doesn't pad small frames */
+ if (eth_skb_pad(skb)) {
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
-static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
-{
- struct net_device *netdev = priv->netdev;
- struct ftgmac100_txdes *txdes;
- struct sk_buff *skb;
- dma_addr_t map;
+ /* Reject oversize packets */
+ if (unlikely(skb->len > MAX_PKT_SIZE)) {
+ if (net_ratelimit())
+ netdev_dbg(netdev, "tx packet too big\n");
+ goto drop;
+ }
- if (priv->tx_pending == 0)
- return false;
+ /* Do we have a limit on the number of fragments? I have yet to get
+ * a reply from Aspeed; if there is one, I haven't hit it.
+ */
+ nfrags = skb_shinfo(skb)->nr_frags;
- txdes = ftgmac100_current_clean_txdes(priv);
+ /* Get header len */
+ len = skb_headlen(skb);
- if (ftgmac100_txdes_owned_by_dma(txdes))
- return false;
+ /* Map the packet head */
+ map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, map)) {
+ if (net_ratelimit())
+ netdev_err(netdev, "map tx packet head failed\n");
+ goto drop;
+ }
- skb = ftgmac100_txdes_get_skb(txdes);
- map = ftgmac100_txdes_get_dma_addr(txdes);
+ /* Grab the next free tx descriptor */
+ pointer = priv->tx_pointer;
+ txdes = first = &priv->txdes[pointer];
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
+ /* Setup it up with the packet head. Don't write the head to the
+ * ring just yet
+ */
+ priv->tx_skbs[pointer] = skb;
+ f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
+ f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
+ f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
+ f_ctl_stat |= FTGMAC100_TXDES0_FTS;
+ if (nfrags == 0)
+ f_ctl_stat |= FTGMAC100_TXDES0_LTS;
+ txdes->txdes3 = cpu_to_le32(map);
+
+ /* Setup HW checksumming */
+ csum_vlan = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+ goto drop;
+
+ /* Add VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+ csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+ }
- dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+ txdes->txdes1 = cpu_to_le32(csum_vlan);
+
+ /* Next descriptor */
+ pointer = ftgmac100_next_tx_pointer(priv, pointer);
+
+ /* Add the fragments */
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+
+ /* Map it */
+ map = skb_frag_dma_map(priv->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, map))
+ goto dma_err;
+
+ /* Setup descriptor */
+ priv->tx_skbs[pointer] = skb;
+ txdes = &priv->txdes[pointer];
+ ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
+ ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
+ ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
+ if (i == (nfrags - 1))
+ ctl_stat |= FTGMAC100_TXDES0_LTS;
+ txdes->txdes0 = cpu_to_le32(ctl_stat);
+ txdes->txdes1 = 0;
+ txdes->txdes3 = cpu_to_le32(map);
+
+ /* Next one */
+ pointer = ftgmac100_next_tx_pointer(priv, pointer);
+ }
- dev_kfree_skb(skb);
+ /* Order the previous packet and descriptor updates
+ * before setting the OWN bit on the first descriptor.
+ */
+ dma_wmb();
+ first->txdes0 = cpu_to_le32(f_ctl_stat);
- ftgmac100_txdes_reset(priv, txdes);
+ /* Update next TX pointer */
+ priv->tx_pointer = pointer;
- ftgmac100_tx_clean_pointer_advance(priv);
+ /* If there isn't enough room for all the fragments of a new packet
+ * in the TX ring, stop the queue. The sequence below is race free
+ * vs. a concurrent restart in ftgmac100_poll()
+ */
+ if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
+ netif_stop_queue(netdev);
+ /* Order the queue stop with the test below */
+ smp_mb();
+ if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
+ netif_wake_queue(netdev);
+ }
- spin_lock(&priv->tx_lock);
- priv->tx_pending--;
- spin_unlock(&priv->tx_lock);
- netif_wake_queue(netdev);
+ /* Poke transmitter to read the updated TX descriptors */
+ iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
- return true;
-}
+ return NETDEV_TX_OK;
-static void ftgmac100_tx_complete(struct ftgmac100 *priv)
-{
- while (ftgmac100_tx_complete_packet(priv))
- ;
+ dma_err:
+ if (net_ratelimit())
+ netdev_err(netdev, "map tx fragment failed\n");
+
+ /* Free head */
+ pointer = priv->tx_pointer;
+ ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
+ first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
+
+ /* Then all fragments */
+ for (j = 0; j < i; j++) {
+ pointer = ftgmac100_next_tx_pointer(priv, pointer);
+ txdes = &priv->txdes[pointer];
+ ctl_stat = le32_to_cpu(txdes->txdes0);
+ ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+ txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+ }
+
+ /* This cannot be reached if we successfully mapped the
+ * last fragment, so we know ftgmac100_free_tx_packet()
+ * hasn't freed the skb yet.
+ */
+ drop:
+ /* Drop the packet */
+ dev_kfree_skb_any(skb);
+ netdev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
}
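
Both this function and ftgmac100_tx_complete() above use the same lock-free stop/wake handshake: stop (or observe stopped), a full barrier, then re-check the free count. A minimal sketch of the producer side, condensed from the code above:

/* Producer side: the smp_mb() pairs with the one in the completion
 * path, so a wake-up cannot be lost between the check and the stop.
 */
static void tx_maybe_stop_queue(struct net_device *netdev,
				struct ftgmac100 *priv)
{
	if (ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD) {
		netif_stop_queue(netdev);
		smp_mb();
		if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
			netif_wake_queue(netdev);
	}
}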
-static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb,
- dma_addr_t map)
+static void ftgmac100_free_buffers(struct ftgmac100 *priv)
{
- struct net_device *netdev = priv->netdev;
- struct ftgmac100_txdes *txdes;
- unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
-
- txdes = ftgmac100_current_txdes(priv);
- ftgmac100_tx_pointer_advance(priv);
-
- /* setup TX descriptor */
- ftgmac100_txdes_set_skb(txdes, skb);
- ftgmac100_txdes_set_dma_addr(txdes, map);
- ftgmac100_txdes_set_buffer_size(txdes, len);
-
- ftgmac100_txdes_set_first_segment(txdes);
- ftgmac100_txdes_set_last_segment(txdes);
- ftgmac100_txdes_set_txint(txdes);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- __be16 protocol = skb->protocol;
-
- if (protocol == cpu_to_be16(ETH_P_IP)) {
- u8 ip_proto = ip_hdr(skb)->protocol;
-
- ftgmac100_txdes_set_ipcs(txdes);
- if (ip_proto == IPPROTO_TCP)
- ftgmac100_txdes_set_tcpcs(txdes);
- else if (ip_proto == IPPROTO_UDP)
- ftgmac100_txdes_set_udpcs(txdes);
- }
+ int i;
+
+ /* Free all RX buffers */
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
+ struct sk_buff *skb = priv->rx_skbs[i];
+ dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
+
+ if (!skb)
+ continue;
+
+ priv->rx_skbs[i] = NULL;
+ dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
}
- spin_lock(&priv->tx_lock);
- priv->tx_pending++;
- if (priv->tx_pending == TX_QUEUE_ENTRIES)
- netif_stop_queue(netdev);
+ /* Free all TX buffers */
+ for (i = 0; i < priv->tx_q_entries; i++) {
+ struct ftgmac100_txdes *txdes = &priv->txdes[i];
+ struct sk_buff *skb = priv->tx_skbs[i];
+
+ if (!skb)
+ continue;
+ ftgmac100_free_tx_packet(priv, i, skb, txdes,
+ le32_to_cpu(txdes->txdes0));
+ }
+}
- /* start transmit */
- ftgmac100_txdes_set_dma_own(txdes);
- spin_unlock(&priv->tx_lock);
+static void ftgmac100_free_rings(struct ftgmac100 *priv)
+{
+ /* Free skb arrays */
+ kfree(priv->rx_skbs);
+ kfree(priv->tx_skbs);
- ftgmac100_txdma_normal_prio_start_polling(priv);
+ /* Free descriptors */
+ if (priv->rxdes)
+ dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
+ sizeof(struct ftgmac100_rxdes),
+ priv->rxdes, priv->rxdes_dma);
+ priv->rxdes = NULL;
- return NETDEV_TX_OK;
+ if (priv->txdes)
+ dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
+ sizeof(struct ftgmac100_txdes),
+ priv->txdes, priv->txdes_dma);
+ priv->txdes = NULL;
+
+ /* Free scratch packet buffer */
+ if (priv->rx_scratch)
+ dma_free_coherent(priv->dev, RX_BUF_SIZE,
+ priv->rx_scratch, priv->rx_scratch_dma);
}
-/******************************************************************************
- * internal functions (buffer)
- *****************************************************************************/
-static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
- struct ftgmac100_rxdes *rxdes, gfp_t gfp)
+static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
{
- struct net_device *netdev = priv->netdev;
- struct page *page;
- dma_addr_t map;
+ /* Allocate skb arrays */
+ priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
+ GFP_KERNEL);
+ if (!priv->rx_skbs)
+ return -ENOMEM;
+ priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
+ GFP_KERNEL);
+ if (!priv->tx_skbs)
+ return -ENOMEM;
- page = alloc_page(gfp);
- if (!page) {
- if (net_ratelimit())
- netdev_err(netdev, "failed to allocate rx page\n");
+ /* Allocate descriptors */
+ priv->rxdes = dma_zalloc_coherent(priv->dev,
+ MAX_RX_QUEUE_ENTRIES *
+ sizeof(struct ftgmac100_rxdes),
+ &priv->rxdes_dma, GFP_KERNEL);
+ if (!priv->rxdes)
+ return -ENOMEM;
+ priv->txdes = dma_zalloc_coherent(priv->dev,
+ MAX_TX_QUEUE_ENTRIES *
+ sizeof(struct ftgmac100_txdes),
+ &priv->txdes_dma, GFP_KERNEL);
+ if (!priv->txdes)
return -ENOMEM;
- }
- map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, map))) {
- if (net_ratelimit())
- netdev_err(netdev, "failed to map rx page\n");
- __free_page(page);
+ /* Allocate scratch packet buffer */
+ priv->rx_scratch = dma_alloc_coherent(priv->dev,
+ RX_BUF_SIZE,
+ &priv->rx_scratch_dma,
+ GFP_KERNEL);
+ if (!priv->rx_scratch)
return -ENOMEM;
- }
- ftgmac100_rxdes_set_page(priv, rxdes, page);
- ftgmac100_rxdes_set_dma_addr(rxdes, map);
- ftgmac100_rxdes_set_dma_own(priv, rxdes);
return 0;
}
-static void ftgmac100_free_buffers(struct ftgmac100 *priv)
+static void ftgmac100_init_rings(struct ftgmac100 *priv)
{
+ struct ftgmac100_rxdes *rxdes = NULL;
+ struct ftgmac100_txdes *txdes = NULL;
int i;
- for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
- struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
- struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
- dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+ /* Update entries counts */
+ priv->rx_q_entries = priv->new_rx_q_entries;
+ priv->tx_q_entries = priv->new_tx_q_entries;
- if (!page)
- continue;
+ if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
+ return;
- dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
- __free_page(page);
+ /* Initialize RX ring */
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ rxdes = &priv->rxdes[i];
+ rxdes->rxdes0 = 0;
+ rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
}
+ /* Mark the end of the ring */
+ rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
- for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
- struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
- struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes);
- dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes);
-
- if (!skb)
- continue;
+ if (WARN_ON(priv->tx_q_entries < MIN_TX_QUEUE_ENTRIES))
+ return;
- dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
- kfree_skb(skb);
+ /* Initialize TX ring */
+ for (i = 0; i < priv->tx_q_entries; i++) {
+ txdes = &priv->txdes[i];
+ txdes->txdes0 = 0;
}
-
- dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
- priv->descs, priv->descs_dma_addr);
+ txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}
-static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
+static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
{
int i;
- priv->descs = dma_zalloc_coherent(priv->dev,
- sizeof(struct ftgmac100_descs),
- &priv->descs_dma_addr, GFP_KERNEL);
- if (!priv->descs)
- return -ENOMEM;
-
- /* initialize RX ring */
- ftgmac100_rxdes_set_end_of_ring(priv,
- &priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
-
- for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
- struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
- if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
- goto err;
+ if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
+ return -ENOMEM;
}
-
- /* initialize TX ring */
- ftgmac100_txdes_set_end_of_ring(priv,
- &priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
return 0;
-
-err:
- ftgmac100_free_buffers(priv);
- return -ENOMEM;
}
-/******************************************************************************
- * internal functions (mdio)
- *****************************************************************************/
static void ftgmac100_adjust_link(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
- int ier;
+ bool tx_pause, rx_pause;
+ int new_speed;
+
+ /* We store "no link" as speed 0 */
+ if (!phydev->link)
+ new_speed = 0;
+ else
+ new_speed = phydev->speed;
+
+ /* Grab pause settings from PHY if configured to do so */
+ if (priv->aneg_pause) {
+ rx_pause = tx_pause = phydev->pause;
+ if (phydev->asym_pause)
+ tx_pause = !rx_pause;
+ } else {
+ rx_pause = priv->rx_pause;
+ tx_pause = priv->tx_pause;
+ }
- if (phydev->speed == priv->old_speed)
+ /* Link hasn't changed, do nothing */
+ if (phydev->speed == priv->cur_speed &&
+ phydev->duplex == priv->cur_duplex &&
+ rx_pause == priv->rx_pause &&
+ tx_pause == priv->tx_pause)
return;
- priv->old_speed = phydev->speed;
-
- ier = ioread32(priv->base + FTGMAC100_OFFSET_IER);
+ /* Print status if we have a link or we had one and just lost it,
+ * don't print otherwise.
+ */
+ if (new_speed || priv->cur_speed)
+ phy_print_status(phydev);
- /* disable all interrupts */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ priv->cur_speed = new_speed;
+ priv->cur_duplex = phydev->duplex;
+ priv->rx_pause = rx_pause;
+ priv->tx_pause = tx_pause;
- netif_stop_queue(netdev);
- ftgmac100_stop_hw(priv);
+ /* Link is down, do nothing else */
+ if (!new_speed)
+ return;
- netif_start_queue(netdev);
- ftgmac100_init_hw(priv);
- ftgmac100_start_hw(priv, phydev->speed);
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- /* re-enable interrupts */
- iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER);
+ /* Reset the adapter asynchronously */
+ schedule_work(&priv->reset_task);
}
-static int ftgmac100_mii_probe(struct ftgmac100 *priv)
+static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
{
struct net_device *netdev = priv->netdev;
struct phy_device *phydev;
@@ -910,19 +1064,25 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
}
phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII);
+ &ftgmac100_adjust_link, intf);
if (IS_ERR(phydev)) {
netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
return PTR_ERR(phydev);
}
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.txt)
+ */
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->advertising = phydev->supported;
+
+ /* Display what we found */
+ phy_attached_info(phydev);
+
return 0;
}
-/******************************************************************************
- * struct mii_bus functions
- *****************************************************************************/
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
@@ -994,9 +1154,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
return -EIO;
}
-/******************************************************************************
- * struct ethtool_ops functions
- *****************************************************************************/
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -1005,175 +1162,365 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
+static void ftgmac100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ memset(ering, 0, sizeof(*ering));
+ ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
+ ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
+ ering->rx_pending = priv->rx_q_entries;
+ ering->tx_pending = priv->tx_q_entries;
+}
+
+static int ftgmac100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
+ ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
+ ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
+ ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
+ !is_power_of_2(ering->rx_pending) ||
+ !is_power_of_2(ering->tx_pending))
+ return -EINVAL;
+
+ priv->new_rx_q_entries = ering->rx_pending;
+ priv->new_tx_q_entries = ering->tx_pending;
+ if (netif_running(netdev))
+ schedule_work(&priv->reset_task);
+
+ return 0;
+}
+
+static void ftgmac100_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ pause->autoneg = priv->aneg_pause;
+ pause->tx_pause = priv->tx_pause;
+ pause->rx_pause = priv->rx_pause;
+}
+
+static int ftgmac100_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+
+ priv->aneg_pause = pause->autoneg;
+ priv->tx_pause = pause->tx_pause;
+ priv->rx_pause = pause->rx_pause;
+
+ if (phydev) {
+ phydev->advertising &= ~ADVERTISED_Pause;
+ phydev->advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pause->rx_pause) {
+ phydev->advertising |= ADVERTISED_Pause;
+ phydev->advertising |= ADVERTISED_Asym_Pause;
+ }
+
+ if (pause->tx_pause)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+ }
+ if (netif_running(netdev)) {
+ if (phydev && priv->aneg_pause)
+ phy_start_aneg(phydev);
+ else
+ ftgmac100_config_pause(priv);
+ }
+
+ return 0;
+}
+
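
The XOR on ADVERTISED_Asym_Pause above is a compact encoding of the standard pause advertisement mapping (802.3 clause 28B). Spelled out as a sketch, with the resulting truth table as a comment:

/* rx_pause tx_pause -> advertised bits (result of the code above):
 *    0        0     -> none
 *    1        0     -> Pause | Asym_Pause  (receive only)
 *    0        1     -> Asym_Pause          (transmit only)
 *    1        1     -> Pause               (symmetric)
 */
static u32 pause_adv_bits(bool rx_pause, bool tx_pause)
{
	u32 adv = 0;

	if (rx_pause)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (tx_pause)
		adv ^= ADVERTISED_Asym_Pause;
	return adv;
}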
static const struct ethtool_ops ftgmac100_ethtool_ops = {
.get_drvinfo = ftgmac100_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_ringparam = ftgmac100_get_ringparam,
+ .set_ringparam = ftgmac100_set_ringparam,
+ .get_pauseparam = ftgmac100_get_pauseparam,
+ .set_pauseparam = ftgmac100_set_pauseparam,
};
-/******************************************************************************
- * interrupt handler
- *****************************************************************************/
static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
struct net_device *netdev = dev_id;
struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int status, new_mask = FTGMAC100_INT_BAD;
- /* When running in NCSI mode, the interface should be ready for
- * receiving or transmitting NCSI packets before it's opened.
- */
- if (likely(priv->use_ncsi || netif_running(netdev))) {
- /* Disable interrupts for polling */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- napi_schedule(&priv->napi);
+ /* Fetch and clear interrupt bits, process abnormal ones */
+ status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+ if (unlikely(status & FTGMAC100_INT_BAD)) {
+
+ /* RX buffer unavailable */
+ if (status & FTGMAC100_INT_NO_RXBUF)
+ netdev->stats.rx_over_errors++;
+
+ /* received packet lost due to RX FIFO full */
+ if (status & FTGMAC100_INT_RPKT_LOST)
+ netdev->stats.rx_fifo_errors++;
+
+ /* sent packet lost due to excessive TX collision */
+ if (status & FTGMAC100_INT_XPKT_LOST)
+ netdev->stats.tx_fifo_errors++;
+
+ /* AHB error -> Reset the chip */
+ if (status & FTGMAC100_INT_AHB_ERR) {
+ if (net_ratelimit())
+ netdev_warn(netdev,
+ "AHB bus error ! Resetting chip.\n");
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ schedule_work(&priv->reset_task);
+ return IRQ_HANDLED;
+ }
+
+ /* We may need to restart the MAC after such errors, delay
+ * this until after we have freed some Rx buffers though
+ */
+ priv->need_mac_restart = true;
+
+ /* Disable those errors until we restart */
+ new_mask &= ~status;
}
+ /* Only enable "bad" interrupts while NAPI is on */
+ iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);
+
+ /* Schedule NAPI bh */
+ napi_schedule_irqoff(&priv->napi);
+
return IRQ_HANDLED;
}
-/******************************************************************************
- * struct napi_struct functions
- *****************************************************************************/
+static bool ftgmac100_check_rx(struct ftgmac100 *priv)
+{
+ struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];
+
+ /* Do we have a packet? */
+ return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
+}
+
static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
- struct net_device *netdev = priv->netdev;
- unsigned int status;
- bool completed = true;
- int rx = 0;
+ int work_done = 0;
+ bool more;
- status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
- iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+ /* Handle TX completions */
+ if (ftgmac100_tx_buf_cleanable(priv))
+ ftgmac100_tx_complete(priv);
- if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) {
- /*
- * FTGMAC100_INT_RPKT_BUF:
- * RX DMA has received packets into RX buffer successfully
- *
- * FTGMAC100_INT_NO_RXBUF:
- * RX buffer unavailable
- */
- bool retry;
+ /* Handle RX packets */
+ do {
+ more = ftgmac100_rx_packet(priv, &work_done);
+ } while (more && work_done < budget);
- do {
- retry = ftgmac100_rx_packet(priv, &rx);
- } while (retry && rx < budget);
- if (retry && rx == budget)
- completed = false;
- }
+ /* The interrupt is telling us to kick the MAC back to life
+ * after an RX overflow
+ */
+ if (unlikely(priv->need_mac_restart)) {
+ ftgmac100_start_hw(priv);
- if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) {
- /*
- * FTGMAC100_INT_XPKT_ETH:
- * packet transmitted to ethernet successfully
- *
- * FTGMAC100_INT_XPKT_LOST:
- * packet transmitted to ethernet lost due to late
- * collision or excessive collision
- */
- ftgmac100_tx_complete(priv);
+ /* Re-enable "bad" interrupts */
+ iowrite32(FTGMAC100_INT_BAD,
+ priv->base + FTGMAC100_OFFSET_IER);
}
- if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
- FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR)) {
- if (net_ratelimit())
- netdev_info(netdev, "[ISR] = 0x%x: %s%s%s\n", status,
- status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
- status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
- status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "");
+ /* As long as we are waiting for transmit packets to be
+ * completed, we keep NAPI going
+ */
+ if (ftgmac100_tx_buf_cleanable(priv))
+ work_done = budget;
+
+ if (work_done < budget) {
+ /* We are about to re-enable all interrupts. However, the
+ * HW has been latching RX/TX packet interrupts while they
+ * were masked, so we clear them first and then re-check
+ * whether there's anything left to process
+ */
+ iowrite32(FTGMAC100_INT_RXTX,
+ priv->base + FTGMAC100_OFFSET_ISR);
- if (status & FTGMAC100_INT_NO_RXBUF) {
- /* RX buffer unavailable */
- netdev->stats.rx_over_errors++;
- }
+ /* Push the above out (and provide a barrier vs. subsequent
+ * reads of the descriptor).
+ */
+ ioread32(priv->base + FTGMAC100_OFFSET_ISR);
- if (status & FTGMAC100_INT_RPKT_LOST) {
- /* received packet lost due to RX FIFO full */
- netdev->stats.rx_fifo_errors++;
- }
- }
+ /* Check RX and TX descriptors for more work to do */
+ if (ftgmac100_check_rx(priv) ||
+ ftgmac100_tx_buf_cleanable(priv))
+ return budget;
- if (completed) {
+ /* deschedule NAPI */
napi_complete(napi);
/* enable all interrupts */
- iowrite32(priv->int_mask_all,
+ iowrite32(FTGMAC100_INT_ALL,
priv->base + FTGMAC100_OFFSET_IER);
}
- return rx;
+ return work_done;
}
-/******************************************************************************
- * struct net_device_ops functions
- *****************************************************************************/
-static int ftgmac100_open(struct net_device *netdev)
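+/* Common bring-up path shared by ndo_open and the reset task: rebuild
+ * the rings, refill the RX buffers, restart the MAC, then re-enable
+ * NAPI, the TX queue and interrupts. @ignore_alloc_err lets the reset
+ * path limp along with a partially refilled RX ring rather than fail
+ * the whole recovery.
+ */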
+static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
{
- struct ftgmac100 *priv = netdev_priv(netdev);
- unsigned int status;
+ int err = 0;
+
+ /* Re-init descriptors (adjust queue sizes) */
+ ftgmac100_init_rings(priv);
+
+ /* Realloc rx buffers */
+ err = ftgmac100_alloc_rx_buffers(priv);
+ if (err && !ignore_alloc_err)
+ return err;
+
+ /* Reinit and restart HW */
+ ftgmac100_init_hw(priv);
+ ftgmac100_config_pause(priv);
+ ftgmac100_start_hw(priv);
+
+ /* Re-enable the device */
+ napi_enable(&priv->napi);
+ netif_start_queue(priv->netdev);
+
+ /* Enable all interrupts */
+ iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);
+
+ return err;
+}
+
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+ struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+ reset_task);
+ struct net_device *netdev = priv->netdev;
int err;
- err = ftgmac100_alloc_buffers(priv);
+ netdev_dbg(netdev, "Resetting NIC...\n");
+
+ /* Lock the world */
+ rtnl_lock();
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+ if (priv->mii_bus)
+ mutex_lock(&priv->mii_bus->mdio_lock);
+
+ /* Check if the interface is still up */
+ if (!netif_running(netdev))
+ goto bail;
+
+ /* Stop the network stack */
+ netif_trans_update(netdev);
+ napi_disable(&priv->napi);
+ netif_tx_disable(netdev);
+
+ /* Stop and reset the MAC */
+ ftgmac100_stop_hw(priv);
+ err = ftgmac100_reset_and_config_mac(priv);
if (err) {
- netdev_err(netdev, "failed to allocate buffers\n");
- goto err_alloc;
+ /* Not much we can do ... it might come back... */
+ netdev_err(netdev, "attempting to continue...\n");
}
- err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
+ /* Free all rx and tx buffers */
+ ftgmac100_free_buffers(priv);
+
+ /* Setup everything again and restart chip */
+ ftgmac100_init_all(priv, true);
+
+ netdev_dbg(netdev, "Reset done!\n");
+ bail:
+ if (priv->mii_bus)
+ mutex_unlock(&priv->mii_bus->mdio_lock);
+ if (netdev->phydev)
+ mutex_unlock(&netdev->phydev->lock);
+ rtnl_unlock();
+}
+
+static int ftgmac100_open(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ int err;
+
+ /* Allocate ring buffers */
+ err = ftgmac100_alloc_rings(priv);
if (err) {
- netdev_err(netdev, "failed to request irq %d\n", priv->irq);
- goto err_irq;
+ netdev_err(netdev, "Failed to allocate descriptors\n");
+ return err;
}
- priv->rx_pointer = 0;
- priv->tx_clean_pointer = 0;
- priv->tx_pointer = 0;
- priv->tx_pending = 0;
+ /* When using NC-SI we force the speed to 100Mbit/s full duplex.
+ *
+ * Otherwise we leave it set to 0 (no link); the link
+ * message from the PHY layer will handle setting it up to
+ * something else if needed.
+ */
+ if (priv->use_ncsi) {
+ priv->cur_duplex = DUPLEX_FULL;
+ priv->cur_speed = SPEED_100;
+ } else {
+ priv->cur_duplex = 0;
+ priv->cur_speed = 0;
+ }
- err = ftgmac100_reset_hw(priv);
+ /* Reset the hardware */
+ err = ftgmac100_reset_and_config_mac(priv);
if (err)
goto err_hw;
- ftgmac100_init_hw(priv);
- ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+ /* Initialize NAPI */
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
- /* Clear stale interrupts */
- status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
- iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+ /* Grab our interrupt */
+ err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
+ goto err_irq;
+ }
- if (netdev->phydev)
+ /* Start things up */
+ err = ftgmac100_init_all(priv, false);
+ if (err) {
+ netdev_err(netdev, "Failed to allocate packet buffers\n");
+ goto err_alloc;
+ }
+
+ if (netdev->phydev) {
+ /* If we have a PHY, start polling */
phy_start(netdev->phydev);
- else if (priv->use_ncsi)
+ } else if (priv->use_ncsi) {
+ /* If using NC-SI, set our carrier on and start the stack */
netif_carrier_on(netdev);
- napi_enable(&priv->napi);
- netif_start_queue(netdev);
-
- /* enable all interrupts */
- iowrite32(priv->int_mask_all, priv->base + FTGMAC100_OFFSET_IER);
-
- /* Start the NCSI device */
- if (priv->use_ncsi) {
+ /* Start the NCSI device */
err = ncsi_start_dev(priv->ndev);
if (err)
goto err_ncsi;
}
- priv->enabled = true;
-
return 0;
-err_ncsi:
+ err_ncsi:
napi_disable(&priv->napi);
netif_stop_queue(netdev);
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-err_hw:
- free_irq(priv->irq, netdev);
-err_irq:
+ err_alloc:
ftgmac100_free_buffers(priv);
-err_alloc:
+ free_irq(netdev->irq, netdev);
+ err_irq:
+ netif_napi_del(&priv->napi);
+ err_hw:
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ ftgmac100_free_rings(priv);
return err;
}
@@ -1181,64 +1528,87 @@ static int ftgmac100_stop(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- if (!priv->enabled)
- return 0;
+ /* Note about the reset task: We are called with the rtnl lock
+ * held, so we are synchronized against the core of the reset
+ * task. We must not try to synchronously cancel it, otherwise
+ * we can deadlock. But since it will test for netif_running()
+ * which has already been cleared by the net core, we don't
+ * have anything special to do.
+ */
/* disable all interrupts */
- priv->enabled = false;
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
netif_stop_queue(netdev);
napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
else if (priv->use_ncsi)
ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
- free_irq(priv->irq, netdev);
+ free_irq(netdev->irq, netdev);
ftgmac100_free_buffers(priv);
+ ftgmac100_free_rings(priv);
return 0;
}
-static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+/* optional */
+static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void ftgmac100_tx_timeout(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- dma_addr_t map;
- if (unlikely(skb->len > MAX_PKT_SIZE)) {
- if (net_ratelimit())
- netdev_dbg(netdev, "tx packet too big\n");
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- netdev->stats.tx_dropped++;
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
+ /* Do the reset outside of interrupt context */
+ schedule_work(&priv->reset_task);
+}
- map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, map))) {
- /* drop packet */
- if (net_ratelimit())
- netdev_err(netdev, "map socket buffer failed\n");
+static int ftgmac100_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
- netdev->stats.tx_dropped++;
- kfree_skb(skb);
- return NETDEV_TX_OK;
+ if (!netif_running(netdev))
+ return 0;
+
+ /* Update the vlan filtering bit */
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+ u32 maccr;
+
+ maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ maccr |= FTGMAC100_MACCR_RM_VLAN;
+ else
+ maccr &= ~FTGMAC100_MACCR_RM_VLAN;
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}
- return ftgmac100_xmit(priv, skb, map);
+ return 0;
}
-/* optional */
-static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+#ifdef CONFIG_NET_POLL_CONTROLLER
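+/* netpoll entry point (netconsole and friends): run the interrupt
+ * handler by hand, with local interrupts masked, so pending events
+ * can be drained even when normal interrupt delivery is unavailable.
+ */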
+static void ftgmac100_poll_controller(struct net_device *netdev)
{
- if (!netdev->phydev)
- return -ENXIO;
+ unsigned long flags;
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+ local_irq_save(flags);
+ ftgmac100_interrupt(netdev->irq, netdev);
+ local_irq_restore(flags);
}
+#endif
static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_open = ftgmac100_open,
@@ -1247,12 +1617,20 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = ftgmac100_do_ioctl,
+ .ndo_tx_timeout = ftgmac100_tx_timeout,
+ .ndo_set_rx_mode = ftgmac100_set_rx_mode,
+ .ndo_set_features = ftgmac100_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ftgmac100_poll_controller,
+#endif
};
static int ftgmac100_setup_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
+ int phy_intf = PHY_INTERFACE_MODE_RGMII;
+ struct device_node *np = pdev->dev.of_node;
int i, err = 0;
u32 reg;
@@ -1261,14 +1639,46 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
if (!priv->mii_bus)
return -EIO;
- if (of_machine_is_compatible("aspeed,ast2400") ||
- of_machine_is_compatible("aspeed,ast2500")) {
+ if (priv->is_aspeed) {
/* This driver supports the old MDIO interface */
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
}
+ /* Get PHY mode from device-tree */
+ if (np) {
+ /* Default to RGMII. It's a gigabit part after all */
+ phy_intf = of_get_phy_mode(np);
+ if (phy_intf < 0)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors, so I'm going to just let them through for
+ * now. Note that this is only a warning, in case for some
+ * obscure reason the DT really means to lie about it or it's
+ * a newer part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed &&
+ phy_intf != PHY_INTERFACE_MODE_RMII &&
+ phy_intf != PHY_INTERFACE_MODE_RGMII &&
+ phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
+ phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
+ phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
+ }
+
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
@@ -1285,7 +1695,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
goto err_register_mdiobus;
}
- err = ftgmac100_mii_probe(priv);
+ err = ftgmac100_mii_probe(priv, phy_intf);
if (err) {
dev_err(priv->dev, "MII Probe failed!\n");
goto err_mii_probe;
@@ -1321,15 +1731,13 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
nd->link_up ? "up" : "down");
}
-/******************************************************************************
- * struct platform_driver functions
- *****************************************************************************/
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
int irq;
struct net_device *netdev;
struct ftgmac100 *priv;
+ struct device_node *np;
int err = 0;
if (!pdev)
@@ -1354,6 +1762,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ftgmac100_ethtool_ops;
netdev->netdev_ops = &ftgmac100_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
platform_set_drvdata(pdev, netdev);
@@ -1361,11 +1770,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dev = &pdev->dev;
-
- spin_lock_init(&priv->tx_lock);
-
- /* initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+ INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
@@ -1383,29 +1788,28 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_ioremap;
}
- priv->irq = irq;
+ netdev->irq = irq;
- /* MAC address from chip or random one */
- ftgmac100_setup_mac(priv);
+ /* Enable pause */
+ priv->tx_pause = true;
+ priv->rx_pause = true;
+ priv->aneg_pause = true;
- priv->int_mask_all = (FTGMAC100_INT_RPKT_LOST |
- FTGMAC100_INT_XPKT_ETH |
- FTGMAC100_INT_XPKT_LOST |
- FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_RPKT_BUF |
- FTGMAC100_INT_NO_RXBUF);
+ /* MAC address from chip or random one */
+ ftgmac100_initial_mac(priv);
- if (of_machine_is_compatible("aspeed,ast2400") ||
- of_machine_is_compatible("aspeed,ast2500")) {
+ np = pdev->dev.of_node;
+ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
+ priv->is_aspeed = true;
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
}
- if (pdev->dev.of_node &&
- of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
+ if (np && of_get_property(np, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
dev_err(&pdev->dev, "NCSI stack not enabled\n");
goto err_ncsi_dev;
@@ -1423,15 +1827,21 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_setup_mdio;
}
- /* We have to disable on-chip IP checksum functionality
- * when NCSI is enabled on the interface. It doesn't work
- * in that case.
- */
- netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
- if (priv->use_ncsi &&
- of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL))
- netdev->features &= ~NETIF_F_IP_CSUM;
+ /* Default ring sizes */
+ priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
+ priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
+ /* Base feature set */
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ /* AST2400 doesn't have working HW checksum generation */
+ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+ if (np && of_get_property(np, "no-hw-checksum", NULL))
+ netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
+ netdev->features |= netdev->hw_features;
/* register network device */
err = register_netdev(netdev);
@@ -1440,7 +1850,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_register_netdev;
}
- netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+ netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
return 0;
@@ -1467,6 +1877,12 @@ static int ftgmac100_remove(struct platform_device *pdev)
priv = netdev_priv(netdev);
unregister_netdev(netdev);
+
+ /* There's a small chance the reset task will have been re-queued
+ * during stop; make sure it's gone before we free the structure.
+ */
+ cancel_work_sync(&priv->reset_task);
+
ftgmac100_destroy_mdio(netdev);
iounmap(priv->base);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index a7ce0ac8858a..0653d8176e6a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -86,6 +86,20 @@
#define FTGMAC100_INT_PHYSTS_CHG (1 << 9)
#define FTGMAC100_INT_NO_HPTXBUF (1 << 10)
+/* Interrupts we care about in NAPI mode */
+#define FTGMAC100_INT_BAD (FTGMAC100_INT_RPKT_LOST | \
+ FTGMAC100_INT_XPKT_LOST | \
+ FTGMAC100_INT_AHB_ERR | \
+ FTGMAC100_INT_NO_RXBUF)
+
+/* Normal RX/TX interrupts, enabled when NAPI off */
+#define FTGMAC100_INT_RXTX (FTGMAC100_INT_XPKT_ETH | \
+ FTGMAC100_INT_RPKT_BUF)
+
+/* All the interrupts we care about */
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
+ FTGMAC100_INT_BAD)
+
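+/* Note: XPKT_ETH (TX done) is left out of FTGMAC100_INT_ALL. TX
+ * completions are instead reaped from the NAPI poll loop, which keeps
+ * itself rescheduled for as long as TX buffers remain cleanable (see
+ * ftgmac100_poll()).
+ */
+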
/*
* Interrupt timer control register
*/
@@ -185,13 +199,20 @@
#define FTGMAC100_PHYDATA_MIIRDATA(phydata) (((phydata) >> 16) & 0xffff)
/*
+ * Flow control register
+ */
+#define FTGMAC100_FCR_FC_EN (1 << 0)
+#define FTGMAC100_FCR_FCTHR_EN (1 << 2)
+#define FTGMAC100_FCR_PAUSE_TIME(x) (((x) & 0xffff) << 16)
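+
+/* PAUSE_TIME is the 802.3x pause quanta (units of 512 bit times)
+ * inserted into generated pause frames. A minimal sketch of the
+ * assumed usage, with FTGMAC100_OFFSET_FCR being the register offset
+ * defined elsewhere in this header:
+ *
+ * iowrite32(FTGMAC100_FCR_FC_EN |
+ * FTGMAC100_FCR_PAUSE_TIME(0xffff),
+ * priv->base + FTGMAC100_OFFSET_FCR);
+ */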
+
+/*
* Transmit descriptor, aligned to 16 bytes
*/
struct ftgmac100_txdes {
- unsigned int txdes0;
- unsigned int txdes1;
- unsigned int txdes2; /* not used by HW */
- unsigned int txdes3; /* TXBUF_BADR */
+ __le32 txdes0; /* Control & status bits */
+ __le32 txdes1; /* Irq, checksum and vlan control */
+ __le32 txdes2; /* Reserved */
+ __le32 txdes3; /* DMA buffer address */
} __attribute__ ((aligned(16)));
#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
@@ -213,10 +234,10 @@ struct ftgmac100_txdes {
* Receive descriptor, aligned to 16 bytes
*/
struct ftgmac100_rxdes {
- unsigned int rxdes0;
- unsigned int rxdes1;
- unsigned int rxdes2; /* not used by HW */
- unsigned int rxdes3; /* RXBUF_BADR */
+ __le32 rxdes0; /* Control & status bits */
+ __le32 rxdes1; /* Checksum and vlan status */
+ __le32 rxdes2; /* length/type on AST2500 */
+ __le32 rxdes3; /* DMA buffer address */
} __attribute__ ((aligned(16)));
#define FTGMAC100_RXDES0_VDBC 0x3fff
@@ -234,6 +255,14 @@ struct ftgmac100_rxdes {
#define FTGMAC100_RXDES0_FRS (1 << 29)
#define FTGMAC100_RXDES0_RXPKT_RDY (1 << 31)
+/* Errors we care about for dropping packets */
+#define RXDES0_ANY_ERROR ( \
+ FTGMAC100_RXDES0_RX_ERR | \
+ FTGMAC100_RXDES0_CRC_ERR | \
+ FTGMAC100_RXDES0_FTL | \
+ FTGMAC100_RXDES0_RUNT | \
+ FTGMAC100_RXDES0_RX_ODD_NB)
+
#define FTGMAC100_RXDES1_VLANTAG_CI 0xffff
#define FTGMAC100_RXDES1_PROT_MASK (0x3 << 20)
#define FTGMAC100_RXDES1_PROT_NONIP (0x0 << 20)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e2ca107f9d94..9a520e4f0df9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -137,6 +137,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20
+/* FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV 0x00000004
+
#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
@@ -235,6 +242,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
* For conformity, we'll still declare GSO explicitly.
*/
net_dev->features |= NETIF_F_GSO;
+ net_dev->features |= NETIF_F_RXCSUM;
net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* we do not want shared skbs on TX */
@@ -334,6 +342,45 @@ static void dpaa_get_stats64(struct net_device *net_dev,
}
}
+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ u8 num_tc;
+ int i;
+
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = tc->mqprio->num_tc;
+
+ if (num_tc == priv->num_tc)
+ return 0;
+
+ if (!num_tc) {
+ netdev_reset_tc(net_dev);
+ goto out;
+ }
+
+ if (num_tc > DPAA_TC_NUM) {
+ netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
+ DPAA_TC_NUM);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(net_dev, num_tc);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
+ i * DPAA_TC_TXQ_NUM);
+
+out:
+ priv->num_tc = num_tc ? : 1;
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ return 0;
+}
+
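+/* For illustration only (hypothetical interface name): the traffic
+ * classes configured above are driven from user space via the mqprio
+ * qdisc, along the lines of:
+ *
+ * tc qdisc add dev eth0 root mqprio num_tc 4 \
+ * map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 hw 1
+ *
+ * which maps the 16 skb priorities onto the four hardware classes.
+ */
+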
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
struct platform_device *of_dev;
@@ -557,16 +604,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv)
/* Use multiple WQs for FQ assignment:
* - Tx Confirmation queues go to WQ1.
- * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- * to be scheduled, in case there are many more FQs in WQ3).
- * - Rx Default and Tx queues go to WQ3 (no differentiation between
- * Rx and Tx traffic).
+ * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
+ * to be scheduled, in case there are many more FQs in WQ6).
+ * - Rx Default goes to WQ6.
+ * - Tx queues go to different WQs depending on their priority. Equal
+ * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
+ * WQ0 (highest priority).
* This ensures that Tx-confirmed buffers are timely released. In particular,
* it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
* are greatly outnumbered by other FQs in the system, while
* dequeue scheduling is round-robin.
*/
-static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
switch (fq->fq_type) {
case FQ_TYPE_TX_CONFIRM:
@@ -575,11 +624,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq)
break;
case FQ_TYPE_RX_ERROR:
case FQ_TYPE_TX_ERROR:
- fq->wq = 2;
+ fq->wq = 5;
break;
case FQ_TYPE_RX_DEFAULT:
+ fq->wq = 6;
+ break;
case FQ_TYPE_TX:
- fq->wq = 3;
+ switch (idx / DPAA_TC_TXQ_NUM) {
+ case 0:
+ /* Low priority (best effort) */
+ fq->wq = 6;
+ break;
+ case 1:
+ /* Medium priority */
+ fq->wq = 2;
+ break;
+ case 2:
+ /* High priority */
+ fq->wq = 1;
+ break;
+ case 3:
+ /* Very high priority */
+ fq->wq = 0;
+ break;
+ default:
+ WARN(1, "Too many TX FQs: more than %d!\n",
+ DPAA_ETH_TXQ_NUM);
+ }
break;
default:
WARN(1, "Invalid FQ type %d for FQID %d!\n",
@@ -607,7 +678,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
}
for (i = 0; i < count; i++)
- dpaa_assign_wq(dpaa_fq + i);
+ dpaa_assign_wq(dpaa_fq + i, i);
return dpaa_fq;
}
@@ -903,7 +974,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
* Tx Confirmation FQs.
*/
if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
- initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
/* FQ placement */
initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
@@ -985,7 +1056,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
/* Initialization common to all ingress queues */
if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
- initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
+ QM_FQCTRL_CTXASTASHING);
initfq.fqd.context_a.stashing.exclusive =
QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
QM_STASHING_EXCL_ANNOTATION;
@@ -1055,9 +1127,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list)
return err;
}
-static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
- struct dpaa_fq *defq,
- struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_params params;
@@ -1076,23 +1148,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
err = fman_port_config(port, &params);
- if (err)
+ if (err) {
pr_err("%s: fman_port_config failed\n", __func__);
+ return err;
+ }
err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
+ if (err) {
pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
__func__);
+ return err;
+ }
err = fman_port_init(port);
if (err)
pr_err("%s: fm_port_init failed\n", __func__);
+
+ return err;
}
-static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
- size_t count, struct dpaa_fq *errq,
- struct dpaa_fq *defq,
- struct dpaa_buffer_layout *buf_layout)
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+ size_t count, struct dpaa_fq *errq,
+ struct dpaa_fq *defq,
+ struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_rx_params *rx_p;
@@ -1120,32 +1198,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
}
err = fman_port_config(port, &params);
- if (err)
+ if (err) {
pr_err("%s: fman_port_config failed\n", __func__);
+ return err;
+ }
err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
- if (err)
+ if (err) {
pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
__func__);
+ return err;
+ }
err = fman_port_init(port);
if (err)
pr_err("%s: fm_port_init failed\n", __func__);
+
+ return err;
}
-static void dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpaa_bp **bps, size_t count,
- struct fm_port_fqs *port_fqs,
- struct dpaa_buffer_layout *buf_layout,
- struct device *dev)
+static int dpaa_eth_init_ports(struct mac_device *mac_dev,
+ struct dpaa_bp **bps, size_t count,
+ struct fm_port_fqs *port_fqs,
+ struct dpaa_buffer_layout *buf_layout,
+ struct device *dev)
{
struct fman_port *rxport = mac_dev->port[RX];
struct fman_port *txport = mac_dev->port[TX];
+ int err;
+
+ err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+ port_fqs->tx_defq, &buf_layout[TX]);
+ if (err)
+ return err;
+
+ err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ port_fqs->rx_defq, &buf_layout[RX]);
- dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
- port_fqs->tx_defq, &buf_layout[TX]);
- dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
- port_fqs->rx_defq, &buf_layout[RX]);
+ return err;
}
static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
@@ -1526,6 +1616,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
return skb;
}
+static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
+{
+ /* The parser has run and performed L4 checksum validation.
+ * We know there were no parser errors (and implicitly no
+ * L4 csum error), otherwise we wouldn't be here.
+ */
+ if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
+ (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
+ return CHECKSUM_UNNECESSARY;
+
+ /* We're here because either the parser didn't run or the L4 checksum
+ * was not verified. This may include the case of a UDP frame with
+ * checksum zero or an L4 proto other than TCP/UDP.
+ */
+ return CHECKSUM_NONE;
+}
+
/* Build a linear skb around the received buffer.
* We are guaranteed there is enough room at the end of the data buffer to
* accommodate the shared info area of the skb.
@@ -1556,7 +1663,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
skb_reserve(skb, fd_off);
skb_put(skb, qm_fd_get_length(fd));
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = rx_csum_offload(priv, fd);
return skb;
@@ -1616,7 +1723,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
if (WARN_ON(unlikely(!skb)))
goto free_buffers;
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = rx_csum_offload(priv, fd);
/* Make sure forwarded skbs will have enough space
* on Tx, if extra headers are added.
@@ -2093,7 +2200,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
dma_addr_t addr = qm_fd_addr(fd);
enum qm_fd_format fd_format;
struct net_device *net_dev;
- u32 fd_status = fd->status;
+ u32 fd_status;
struct dpaa_bp *dpaa_bp;
struct dpaa_priv *priv;
unsigned int skb_len;
@@ -2350,6 +2457,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
.ndo_do_ioctl = dpaa_ioctl,
+ .ndo_setup_tc = dpaa_setup_tc,
};
static int dpaa_napi_add(struct net_device *net_dev)
@@ -2624,8 +2732,10 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
/* All real interfaces need their ports initialized */
- dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
- &priv->buf_layout[0], dev);
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ &priv->buf_layout[0], dev);
+ if (err)
+ goto init_ports_failed;
priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
if (!priv->percpu_priv) {
@@ -2638,6 +2748,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
memset(percpu_priv, 0, sizeof(*percpu_priv));
}
+ priv->num_tc = 1;
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
if (err < 0)
@@ -2658,6 +2771,7 @@ netdev_init_failed:
napi_add_failed:
dpaa_napi_del(net_dev);
alloc_percpu_failed:
+init_ports_failed:
dpaa_fq_free(dev, &priv->dpaa_fq_list);
fq_alloc_failed:
qman_delete_cgr_safe(&priv->ingress_cgr);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 1f9aebf3f3c5..9941a7866ebe 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -39,7 +39,12 @@
#include "mac.h"
#include "dpaa_eth_trace.h"
-#define DPAA_ETH_TXQ_NUM NR_CPUS
+/* Number of prioritised traffic classes */
+#define DPAA_TC_NUM 4
+/* Number of Tx queues per traffic class */
+#define DPAA_TC_TXQ_NUM NR_CPUS
+/* Total number of Tx queues */
+#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
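+
+/* Worked example: with DPAA_TC_NUM == 4 and NR_CPUS == 8 this yields
+ * 32 TX queues; queues [i * NR_CPUS, (i + 1) * NR_CPUS) belong to
+ * traffic class i (see dpaa_setup_tc() and dpaa_assign_wq()).
+ */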
#define DPAA_BPS_NUM 3 /* number of bpools per interface */
@@ -152,6 +157,7 @@ struct dpaa_priv {
u16 channel;
struct list_head dpaa_fq_list;
+ u8 num_tc;
u32 msg_enable; /* net_device message level */
struct {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 91a16641e851..a92bf94f8e94 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -117,8 +117,9 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx6ul-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+ FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_COALESCE,
}, {
/* sentinel */
}
@@ -235,14 +236,14 @@ static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
struct bufdesc_prop *bd)
{
return (bdp >= bd->last) ? bd->base
- : (struct bufdesc *)(((unsigned)bdp) + bd->dsize);
+ : (struct bufdesc *)(((void *)bdp) + bd->dsize);
}
static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
struct bufdesc_prop *bd)
{
return (bdp <= bd->base) ? bd->last
- : (struct bufdesc *)(((unsigned)bdp) - bd->dsize);
+ : (struct bufdesc *)(((void *)bdp) - bd->dsize);
}
static int fec_enet_get_bd_index(struct bufdesc *bdp,
@@ -1266,7 +1267,7 @@ skb_done:
}
}
- /* ERR006538: Keep the transmitter going */
+ /* ERR006358: Keep the transmitter going */
if (bdp != txq->bd.cur &&
readl(txq->bd.reg_desc_active) == 0)
writel(0, txq->bd.reg_desc_active);
@@ -2651,7 +2652,7 @@ static void fec_enet_free_queue(struct net_device *ndev)
for (i = 0; i < fep->num_tx_queues; i++)
if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
txq = fep->tx_queue[i];
- dma_free_coherent(NULL,
+ dma_free_coherent(&fep->pdev->dev,
txq->bd.ring_size * TSO_HEADER_SIZE,
txq->tso_hdrs,
txq->tso_hdrs_dma);
@@ -2685,7 +2686,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
txq->tx_wake_threshold =
(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
- txq->tso_hdrs = dma_alloc_coherent(NULL,
+ txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
txq->bd.ring_size * TSO_HEADER_SIZE,
&txq->tso_hdrs_dma,
GFP_KERNEL);
@@ -3187,7 +3188,7 @@ static int fec_enet_init(struct net_device *ndev)
}
#ifdef CONFIG_OF
-static void fec_reset_phy(struct platform_device *pdev)
+static int fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
bool active_high = false;
@@ -3195,16 +3196,18 @@ static void fec_reset_phy(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
if (!np)
- return;
+ return 0;
- of_property_read_u32(np, "phy-reset-duration", &msec);
+ err = of_property_read_u32(np, "phy-reset-duration", &msec);
/* A sane reset duration should not be longer than 1s */
- if (msec > 1000)
+ if (!err && msec > 1000)
msec = 1;
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- if (!gpio_is_valid(phy_reset))
- return;
+ if (phy_reset == -EPROBE_DEFER)
+ return phy_reset;
+ else if (!gpio_is_valid(phy_reset))
+ return 0;
active_high = of_property_read_bool(np, "phy-reset-active-high");
@@ -3213,7 +3216,7 @@ static void fec_reset_phy(struct platform_device *pdev)
"phy-reset");
if (err) {
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return;
+ return err;
}
if (msec > 20)
@@ -3222,14 +3225,17 @@ static void fec_reset_phy(struct platform_device *pdev)
usleep_range(msec * 1000, msec * 1000 + 1000);
gpio_set_value_cansleep(phy_reset, !active_high);
+
+ return 0;
}
#else /* CONFIG_OF */
-static void fec_reset_phy(struct platform_device *pdev)
+static int fec_reset_phy(struct platform_device *pdev)
{
/*
* In case of platform probe, the reset has been done
* by machine code.
*/
+ return 0;
}
#endif /* CONFIG_OF */
@@ -3400,6 +3406,7 @@ fec_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to enable phy regulator: %d\n", ret);
+ clk_disable_unprepare(fep->clk_ipg);
goto failed_regulator;
}
} else {
@@ -3412,7 +3419,9 @@ fec_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- fec_reset_phy(pdev);
+ ret = fec_reset_phy(pdev);
+ if (ret)
+ goto failed_reset;
if (fep->bufdesc_ex)
fec_ptp_init(pdev);
@@ -3473,8 +3482,10 @@ failed_init:
fec_ptp_stop(pdev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+failed_reset:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
failed_regulator:
- clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index f60845f0c6ca..4aefe2438969 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -59,6 +59,7 @@
#define DMA_OFFSET 0x000C2000
#define FPM_OFFSET 0x000C3000
#define IMEM_OFFSET 0x000C4000
+#define HWP_OFFSET 0x000C7000
#define CGP_OFFSET 0x000DB000
/* Exceptions bit map */
@@ -218,6 +219,9 @@
#define QMI_GS_HALT_NOT_BUSY 0x00000002
+/* HWP defines */
+#define HWP_RPIMAC_PEN 0x00000001
+
/* IRAM defines */
#define IRAM_IADD_AIE 0x80000000
#define IRAM_READY 0x80000000
@@ -475,6 +479,12 @@ struct fman_dma_regs {
u32 res00e0[0x400 - 56];
};
+struct fman_hwp_regs {
+ u32 res0000[0x844 / 4]; /* 0x000..0x843 */
+ u32 fmprrpimac; /* FM Parser Internal memory access control */
+ u32 res[(0x1000 - 0x848) / 4]; /* 0x848..0xFFF */
+};
+
/* Structure that holds current FMan state.
* Used for saving run time information.
*/
@@ -606,6 +616,7 @@ struct fman {
struct fman_bmi_regs __iomem *bmi_regs;
struct fman_qmi_regs __iomem *qmi_regs;
struct fman_dma_regs __iomem *dma_regs;
+ struct fman_hwp_regs __iomem *hwp_regs;
fman_exceptions_cb *exception_cb;
fman_bus_error_cb *bus_error_cb;
/* Spinlock for FMan use */
@@ -999,6 +1010,12 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}
+static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg)
+{
+ /* enable HW Parser */
+ iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac);
+}
+
static int enable(struct fman *fman, struct fman_cfg *cfg)
{
u32 cfg_reg = 0;
@@ -1195,7 +1212,7 @@ static int fill_soc_specific_params(struct fman_state_struct *state)
state->max_num_of_open_dmas = 32;
state->fm_port_num_of_cg = 256;
state->num_of_rx_ports = 6;
- state->total_fifo_size = 122 * 1024;
+ state->total_fifo_size = 136 * 1024;
break;
case 2:
@@ -1793,6 +1810,7 @@ static int fman_config(struct fman *fman)
fman->bmi_regs = base_addr + BMI_OFFSET;
fman->qmi_regs = base_addr + QMI_OFFSET;
fman->dma_regs = base_addr + DMA_OFFSET;
+ fman->hwp_regs = base_addr + HWP_OFFSET;
fman->base_addr = base_addr;
spin_lock_init(&fman->spinlock);
@@ -2062,6 +2080,9 @@ static int fman_init(struct fman *fman)
/* Init QMI Registers */
qmi_init(fman->qmi_regs, fman->cfg);
+ /* Init HW Parser */
+ hwp_init(fman->hwp_regs);
+
err = enable(fman, cfg);
if (err != 0)
return err;
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 57aae8d17d77..f53e1473dbcc 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -134,14 +134,14 @@ enum fman_exceptions {
struct fman_prs_result {
u8 lpid; /* Logical port id */
u8 shimr; /* Shim header result */
- u16 l2r; /* Layer 2 result */
- u16 l3r; /* Layer 3 result */
+ __be16 l2r; /* Layer 2 result */
+ __be16 l3r; /* Layer 3 result */
u8 l4r; /* Layer 4 result */
u8 cplan; /* Classification plan id */
- u16 nxthdr; /* Next Header */
- u16 cksum; /* Running-sum */
+ __be16 nxthdr; /* Next Header */
+ __be16 cksum; /* Running-sum */
/* Flags&fragment-offset field of the last IP-header */
- u16 flags_frag_off;
+ __be16 flags_frag_off;
/* Routing type field of a IPV6 routing extension header */
u8 route_type;
/* Routing Extension Header Present; last bit is IP valid */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 84ea130eed36..98bba10fc38c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -381,6 +381,9 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
/* check RGMII support */
if (iface == PHY_INTERFACE_MODE_RGMII ||
+ iface == PHY_INTERFACE_MODE_RGMII_ID ||
+ iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ iface == PHY_INTERFACE_MODE_RGMII_TXID ||
iface == PHY_INTERFACE_MODE_RMII)
if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
return -EINVAL;
@@ -390,7 +393,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
return -EINVAL;
- is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
+ is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
+ iface == PHY_INTERFACE_MODE_RGMII_ID ||
+ iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ iface == PHY_INTERFACE_MODE_RGMII_TXID;
is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index cd6a53eaf161..c0296880feba 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -443,7 +443,10 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
break;
default:
tmp |= IF_MODE_GMII;
- if (phy_if == PHY_INTERFACE_MODE_RGMII)
+ if (phy_if == PHY_INTERFACE_MODE_RGMII ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
}
iowrite32be(tmp, &regs->if_mode);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index 173d8e0fd716..c4a66469a907 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -36,6 +36,7 @@
#include "fman_mac.h"
#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
struct fman_mac *memac_config(struct fman_mac_params *params);
int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 9f3bb50a2365..57bf44fa16a1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -62,6 +62,7 @@
#define BMI_PORT_REGS_OFFSET 0
#define QMI_PORT_REGS_OFFSET 0x400
+#define HWP_PORT_REGS_OFFSET 0x800
/* Default values */
#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
@@ -182,7 +183,7 @@
#define NIA_ENG_BMI 0x00500000
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000
-
+#define NIA_ENG_HWP 0x00440000
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -317,6 +318,19 @@ struct fman_port_qmi_regs {
u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
};
+#define HWP_HXS_COUNT 16
+#define HWP_HXS_PHE_REPORT 0x00000800
+#define HWP_HXS_PCAC_PSTAT 0x00000100
+#define HWP_HXS_PCAC_PSTOP 0x00000001
+struct fman_port_hwp_regs {
+ struct {
+ u32 ssa; /* Soft Sequence Attachment */
+ u32 lcv; /* Line-up Enable Confirmation Mask */
+ } pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
+ u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
+ u32 fmpr_pcac; /* Configuration Access Control */
+};
+
/* QMI dequeue prefetch modes */
enum fman_port_deq_prefetch {
FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
@@ -436,6 +450,7 @@ struct fman_port {
union fman_port_bmi_regs __iomem *bmi_regs;
struct fman_port_qmi_regs __iomem *qmi_regs;
+ struct fman_port_hwp_regs __iomem *hwp_regs;
struct fman_sp_buffer_offsets buffer_offsets;
@@ -521,9 +536,12 @@ static int init_bmi_rx(struct fman_port *port)
/* NIA */
tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
- tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+ tmp |= NIA_ENG_HWP;
iowrite32be(tmp, &regs->fmbm_rfne);
+ /* Parser Next Engine NIA */
+ iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);
+
/* Enqueue NIA */
iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
@@ -665,6 +683,50 @@ static int init_qmi(struct fman_port *port)
return 0;
}
+static void stop_port_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int cnt = 100;
+
+ iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);
+
+ while (cnt-- > 0 &&
+ (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+ udelay(10);
+ if (cnt < 0)
+ pr_err("Timeout stopping HW Parser\n");
+}
+
+static void start_port_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int cnt = 100;
+
+ iowrite32be(0, &regs->fmpr_pcac);
+
+ while (cnt-- > 0 &&
+ !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+ udelay(10);
+ if (cnt < 0)
+ pr_err("Timeout starting HW Parser\n");
+}
+
+static void init_hwp(struct fman_port *port)
+{
+ struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+ int i;
+
+ stop_port_hwp(port);
+
+ for (i = 0; i < HWP_HXS_COUNT; i++) {
+ /* enable HXS error reporting into FD[STATUS] PHE */
+ iowrite32be(0x00000000, &regs->pmda[i].ssa);
+ iowrite32be(0xffffffff, &regs->pmda[i].lcv);
+ }
+
+ start_port_hwp(port);
+}
+
static int init(struct fman_port *port)
{
int err;
@@ -673,6 +735,8 @@ static int init(struct fman_port *port)
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
err = init_bmi_rx(port);
+ if (!err)
+ init_hwp(port);
break;
case FMAN_PORT_TYPE_TX:
err = init_bmi_tx(port);
@@ -686,7 +750,8 @@ static int init(struct fman_port *port)
/* Init QMI registers */
err = init_qmi(port);
- return err;
+ if (err)
+ return err;
return 0;
}
@@ -1247,7 +1312,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* Allocate the FM driver's parameters structure */
port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
if (!port->cfg)
- goto err_params;
+ return -EINVAL;
/* Initialize FM port parameters which will be kept by the driver */
port->port_type = port->dts_params.type;
@@ -1276,6 +1341,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* set memory map pointers */
port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
+ port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;
port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
/* resource distribution. */
@@ -1327,8 +1393,6 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
err_port_cfg:
kfree(port->cfg);
-err_params:
- kfree(port);
return -EINVAL;
}
EXPORT_SYMBOL(fman_port_config);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index db9c0bcf54cd..1fc27c97e3b2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -38,12 +38,6 @@
#include <asm/irq.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_8xx
-#include <asm/8xx_immap.h>
-#include <asm/pgtable.h>
-#include <asm/cpm1.h>
-#endif
-
#include "fs_enet.h"
#include "fec.h"
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 96d44cf44fe0..64300ac13e02 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -37,12 +37,6 @@
#include <asm/irq.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_8xx
-#include <asm/8xx_immap.h>
-#include <asm/pgtable.h>
-#include <asm/cpm1.h>
-#endif
-
#include "fs_enet.h"
/*************************************************/
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index b6ed818f78ff..9d9b6e6dd988 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -9,9 +9,9 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-
#include "hnae.h"
#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
@@ -57,11 +57,15 @@ static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
+ if (unlikely(!cb->priv))
+ return;
+
if (cb->type == DESC_TYPE_SKB)
dev_kfree_skb_any((struct sk_buff *)cb->priv);
else if (unlikely(is_rx_ring(ring)))
put_page((struct page *)cb->priv);
- memset(cb, 0, sizeof(*cb));
+
+ cb->priv = NULL;
}
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
@@ -197,6 +201,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
ring->q = q;
ring->flags = flags;
+ spin_lock_init(&ring->lock);
assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
/* not matter for tx or rx ring, the ntc and ntc start from 0 */
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 8016854796fb..04211ac73b36 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -67,6 +67,8 @@ do { \
#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
#define AE_NAME_SIZE 16
+#define BD_SIZE_2048_MAX_MTU 6000
+
/* some said the RX and TX RCB format should not be the same in the future. But
* it is the same now...
*/
@@ -101,7 +103,6 @@ enum hnae_led_state {
#define HNS_RX_FLAG_L4ID_TCP 0x1
#define HNS_RX_FLAG_L4ID_SCTP 0x3
-
#define HNS_TXD_ASID_S 0
#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
#define HNS_TXD_BUFNUM_S 8
@@ -273,6 +274,9 @@ struct hnae_ring {
/* statistic */
struct ring_stats stats;
+ /* ring lock for poll one */
+ spinlock_t lock;
+
dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */
@@ -483,11 +487,11 @@ struct hnae_ae_ops {
u32 auto_neg, u32 rx_en, u32 tx_en);
void (*get_coalesce_usecs)(struct hnae_handle *handle,
u32 *tx_usecs, u32 *rx_usecs);
- void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
- u32 *tx_frames, u32 *rx_frames);
+ void (*get_max_coalesced_frames)(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames);
int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
int (*set_coalesce_frames)(struct hnae_handle *handle,
- u32 coalesce_frames);
+ u32 tx_frames, u32 rx_frames);
void (*get_coalesce_range)(struct hnae_handle *handle,
u32 *tx_frames_low, u32 *rx_frames_low,
u32 *tx_frames_high, u32 *rx_frames_high,
@@ -646,6 +650,41 @@ static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
}
+/* when the buffer size is changed, we must reinit the buffer descriptors */
+static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++)
+ ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
+/* when the buffer size is changed, we must reinit the page offsets */
+static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++) {
+ ring->desc_cb[j].page_offset = 0;
+ if (ring->desc[j].addr !=
+ cpu_to_le64(ring->desc_cb[j].dma))
+ ring->desc[j].addr =
+ cpu_to_le64(ring->desc_cb[j].dma);
+ }
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
#define hnae_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 0a9cdf00b31a..ff864a187d5a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -267,8 +267,32 @@ static int hns_ae_clr_multicast(struct hnae_handle *handle)
static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct hnae_queue *q;
+ u32 rx_buf_size;
+ int i, ret;
+
+ /* when buf_size is 2048, the max MTU is 6K since the rx ring max bd num is 3. */
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+ if (new_mtu <= BD_SIZE_2048_MAX_MTU)
+ rx_buf_size = 2048;
+ else
+ rx_buf_size = 4096;
+ } else {
+ rx_buf_size = mac_cb->dsaf_dev->buf_size;
+ }
+
+ ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size);
- return hns_mac_set_mtu(mac_cb, new_mtu);
+ if (!ret) {
+ /* reinit ring buf_size */
+ for (i = 0; i < handle->q_num; i++) {
+ q = handle->qs[i];
+ q->rx_ring.buf_size = rx_buf_size;
+ hns_rcb_set_rx_ring_bs(q, rx_buf_size);
+ }
+ }
+
+ return ret;
}
static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
@@ -463,15 +487,21 @@ static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
ring_pair->port_id_in_comm);
}
-static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
- u32 *tx_frames, u32 *rx_frames)
+static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames)
{
struct ring_pair_cb *ring_pair =
container_of(handle->qs[0], struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
- ring_pair->port_id_in_comm);
- *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG)
+ *tx_frames = hns_rcb_get_rx_coalesced_frames(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm);
+ else
+ *tx_frames = hns_rcb_get_tx_coalesced_frames(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm);
+ *rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common,
ring_pair->port_id_in_comm);
}
@@ -485,15 +515,34 @@ static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
}
-static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
- u32 coalesce_frames)
+static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+ u32 tx_frames, u32 rx_frames)
{
+ int ret;
struct ring_pair_cb *ring_pair =
container_of(handle->qs[0], struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- return hns_rcb_set_coalesced_frames(
- ring_pair->rcb_common,
- ring_pair->port_id_in_comm, coalesce_frames);
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG) {
+ if (tx_frames != rx_frames)
+ return -EINVAL;
+ return hns_rcb_set_rx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, rx_frames);
+ } else {
+ if (tx_frames != 1)
+ return -EINVAL;
+ ret = hns_rcb_set_tx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, tx_frames);
+ if (ret)
+ return ret;
+
+ return hns_rcb_set_rx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, rx_frames);
+ }
}
static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
@@ -504,20 +553,27 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
{
struct dsaf_device *dsaf_dev;
+ assert(handle);
+
dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
- *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
- *tx_frames_high =
- (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
- HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
- *rx_frames_high =
- (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
- HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
- *tx_usecs_low = 0;
- *rx_usecs_low = 0;
- *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
- *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
+ *tx_frames_low = HNS_RCB_TX_FRAMES_LOW;
+ *rx_frames_low = HNS_RCB_RX_FRAMES_LOW;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG)
+ *tx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ?
+ HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+ else
+ *tx_frames_high = 1;
+
+ *rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ?
+ HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+ *tx_usecs_low = HNS_RCB_TX_USECS_LOW;
+ *rx_usecs_low = HNS_RCB_RX_USECS_LOW;
+ *tx_usecs_high = HNS_RCB_TX_USECS_HIGH;
+ *rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}
void hns_ae_update_stats(struct hnae_handle *handle,
@@ -802,8 +858,9 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
/* update the current hash->queue mappings from the shadow RSS table */
- memcpy(indir, ppe_cb->rss_indir_table,
- HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+ if (indir)
+ memcpy(indir, ppe_cb->rss_indir_table,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
return 0;
}
@@ -814,15 +871,19 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
/* set the RSS Hash Key if specififed by the user */
- if (key)
- hns_ppe_set_rss_key(ppe_cb, (u32 *)key);
+ if (key) {
+ memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
+ hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
+ }
- /* update the shadow RSS table with user specified qids */
- memcpy(ppe_cb->rss_indir_table, indir,
- HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+ if (indir) {
+ /* update the shadow RSS table with user specified qids */
+ memcpy(ppe_cb->rss_indir_table, indir,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
- /* now update the hardware */
- hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ /* now update the hardware */
+ hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ }
return 0;
}
@@ -846,7 +907,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_autoneg = hns_ae_get_autoneg,
.set_pauseparam = hns_ae_set_pauseparam,
.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
- .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
+ .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
.set_coalesce_frames = hns_ae_set_coalesce_frames,
.get_coalesce_range = hns_ae_get_coalesce_range,
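The range reporting reworked above reduces to clamping against both the ring size and a hardware cap. A minimal standalone C sketch of that clamp (names and the cap value are illustrative, mirroring HNS_RCB_RX_FRAMES_HIGH, not the driver's own code):

    #include <stdio.h>

    #define FRAMES_HIGH 1023 /* assumed hw cap, mirrors HNS_RCB_RX_FRAMES_HIGH */

    /* at least one descriptor must stay unused, hence desc_num - 1 */
    static unsigned int frames_ceiling(unsigned int desc_num)
    {
        return (desc_num - 1 > FRAMES_HIGH) ? FRAMES_HIGH : desc_num - 1;
    }

    int main(void)
    {
        printf("%u\n", frames_ceiling(128));  /* 127: limited by the ring */
        printf("%u\n", frames_ceiling(4096)); /* 1023: limited by the cap */
        return 0;
    }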
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 3382441fe7b5..86944bc3b273 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -86,12 +86,11 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
}
-/**
-*hns_gmac_get_en - get port enable
-*@mac_drv:mac device
-*@rx:rx enable
-*@tx:tx enable
-*/
+/* hns_gmac_get_en - get port enable
+ * @mac_drv:mac device
+ * @rx:rx enable
+ * @tx:tx enable
+ */
static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
@@ -148,6 +147,17 @@ static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
GMAC_MAX_FRM_SIZE_S, newval);
}
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ u32 tx_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+ dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
@@ -250,7 +260,6 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
u32 full_duplex)
{
- u32 tx_ctrl;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
@@ -279,14 +288,6 @@ static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
return -EINVAL;
}
- tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
- dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1);
- dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1);
- dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
-
- dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
- GMAC_MODE_CHANGE_EB_B, 1);
-
return 0;
}
@@ -325,6 +326,17 @@ static void hns_gmac_init(void *mac_drv)
hns_gmac_tx_loop_pkt_dis(mac_drv);
if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
hns_gmac_set_uc_match(mac_drv, 0);
+
+ hns_gmac_config_pad_and_crc(mac_drv, 1);
+
+ dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+ GMAC_MODE_CHANGE_EB_B, 1);
+
+ /* reduce the gmac tx water line to avoid a gmac hang-up
+ * at speed 100M and half duplex.
+ */
+ dsaf_set_dev_field(drv, GMAC_TX_WATER_LINE_REG, GMAC_TX_WATER_LINE_MASK,
+ GMAC_TX_WATER_LINE_SHIFT, 8);
}
void hns_gmac_update_stats(void *mac_drv)
@@ -453,24 +465,6 @@ static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
return 0;
}
-static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
-{
- u32 tx_ctrl;
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
- dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
- dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
- dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
-}
-
-static void hns_gmac_get_id(void *mac_drv, u8 *mac_id)
-{
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- *mac_id = drv->mac_id;
-}
-
static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
{
enum hns_gmac_duplex_mdoe duplex;
@@ -672,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
static int hns_gmac_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ARRAY_SIZE(g_gmac_stats_string);
return 0;
@@ -712,7 +706,6 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
- mac_drv->mac_get_id = hns_gmac_get_id;
mac_drv->get_info = hns_gmac_get_info;
mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
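hns_gmac_config_pad_and_crc() is a classic read-modify-write: read the transmit control register once, toggle both bits, write once, so the register's other fields are untouched. A standalone sketch of the same pattern (bit positions are made up, not the real GMAC layout):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TX_PAD_EN_B  0 /* assumed positions, not the real register map */
    #define TX_CRC_ADD_B 1

    static inline uint32_t set_bit32(uint32_t v, unsigned int bit, bool on)
    {
        return on ? (v | (1u << bit)) : (v & ~(1u << bit));
    }

    /* returns the new register value; the caller does the single write back */
    static uint32_t config_pad_and_crc(uint32_t tx_ctrl, bool enable)
    {
        tx_ctrl = set_bit32(tx_ctrl, TX_PAD_EN_B, enable);
        tx_ctrl = set_bit32(tx_ctrl, TX_CRC_ADD_B, enable);
        return tx_ctrl;
    }

    int main(void)
    {
        printf("%#x\n", (unsigned int)config_pad_and_crc(0, true)); /* 0x3 */
        return 0;
    }

Configuring pad/CRC once from init, rather than on every adjust_link() call, is what lets the patch drop the duplicated register write from the link path.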
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index bdd8cdd732fb..8b5cdf490850 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -335,44 +335,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
return 0;
}
-/**
- *hns_mac_del_mac - delete mac address into dsaf table,can't delete the same
- * address twice
- *@net_dev: net device
- *@vfn : vf lan
- *@mac : mac address
- *return status
- */
-int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
-{
- struct mac_entry_idx *old_mac;
- struct dsaf_device *dsaf_dev;
- u32 ret;
-
- dsaf_dev = mac_cb->dsaf_dev;
-
- if (vfn < DSAF_MAX_VM_NUM) {
- old_mac = &mac_cb->addr_entry_idx[vfn];
- } else {
- dev_err(mac_cb->dev,
- "vf queue is too large, %s mac%d queue = %#x!\n",
- mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
- return -EINVAL;
- }
-
- if (dsaf_dev) {
- ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id,
- mac_cb->mac_id, old_mac->addr);
- if (ret)
- return ret;
-
- if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0)
- old_mac->valid = 0;
- }
-
- return 0;
-}
-
int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
{
struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
@@ -494,10 +456,9 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb)
}
}
-int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
- u32 buf_size = mac_cb->dsaf_dev->buf_size;
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
MAC_MAX_MTU : MAC_MAX_MTU_V2;
@@ -735,6 +696,8 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
rc = phy_device_register(phy);
if (rc) {
phy_device_free(phy);
+ dev_err(&mdio->dev, "failed to register phy at address %i\n",
+ addr);
return -ENODEV;
}
@@ -745,7 +708,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return 0;
}
-static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
+static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
{
struct acpi_reference_args args;
struct platform_device *pdev;
@@ -755,24 +718,39 @@ static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
/* Loop over the child nodes and register a phy_device for each one */
if (!to_acpi_device_node(mac_cb->fw_port))
- return;
+ return -ENODEV;
rc = acpi_node_get_property_reference(
mac_cb->fw_port, "mdio-node", 0, &args);
if (rc)
- return;
+ return rc;
addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
if (addr < 0)
- return;
+ return addr;
/* dev address in adev */
pdev = hns_dsaf_find_platform_device(acpi_fwnode_handle(args.adev));
+ if (!pdev) {
+ dev_err(mac_cb->dev, "mac%d mdio pdev is NULL\n",
+ mac_cb->mac_id);
+ return -EINVAL;
+ }
+
mii_bus = platform_get_drvdata(pdev);
+ if (!mii_bus) {
+ dev_err(mac_cb->dev,
+ "mac%d mdio is NULL, dsaf will probe again later\n",
+ mac_cb->mac_id);
+ return -EPROBE_DEFER;
+ }
+
rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
if (!rc)
dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n",
mac_cb->mac_id, addr);
+
+ return rc;
}
#define MAC_MEDIA_TYPE_MAX_LEN 16
@@ -793,7 +771,7 @@ static const struct {
*@np:device node
* return: 0 --success, negative --fail
*/
-static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
+static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
{
struct device_node *np;
struct regmap *syscon;
@@ -903,7 +881,15 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
}
}
} else if (is_acpi_node(mac_cb->fw_port)) {
- hns_mac_register_phy(mac_cb);
+ ret = hns_mac_register_phy(mac_cb);
+ /*
+ * The mac can work with or without a phy. If the port is not
+ * connected to a phy, the return value is ignored; it is only
+ * handled when a phy exists but its mdio bus cannot be found.
+ */
+ if (ret == -EPROBE_DEFER)
+ return ret;
} else {
dev_err(mac_cb->dev, "mac%d cannot find phy node\n",
mac_cb->mac_id);
@@ -1065,6 +1051,7 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
dsaf_dev->mac_cb[port_id] = mac_cb;
}
}
+
/* init mac_cb for all port */
for (port_id = 0; port_id < max_port_num; port_id++) {
mac_cb = dsaf_dev->mac_cb[port_id];
@@ -1074,6 +1061,7 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
if (ret)
return ret;
+
ret = hns_mac_init_ex(mac_cb);
if (ret)
return ret;
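The -EPROBE_DEFER return added above is the standard contract for an unready dependency: the driver core queues the device for another probe pass instead of failing it outright. A small user-space sketch of the idea (stub types; EPROBE_DEFER's value really is the kernel-internal 517):

    #include <stdio.h>
    #include <stddef.h>

    #define EPROBE_DEFER 517 /* kernel-internal errno: "try probing again later" */

    struct mii_bus { int dummy; };

    /* stand-in for platform_get_drvdata(): NULL until the mdio driver binds */
    static struct mii_bus *lookup_mii_bus(void) { return NULL; }

    static int register_phy(void)
    {
        struct mii_bus *bus = lookup_mii_bus();

        if (!bus)
            return -EPROBE_DEFER; /* not fatal: the core re-probes us later */
        /* ... register the phy on bus ... */
        return 0;
    }

    int main(void)
    {
        printf("register_phy() = %d\n", register_phy()); /* -517 */
        return 0;
    }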
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 2bb3d1e93c64..24dfba53a0f2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -373,8 +373,6 @@ struct mac_driver {
void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
/* config rx mode for promiscuous*/
void (*set_promiscuous)(void *mac_drv, u8 enable);
- /* get mac id */
- void (*mac_get_id)(void *mac_drv, u8 *mac_id);
void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
void (*autoneg_stat)(void *mac_drv, u32 *enable);
@@ -436,7 +434,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
void hns_mac_start(struct hns_mac_cb *mac_cb);
void hns_mac_stop(struct hns_mac_cb *mac_cb);
-int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac);
void hns_mac_uninit(struct dsaf_device *dsaf_dev);
void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_reset(struct hns_mac_cb *mac_cb);
@@ -444,7 +441,7 @@ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
-int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size);
int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
u8 *auto_neg, u16 *speed, u8 *duplex);
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
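For context on the hns_mac_set_mtu() signature change: the MTU check inside compares the wire frame, which adds the L2 header, FCS and one VLAN tag on top of the MTU. The arithmetic, as a standalone sketch using the kernel's real header-length constants:

    #include <stdio.h>

    #define ETH_HLEN    14 /* dst mac + src mac + ethertype */
    #define ETH_FCS_LEN  4
    #define VLAN_HLEN    4

    int main(void)
    {
        unsigned int mtu = 1500;
        unsigned int new_frm = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

        printf("mtu %u -> max frame %u\n", mtu, new_frm); /* 1500 -> 1522 */
        return 0;
    }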
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 403ea9db6dbd..e0bc79ea3d88 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -510,10 +510,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48);
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80);
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
/* for no enable pfc mode */
@@ -521,10 +521,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192);
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240);
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
@@ -1648,87 +1648,6 @@ int hns_dsaf_rm_mac_addr(
mac_entry->addr);
}
-/**
- * hns_dsaf_set_mac_mc_entry - set mac mc-entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mc-mac entry
- */
-int hns_dsaf_set_mac_mc_entry(
- struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
- struct dsaf_drv_tbl_tcam_key mac_key;
- struct dsaf_tbl_tcam_mcast_cfg mac_data;
- struct dsaf_drv_priv *priv =
- (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
- struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
- struct dsaf_drv_tbl_tcam_key tmp_mac_key;
- struct dsaf_tbl_tcam_data tcam_data;
-
- /* mac addr check */
- if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
- dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
- dsaf_dev->ae_dev.name, mac_entry->addr);
- return -EINVAL;
- }
-
- /*config key */
- hns_dsaf_set_mac_key(dsaf_dev, &mac_key,
- mac_entry->in_vlan_id,
- mac_entry->in_port_num, mac_entry->addr);
-
- /* entry ie exist? */
- entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
- if (entry_index == DSAF_INVALID_ENTRY_IDX) {
- /*if hasnot, find enpty entry*/
- entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
- if (entry_index == DSAF_INVALID_ENTRY_IDX) {
- /*if hasnot empty, error*/
- dev_err(dsaf_dev->dev,
- "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
- dsaf_dev->ae_dev.name,
- mac_key.high.val, mac_key.low.val);
- return -EINVAL;
- }
-
- /* config hardware entry */
- memset(mac_data.tbl_mcast_port_msk,
- 0, sizeof(mac_data.tbl_mcast_port_msk));
- } else {
- /* config hardware entry */
- hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
- &mac_data);
-
- tmp_mac_key.high.val =
- le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
- }
- mac_data.tbl_mcast_old_en = 0;
- mac_data.tbl_mcast_item_vld = 1;
- dsaf_set_field(mac_data.tbl_mcast_port_msk[0],
- 0x3F, 0, mac_entry->port_mask[0]);
-
- dev_dbg(dsaf_dev->dev,
- "set_uc_entry, %s key(%#x:%#x) entry_index%d\n",
- dsaf_dev->ae_dev.name, mac_key.high.val,
- mac_key.low.val, entry_index);
-
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
-
- hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, NULL,
- &mac_data);
-
- /* config software entry */
- soft_mac_entry += entry_index;
- soft_mac_entry->index = entry_index;
- soft_mac_entry->tcam_key.high.val = mac_key.high.val;
- soft_mac_entry->tcam_key.low.val = mac_key.low.val;
-
- return 0;
-}
-
static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
{
u16 *a = (u16 *)dst;
@@ -2090,166 +2009,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id,
return ret;
}
-/**
- * hns_dsaf_get_mac_uc_entry - get mac uc entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_single_dest_entry *mac_entry)
-{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
- struct dsaf_drv_tbl_tcam_key mac_key;
-
- struct dsaf_tbl_tcam_ucast_cfg mac_data;
- struct dsaf_tbl_tcam_data tcam_data;
-
- /* check macaddr */
- if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
- MAC_IS_BROADCAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
- mac_entry->addr);
- return -EINVAL;
- }
-
- /*config key */
- hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
- mac_entry->in_port_num, mac_entry->addr);
-
- /*check exist? */
- entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
- if (entry_index == DSAF_INVALID_ENTRY_IDX) {
- /*find none, error */
- dev_err(dsaf_dev->dev,
- "get_uc_entry failed, %s Mac key(%#x:%#x)\n",
- dsaf_dev->ae_dev.name,
- mac_key.high.val, mac_key.low.val);
- return -EINVAL;
- }
- dev_dbg(dsaf_dev->dev,
- "get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
- dsaf_dev->ae_dev.name, mac_key.high.val,
- mac_key.low.val, entry_index);
-
- /* read entry */
- hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
- mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
- mac_entry->port_num = mac_data.tbl_ucast_out_port;
-
- return 0;
-}
-
-/**
- * hns_dsaf_get_mac_mc_entry - get mac mc entry
- * @dsaf_dev: dsa fabric device struct pointer
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
- u16 entry_index = DSAF_INVALID_ENTRY_IDX;
- struct dsaf_drv_tbl_tcam_key mac_key;
-
- struct dsaf_tbl_tcam_mcast_cfg mac_data;
- struct dsaf_tbl_tcam_data tcam_data;
-
- /*check mac addr */
- if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
- MAC_IS_BROADCAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
- mac_entry->addr);
- return -EINVAL;
- }
-
- /*config key */
- hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
- mac_entry->in_port_num, mac_entry->addr);
-
- /*check exist? */
- entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
- if (entry_index == DSAF_INVALID_ENTRY_IDX) {
- /* find none, error */
- dev_err(dsaf_dev->dev,
- "get_mac_uc_entry failed, %s Mac key(%#x:%#x)\n",
- dsaf_dev->ae_dev.name, mac_key.high.val,
- mac_key.low.val);
- return -EINVAL;
- }
- dev_dbg(dsaf_dev->dev,
- "get_mac_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
- dsaf_dev->ae_dev.name, mac_key.high.val,
- mac_key.low.val, entry_index);
-
- /*read entry */
- hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
- mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
- mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
- return 0;
-}
-
-/**
- * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index
- * @dsaf_dev: dsa fabric device struct pointer
- * @entry_index: tab entry index
- * @mac_entry: mac entry
- */
-int hns_dsaf_get_mac_entry_by_index(
- struct dsaf_device *dsaf_dev,
- u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry)
-{
- struct dsaf_drv_tbl_tcam_key mac_key;
-
- struct dsaf_tbl_tcam_mcast_cfg mac_data;
- struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
- struct dsaf_tbl_tcam_data tcam_data;
- char mac_addr[ETH_ALEN] = {0};
-
- if (entry_index >= dsaf_dev->tcam_max_num) {
- /* find none, del error */
- dev_err(dsaf_dev->dev, "get_uc_entry failed, %s\n",
- dsaf_dev->ae_dev.name);
- return -EINVAL;
- }
-
- /* mc entry, do read opt */
- hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
-
- mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
- mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
-
- /***get mac addr*/
- mac_addr[0] = mac_key.high.bits.mac_0;
- mac_addr[1] = mac_key.high.bits.mac_1;
- mac_addr[2] = mac_key.high.bits.mac_2;
- mac_addr[3] = mac_key.high.bits.mac_3;
- mac_addr[4] = mac_key.low.bits.mac_4;
- mac_addr[5] = mac_key.low.bits.mac_5;
- /**is mc or uc*/
- if (MAC_IS_MULTICAST((u8 *)mac_addr) ||
- MAC_IS_L3_MULTICAST((u8 *)mac_addr)) {
- /**mc donot do*/
- } else {
- /*is not mc, just uc... */
- hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data,
- &mac_uc_data);
-
- mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
- mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
- }
-
- return 0;
-}
-
static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
size_t sizeof_priv)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index cef6bf46ae93..4507e8222683 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -68,7 +68,7 @@ enum dsaf_roce_qos_sl {
};
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
-#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
enum hal_dsaf_mode {
HRD_DSAF_NO_DSAF_MODE = 0x0,
@@ -429,23 +429,12 @@ static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry);
int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
u8 in_port_num, u8 *addr);
int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_single_dest_entry *mac_entry);
-int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-int hns_dsaf_get_mac_entry_by_index(
- struct dsaf_device *dsaf_dev,
- u16 entry_index,
- struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
@@ -475,5 +464,4 @@ int hns_dsaf_rm_mac_addr(
int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 6ea872287307..b62816c1574e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
int hns_ppe_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ETH_PPE_STATIC_NUM;
return 0;
}
@@ -496,21 +496,23 @@ void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
*/
int hns_ppe_init(struct dsaf_device *dsaf_dev)
{
- int i, k;
int ret;
+ int i;
for (i = 0; i < HNS_PPE_COM_NUM; i++) {
ret = hns_ppe_common_get_cfg(dsaf_dev, i);
if (ret)
- goto get_ppe_cfg_fail;
+ goto get_cfg_fail;
ret = hns_rcb_common_get_cfg(dsaf_dev, i);
if (ret)
- goto get_rcb_cfg_fail;
+ goto get_cfg_fail;
hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
- hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+ ret = hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+ if (ret)
+ goto get_cfg_fail;
}
for (i = 0; i < HNS_PPE_COM_NUM; i++)
@@ -518,13 +520,12 @@ int hns_ppe_init(struct dsaf_device *dsaf_dev)
return 0;
-get_rcb_cfg_fail:
- hns_ppe_common_free_cfg(dsaf_dev, i);
-get_ppe_cfg_fail:
- for (k = i - 1; k >= 0; k--) {
- hns_rcb_common_free_cfg(dsaf_dev, k);
- hns_ppe_common_free_cfg(dsaf_dev, k);
+get_cfg_fail:
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ hns_rcb_common_free_cfg(dsaf_dev, i);
+ hns_ppe_common_free_cfg(dsaf_dev, i);
}
+
return ret;
}
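The reworked unwind above trades the old partial-rollback bookkeeping (the k loop) for freeing every index unconditionally, which is only safe when the free helpers tolerate slots that were never set up. A minimal sketch of that pattern, assuming NULL-safe free helpers:

    #include <stdlib.h>

    #define N 4

    static void *slot[N];

    static int init_all(void)
    {
        int i;

        for (i = 0; i < N; i++) {
            slot[i] = malloc(64);
            if (!slot[i])
                goto fail;
        }
        return 0;

    fail:
        /* free() on NULL is a no-op, so freeing all N slots is safe */
        for (i = 0; i < N; i++) {
            free(slot[i]);
            slot[i] = NULL;
        }
        return -1;
    }

    int main(void) { return init_all() ? 1 : 0; }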
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index f0ed80d6ef9c..6f3570cfb501 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -32,6 +32,9 @@
#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10
+/* Because the default mtu is 1500, an rcb buffer size of 2048 is enough */
+#define RCB_DEFAULT_BUFFER_SIZE 2048
+
/**
*hns_rcb_wait_fbd_clean - clean fbd
*@qs: ring struct pointer array
@@ -192,6 +195,30 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
wmb(); /* Sync point after breakpoint */
}
+/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+ u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+ bd_size_type);
+}
+
+/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+ u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+ bd_size_type);
+}
+
/**
*hns_rcb_ring_init - init rcb ring
*@ring_pair: ring pair control block
@@ -200,8 +227,6 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
struct hnae_queue *q = &ring_pair->q;
- struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
- u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
struct hnae_ring *ring =
(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
dma_addr_t dma = ring->desc_dma_addr;
@@ -212,8 +237,8 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
- dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
- bd_size_type);
+ hns_rcb_set_rx_ring_bs(q, ring->buf_size);
+
dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
@@ -224,12 +249,12 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
- dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
- bd_size_type);
+ hns_rcb_set_tx_ring_bs(q, ring->buf_size);
+
dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
- ring_pair->port_id_in_comm);
+ ring_pair->port_id_in_comm + HNS_RCB_TX_PKTLINE_OFFSET);
}
}
@@ -259,13 +284,27 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
static void hns_rcb_set_port_timeout(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
- if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
timeout * HNS_RCB_CLK_FREQ_MHZ);
- else
+ } else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
+ if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+ HNS_RCB_DEF_GAP_TIME_USECS);
+ else
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+ timeout);
+
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+ timeout);
+ } else {
dsaf_write_dev(rcb_common,
RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
timeout);
+ }
}
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -327,8 +366,12 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
for (i = 0; i < port_num; i++) {
hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
- (void)hns_rcb_set_coalesced_frames(
- rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
+ hns_rcb_set_rx_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
+ if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
+ !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+ hns_rcb_set_tx_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
hns_rcb_set_port_timeout(
rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
}
@@ -380,7 +423,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
struct hnae_ring *ring;
struct rcb_common_cb *rcb_common;
struct ring_pair_cb *ring_pair_cb;
- u32 buf_size;
u16 desc_num, mdnum_ppkt;
bool irq_idx, is_ver1;
@@ -401,7 +443,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
}
rcb_common = ring_pair_cb->rcb_common;
- buf_size = rcb_common->dsaf_dev->buf_size;
desc_num = rcb_common->dsaf_dev->desc_num;
ring->desc = NULL;
@@ -410,7 +451,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
ring->irq = ring_pair_cb->virq[irq_idx];
ring->desc_dma_addr = 0;
- ring->buf_size = buf_size;
+ ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
ring->desc_num = desc_num;
ring->max_desc_num_per_pkt = mdnum_ppkt;
ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
@@ -430,7 +471,6 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
static int hns_rcb_get_port_in_comm(
struct rcb_common_cb *rcb_common, int ring_idx)
{
-
return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
@@ -452,7 +492,7 @@ static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
*hns_rcb_get_cfg - get rcb config
*@rcb_common: rcb common device
*/
-void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
struct ring_pair_cb *ring_pair_cb;
u32 i;
@@ -477,26 +517,48 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
platform_get_irq(pdev, base_irq_idx + i * 3);
+ if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
+ (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
ring_pair_cb->q.phy_base =
RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
hns_rcb_ring_pair_get_cfg(ring_pair_cb);
}
+
+ return 0;
}
/**
- *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
+ *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
*@rcb_common: rcb_common device
*@port_idx:port id in comm
*
*Returns: coalesced_frames
*/
-u32 hns_rcb_get_coalesced_frames(
+u32 hns_rcb_get_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx)
{
return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}
/**
+ *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
+ */
+u32 hns_rcb_get_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+ u64 reg;
+
+ reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+ return dsaf_read_dev(rcb_common, reg);
+}
+
+/**
*hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
*@rcb_common: rcb_common device
*@port_idx:port id in comm
@@ -538,33 +600,47 @@ int hns_rcb_set_coalesce_usecs(
return -EINVAL;
}
}
- if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
+ if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
dev_err(rcb_common->dsaf_dev->dev,
- "error: coalesce_usecs setting supports 0~1023us\n");
+ "error: coalesce_usecs setting supports 1~1023us\n");
return -EINVAL;
}
+ hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+ return 0;
+}
- if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
- if (timeout == 0)
- /* set timeout to 0, Disable gap time */
- dsaf_set_reg_field(rcb_common->io_base,
- RCB_INT_GAP_TIME_REG + port_idx * 4,
- PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
- 0);
- else
- /* set timeout non 0, restore gap time to 1 */
- dsaf_set_reg_field(rcb_common->io_base,
- RCB_INT_GAP_TIME_REG + port_idx * 4,
- PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
- 1);
+/**
+ *hns_rcb_set_tx_coalesced_frames - set rcb tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *@coalesced_frames: tx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
+{
+ u32 old_waterline =
+ hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
+ u64 reg;
+
+ if (coalesced_frames == old_waterline)
+ return 0;
+
+ if (coalesced_frames != 1) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support tx coalesce_frames setting!\n");
+ return -EINVAL;
}
- hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+ reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+ dsaf_write_dev(rcb_common, reg, coalesced_frames);
return 0;
}
/**
- *hns_rcb_set_coalesced_frames - set rcb coalesced frames
+ *hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
*@rcb_common: rcb_common device
*@port_idx:port id in comm
*@coalesced_frames:tx/rx BD num for coalesced frames
@@ -572,10 +648,11 @@ int hns_rcb_set_coalesce_usecs(
* Returns:
* Zero for success, or an error code in case of failure
*/
-int hns_rcb_set_coalesced_frames(
+int hns_rcb_set_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
- u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
+ u32 old_waterline =
+ hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);
if (coalesced_frames == old_waterline)
return 0;
@@ -799,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
*/
int hns_rcb_get_ring_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return HNS_RING_STATIC_REG_NUM;
return 0;
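The HNS_RCB_TX_PKTLINE_OFFSET scheme used by the new tx helpers keeps rx and tx waterlines in one register bank: rx at index port_idx, tx at port_idx + 8, entries 4 bytes apart. The addressing, as arithmetic (the base address is a placeholder, not the real RCB_CFG_PKTLINE_REG):

    #include <stdint.h>
    #include <stdio.h>

    #define PKTLINE_BASE      0x9000u /* placeholder for RCB_CFG_PKTLINE_REG */
    #define TX_PKTLINE_OFFSET 8       /* mirrors HNS_RCB_TX_PKTLINE_OFFSET */

    static uint32_t rx_pktline_reg(uint32_t port)
    {
        return PKTLINE_BASE + port * 4;
    }

    static uint32_t tx_pktline_reg(uint32_t port)
    {
        return PKTLINE_BASE + (port + TX_PKTLINE_OFFSET) * 4;
    }

    int main(void)
    {
        /* port 2: rx at 0x9008, tx at 0x9028 */
        printf("rx %#x tx %#x\n", (unsigned int)rx_pktline_reg(2),
               (unsigned int)tx_pktline_reg(2));
        return 0;
    }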
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 99b4e1ba0a94..602816498c8d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -35,12 +35,23 @@ struct rcb_common_cb;
#define HNS_RCB_REG_OFFSET 0x10000
+#define HNS_RCB_TX_FRAMES_LOW 1
+#define HNS_RCB_RX_FRAMES_LOW 1
+#define HNS_RCB_TX_FRAMES_HIGH 1023
+#define HNS_RCB_RX_FRAMES_HIGH 1023
+#define HNS_RCB_TX_USECS_LOW 1
+#define HNS_RCB_RX_USECS_LOW 1
+#define HNS_RCB_TX_USECS_HIGH 1023
+#define HNS_RCB_RX_USECS_HIGH 1023
#define HNS_RCB_MAX_COALESCED_FRAMES 1023
#define HNS_RCB_MIN_COALESCED_FRAMES 1
-#define HNS_RCB_DEF_COALESCED_FRAMES 50
+#define HNS_RCB_DEF_RX_COALESCED_FRAMES 50
+#define HNS_RCB_DEF_TX_COALESCED_FRAMES 1
#define HNS_RCB_CLK_FREQ_MHZ 350
#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
-#define HNS_RCB_DEF_COALESCED_USECS 50
+#define HNS_RCB_DEF_COALESCED_USECS 30
+#define HNS_RCB_DEF_GAP_TIME_USECS 20
+#define HNS_RCB_TX_PKTLINE_OFFSET 8
#define HNS_RCB_COMMON_ENDIAN 1
@@ -110,7 +121,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
void hns_rcb_start(struct hnae_queue *q, u32 val);
-void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
@@ -125,13 +136,17 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_init_hw(struct ring_pair_cb *ring);
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
-u32 hns_rcb_get_coalesced_frames(
+u32 hns_rcb_get_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx);
u32 hns_rcb_get_coalesce_usecs(
struct rcb_common_cb *rcb_common, u32 port_idx);
int hns_rcb_set_coalesce_usecs(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
-int hns_rcb_set_coalesced_frames(
+int hns_rcb_set_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
+int hns_rcb_set_tx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
void hns_rcb_update_stats(struct hnae_queue *queue);
@@ -146,4 +161,7 @@ int hns_rcb_get_ring_regs_count(void);
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
void hns_rcb_get_strings(int stringset, u8 *data, int index);
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
+
#endif /* _HNS_DSAF_RCB_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 8fa18fc17cd2..46a52d9bb196 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -421,7 +421,7 @@
#define RCB_CFG_OVERTIME_REG 0x9300
#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
-#define RCB_INT_GAP_TIME_REG 0x9400
+#define RCB_PORT_INT_GAPTIME_REG 0x9400
#define RCB_PORT_CFG_OVERTIME_REG 0x9430
#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
@@ -466,6 +466,7 @@
#define GMAC_DUPLEX_TYPE_REG 0x0008UL
#define GMAC_FD_FC_TYPE_REG 0x000CUL
+#define GMAC_TX_WATER_LINE_REG 0x0010UL
#define GMAC_FC_TX_TIMER_REG 0x001CUL
#define GMAC_FD_FC_ADDR_LOW_REG 0x0020UL
#define GMAC_FD_FC_ADDR_HIGH_REG 0x0024UL
@@ -912,6 +913,9 @@
#define GMAC_DUPLEX_TYPE_B 0
+#define GMAC_TX_WATER_LINE_MASK ((1UL << 8) - 1)
+#define GMAC_TX_WATER_LINE_SHIFT 0
+
#define GMAC_FC_TX_TIMER_S 0
#define GMAC_FC_TX_TIMER_M 0xffff
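The new GMAC_TX_WATER_LINE_MASK/_SHIFT pair follows the usual mask-and-shift field idiom. A standalone sketch of a field write under that convention (the driver's own dsaf_set_dev_field() may expect a pre-shifted mask; this sketch uses an unshifted one):

    #include <stdint.h>

    #define WATER_LINE_MASK  ((1u << 8) - 1) /* mirrors GMAC_TX_WATER_LINE_MASK */
    #define WATER_LINE_SHIFT 0               /* mirrors GMAC_TX_WATER_LINE_SHIFT */

    /* clear the field, then or in the new value */
    static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
                              uint32_t val)
    {
        reg &= ~(mask << shift);
        reg |= (val & mask) << shift;
        return reg;
    }

    int main(void)
    {
        /* the gmac init path above writes a tx water line of 8 */
        return set_field(0, WATER_LINE_MASK, WATER_LINE_SHIFT, 8) == 8 ? 0 : 1;
    }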
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index aae830a93050..51e7e9f5af49 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -300,18 +300,6 @@ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
}
/**
- *hns_xgmac_get_id - get xgmac port id
- *@mac_drv: mac driver
- *@newval:xgmac max frame length
- */
-static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id)
-{
- struct mac_driver *drv = (struct mac_driver *)mac_drv;
-
- *mac_id = drv->mac_id;
-}
-
-/**
*hns_xgmac_config_max_frame_length - set xgmac max frame length
*@mac_drv: mac driver
*@newval:xgmac max frame length
@@ -793,7 +781,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data)
*/
static int hns_xgmac_get_sset_count(int stringset)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ARRAY_SIZE(g_xgmac_stats_string);
return 0;
@@ -833,7 +821,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->config_half_duplex = NULL;
mac_drv->set_rx_ignore_pause_frames =
hns_xgmac_set_rx_ignore_pause_frames;
- mac_drv->mac_get_id = hns_xgmac_get_id;
mac_drv->mac_free = hns_xgmac_free;
mac_drv->adjust_link = NULL;
mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index fca37e2c7f01..c6700b91a2df 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -512,7 +512,8 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
int last_offset;
bool twobufs;
- twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
+ twobufs = ((PAGE_SIZE < 8192) &&
+ hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
@@ -859,7 +860,7 @@ out:
return recv_pkts;
}
-static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int num = 0;
@@ -873,22 +874,23 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
- napi_schedule(&ring_data->napi);
+ return false;
+ } else {
+ return true;
}
}
-static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
- int num = 0;
+ int num;
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
- if (num == 0)
- ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
- ring, 0);
+ if (!num)
+ return true;
else
- napi_schedule(&ring_data->napi);
+ return false;
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
@@ -921,12 +923,13 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
-#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
-#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
+#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
+#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
#else
-#define NETIF_TX_LOCK(ndev)
-#define NETIF_TX_UNLOCK(ndev)
+#define NETIF_TX_LOCK(ring)
+#define NETIF_TX_UNLOCK(ring)
#endif
+
/* reclaim all desc in one budget
* return error or number of desc left
*/
@@ -940,13 +943,13 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
int head;
int bytes, pkts;
- NETIF_TX_LOCK(ndev);
+ NETIF_TX_LOCK(ring);
head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
rmb(); /* make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean) {
- NETIF_TX_UNLOCK(ndev);
+ NETIF_TX_UNLOCK(ring);
return 0; /* no data to poll */
}
@@ -954,7 +957,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
ring->stats.io_err_cnt++;
- NETIF_TX_UNLOCK(ndev);
+ NETIF_TX_UNLOCK(ring);
return -EIO;
}
@@ -966,7 +969,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
- NETIF_TX_UNLOCK(ndev);
+ NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_completed_queue(dev_queue, pkts, bytes);
@@ -989,7 +992,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
return 0;
}
-static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int head;
@@ -1002,20 +1005,21 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 1);
- napi_schedule(&ring_data->napi);
+ return false;
+ } else {
+ return true;
}
}
-static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head == ring->next_to_clean)
- ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
- ring, 0);
+ return true;
else
- napi_schedule(&ring_data->napi);
+ return false;
}
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
@@ -1026,7 +1030,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
int head;
int bytes, pkts;
- NETIF_TX_LOCK(ndev);
+ NETIF_TX_LOCK(ring);
head = ring->next_to_use; /* ntu :soft setted ring position*/
bytes = 0;
@@ -1034,7 +1038,7 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
while (head != ring->next_to_clean)
hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
- NETIF_TX_UNLOCK(ndev);
+ NETIF_TX_UNLOCK(ring);
dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
netdev_tx_reset_queue(dev_queue);
@@ -1042,15 +1046,23 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
+ int clean_complete = 0;
struct hns_nic_ring_data *ring_data =
container_of(napi, struct hns_nic_ring_data, napi);
- int clean_complete = ring_data->poll_one(
- ring_data, budget, ring_data->ex_process);
+ struct hnae_ring *ring = ring_data->ring;
- if (clean_complete >= 0 && clean_complete < budget) {
- napi_complete(napi);
- ring_data->fini_process(ring_data);
- return 0;
+try_again:
+ clean_complete += ring_data->poll_one(
+ ring_data, budget - clean_complete,
+ ring_data->ex_process);
+
+ if (clean_complete < budget) {
+ if (ring_data->fini_process(ring_data)) {
+ napi_complete(napi);
+ ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+ } else {
+ goto try_again;
+ }
}
return clean_complete;
@@ -1196,54 +1208,31 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx)
napi_disable(&priv->ring_data[idx].napi);
}
-static void hns_set_irq_affinity(struct hns_nic_priv *priv)
+static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
+ struct hnae_ring *ring, cpumask_t *mask)
{
- struct hnae_handle *h = priv->ae_handle;
- struct hns_nic_ring_data *rd;
- int i;
int cpu;
- cpumask_var_t mask;
-
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
- return;
- /*diffrent irq banlance for 16core and 32core*/
- if (h->q_num == num_possible_cpus()) {
- for (i = 0; i < h->q_num * 2; i++) {
- rd = &priv->ring_data[i];
- if (cpu_online(rd->queue_index)) {
- cpumask_clear(mask);
- cpu = rd->queue_index;
- cpumask_set_cpu(cpu, mask);
- (void)irq_set_affinity_hint(rd->ring->irq,
- mask);
- }
- }
+ /* Different irq balance between 16 core and 32 core.
+ * The cpu mask is set by ring index according to the ring flag,
+ * which indicates whether the ring is tx or rx.
+ */
+ if (q_num == num_possible_cpus()) {
+ if (is_tx_ring(ring))
+ cpu = ring_idx;
+ else
+ cpu = ring_idx - q_num;
} else {
- for (i = 0; i < h->q_num; i++) {
- rd = &priv->ring_data[i];
- if (cpu_online(rd->queue_index * 2)) {
- cpumask_clear(mask);
- cpu = rd->queue_index * 2;
- cpumask_set_cpu(cpu, mask);
- (void)irq_set_affinity_hint(rd->ring->irq,
- mask);
- }
- }
-
- for (i = h->q_num; i < h->q_num * 2; i++) {
- rd = &priv->ring_data[i];
- if (cpu_online(rd->queue_index * 2 + 1)) {
- cpumask_clear(mask);
- cpu = rd->queue_index * 2 + 1;
- cpumask_set_cpu(cpu, mask);
- (void)irq_set_affinity_hint(rd->ring->irq,
- mask);
- }
- }
+ if (is_tx_ring(ring))
+ cpu = ring_idx * 2;
+ else
+ cpu = (ring_idx - q_num) * 2 + 1;
}
- free_cpumask_var(mask);
+ cpumask_clear(mask);
+ cpumask_set_cpu(cpu, mask);
+
+ return cpu;
}
static int hns_nic_init_irq(struct hns_nic_priv *priv)
@@ -1252,6 +1241,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
struct hns_nic_ring_data *rd;
int i;
int ret;
+ int cpu;
for (i = 0; i < h->q_num * 2; i++) {
rd = &priv->ring_data[i];
@@ -1261,7 +1251,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
"%s-%s%d", priv->netdev->name,
- (i < h->q_num ? "tx" : "rx"), rd->queue_index);
+ (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
@@ -1273,12 +1263,17 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
return ret;
}
disable_irq(rd->ring->irq);
+
+ cpu = hns_nic_init_affinity_mask(h->q_num, i,
+ rd->ring, &rd->mask);
+
+ if (cpu_online(cpu))
+ irq_set_affinity_hint(rd->ring->irq,
+ &rd->mask);
+
rd->ring->irq_init_flag = RCB_IRQ_INITED;
}
- /*set cpu affinity*/
- hns_set_irq_affinity(priv);
-
return 0;
}
@@ -1487,32 +1482,259 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
return (netdev_tx_t)ret;
}
+static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+#define HNS_LB_TX_RING 0
+static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethhdr;
+ int frame_len;
+
+ /* allocate test skb */
+ skb = alloc_skb(64, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, 64);
+ skb->dev = ndev;
+ memset(skb->data, 0xFF, skb->len);
+
+ /* must be a tcp/ip packet */
+ ethhdr = (struct ethhdr *)skb->data;
+ ethhdr->h_proto = htons(ETH_P_IP);
+
+ frame_len = skb->len & (~1ul);
+ memset(&skb->data[frame_len / 2], 0xAA,
+ frame_len / 2 - 1);
+
+ skb->queue_mapping = HNS_LB_TX_RING;
+
+ return skb;
+}
+
+static int hns_enable_serdes_lb(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ int speed, duplex;
+ int ret;
+
+ ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
+ if (ret)
+ return ret;
+
+ ret = ops->start ? ops->start(h) : 0;
+ if (ret)
+ return ret;
+
+ /* adjust link speed and duplex */
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ speed = 1000;
+ else
+ speed = 10000;
+ duplex = 1;
+
+ ops->adjust_link(h, speed, duplex);
+
+ /* wait h/w ready */
+ mdelay(300);
+
+ return 0;
+}
+
+static void hns_disable_serdes_lb(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+
+ ops->stop(h);
+ ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
+}
+
+/**
+ *hns_nic_clear_all_rx_fetch - clear the descriptors the chip has fetched.
+ *The function works as follows:
+ * 1. if an rx ring finds a page_offset that is not 0 between head and
+ * tail, the chip has fetched the wrong descs for that ring while its
+ * buffer size was 4096.
+ * 2. enable the chip serdes loopback and point the rss indirection
+ * table at that ring.
+ * 3. construct 64-byte ip broadcast packets and wait until the rx ring
+ * has received them all, so that the chip fetches fresh descriptors.
+ * 4. recover the original state.
+ *
+ *@ndev: net device
+ */
+static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ struct hns_nic_ring_data *rd;
+ struct hnae_ring *ring;
+ struct sk_buff *skb;
+ u32 *org_indir;
+ u32 *cur_indir;
+ int indir_size;
+ int head, tail;
+ int fetch_num;
+ int i, j;
+ bool found;
+ int retry_times;
+ int ret = 0;
+
+ /* alloc indir memory */
+ indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
+ org_indir = kzalloc(indir_size, GFP_KERNEL);
+ if (!org_indir)
+ return -ENOMEM;
+
+ /* store the original indirection table */
+ ops->get_rss(h, org_indir, NULL, NULL);
+
+ cur_indir = kzalloc(indir_size, GFP_KERNEL);
+ if (!cur_indir) {
+ ret = -ENOMEM;
+ goto cur_indir_alloc_err;
+ }
+
+ /* set loopback */
+ if (hns_enable_serdes_lb(ndev)) {
+ ret = -EINVAL;
+ goto enable_serdes_lb_err;
+ }
+
+ /* walk every rx ring and clear the fetched descs */
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
+ found = false;
+ fetch_num = ring_dist(ring, head, tail);
+
+ while (head != tail) {
+ if (ring->desc_cb[head].page_offset != 0) {
+ found = true;
+ break;
+ }
+
+ head++;
+ if (head == ring->desc_num)
+ head = 0;
+ }
+
+ if (found) {
+ for (j = 0; j < indir_size / sizeof(*org_indir); j++)
+ cur_indir[j] = i;
+ ops->set_rss(h, cur_indir, NULL, 0);
+
+ for (j = 0; j < fetch_num; j++) {
+ /* alloc one skb and init */
+ skb = hns_assemble_skb(ndev);
+ if (!skb)
+ goto out;
+ rd = &tx_ring_data(priv, skb->queue_mapping);
+ hns_nic_net_xmit_hw(ndev, skb, rd);
+
+ retry_times = 0;
+ while (retry_times++ < 10) {
+ mdelay(10);
+ /* clean rx */
+ rd = &rx_ring_data(priv, i);
+ if (rd->poll_one(rd, fetch_num,
+ hns_nic_drop_rx_fetch))
+ break;
+ }
+
+ retry_times = 0;
+ while (retry_times++ < 10) {
+ mdelay(10);
+ /* reclaim the packet sent on tx ring 0 */
+ rd = &tx_ring_data(priv,
+ HNS_LB_TX_RING);
+ if (rd->poll_one(rd, fetch_num, NULL))
+ break;
+ }
+ }
+ }
+ }
+
+out:
+ /* restore everything */
+ ops->set_rss(h, org_indir, NULL, 0);
+ hns_disable_serdes_lb(ndev);
+enable_serdes_lb_err:
+ kfree(cur_indir);
+cur_indir_alloc_err:
+ kfree(org_indir);
+
+ return ret;
+}
+
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
+ bool if_running = netif_running(ndev);
int ret;
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ /* MTU no change */
+ if (new_mtu == ndev->mtu)
+ return 0;
+
if (!h->dev->ops->set_mtu)
return -ENOTSUPP;
- if (netif_running(ndev)) {
+ if (if_running) {
(void)hns_nic_net_stop(ndev);
msleep(100);
+ }
- ret = h->dev->ops->set_mtu(h, new_mtu);
- if (ret)
- netdev_err(ndev, "set mtu fail, return value %d\n",
- ret);
+ if (priv->enet_ver != AE_VERSION_1 &&
+ ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
+ new_mtu > BD_SIZE_2048_MAX_MTU) {
+ /* update desc */
+ hnae_reinit_all_ring_desc(h);
- if (hns_nic_net_open(ndev))
- netdev_err(ndev, "hns net open fail\n");
- } else {
- ret = h->dev->ops->set_mtu(h, new_mtu);
+ /* clear the package which the chip has fetched */
+ ret = hns_nic_clear_all_rx_fetch(ndev);
+
+ /* the page offset must be consistent with the desc */
+ hnae_reinit_all_ring_page_off(h);
+
+ if (ret) {
+ netdev_err(ndev, "clear the fetched desc fail\n");
+ goto out;
+ }
+ }
+
+ ret = h->dev->ops->set_mtu(h, new_mtu);
+ if (ret) {
+ netdev_err(ndev, "set mtu fail, return value %d\n",
+ ret);
+ goto out;
}
- if (!ret)
- ndev->mtu = new_mtu;
+ /* finally, set new mtu to netdevice */
+ ndev->mtu = new_mtu;
+
+out:
+ if (if_running) {
+ if (hns_nic_net_open(ndev)) {
+ netdev_err(ndev, "hns net open fail\n");
+ ret = -EINVAL;
+ }
+ }
return ret;
}
@@ -1791,7 +2013,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
-
+ /* make sure the prior stores are visible before clearing the bit */
smp_mb__before_atomic();
clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
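The hns_nic_common_poll() rework above changes the NAPI contract: poll_one() consumes budget, fini_process() now returns whether the ring is truly drained, and the irq is re-enabled only after a drained report; otherwise the loop re-polls to close the race with late-arriving work. The control flow, as a hedged standalone sketch with stubbed helpers:

    #include <stdbool.h>
    #include <stdio.h>

    static int pending = 5; /* fake work queue */

    static int poll_one(int budget) /* returns packets processed */
    {
        int n = pending < budget ? pending : budget;

        pending -= n;
        return n;
    }

    static bool ring_drained(void) { return pending == 0; }

    static int common_poll(int budget)
    {
        int done = 0;

    try_again:
        done += poll_one(budget - done);
        if (done < budget) {
            if (ring_drained())
                ;   /* napi_complete() + re-enable the ring irq here */
            else
                goto try_again; /* more work arrived: poll again */
        }
        return done;
    }

    int main(void)
    {
        printf("polled %d\n", common_poll(64)); /* 5 */
        return 0;
    }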
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 5b412de350aa..1b83232082b2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -37,10 +37,11 @@ enum hns_nic_state {
struct hns_nic_ring_data {
struct hnae_ring *ring;
struct napi_struct napi;
+ cpumask_t mask; /* affinity mask */
int queue_index;
int (*poll_one)(struct hns_nic_ring_data *, int, void *);
void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
- void (*fini_process)(struct hns_nic_ring_data *);
+ bool (*fini_process)(struct hns_nic_ring_data *);
};
/* compatible the difference between two versions */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3ac2183dbd21..b8fab149690f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -146,7 +146,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
/* When there is no phy, autoneg is off. */
cmd->base.autoneg = false;
- cmd->base.cmd = speed;
+ cmd->base.speed = speed;
cmd->base.duplex = duplex;
if (net_dev->phydev)
@@ -764,14 +764,14 @@ static int hns_get_coalesce(struct net_device *net_dev,
ec->use_adaptive_tx_coalesce = 1;
if ((!ops->get_coalesce_usecs) ||
- (!ops->get_rx_max_coalesced_frames))
+ (!ops->get_max_coalesced_frames))
return -ESRCH;
ops->get_coalesce_usecs(priv->ae_handle,
&ec->tx_coalesce_usecs,
&ec->rx_coalesce_usecs);
- ops->get_rx_max_coalesced_frames(
+ ops->get_max_coalesced_frames(
priv->ae_handle,
&ec->tx_max_coalesced_frames,
&ec->rx_max_coalesced_frames);
@@ -801,30 +801,28 @@ static int hns_set_coalesce(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- int ret;
+ int rc1, rc2;
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
return -EINVAL;
- if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames)
- return -EINVAL;
-
if ((!ops->set_coalesce_usecs) ||
(!ops->set_coalesce_frames))
return -ESRCH;
- ret = ops->set_coalesce_usecs(priv->ae_handle,
+ rc1 = ops->set_coalesce_usecs(priv->ae_handle,
ec->rx_coalesce_usecs);
- if (ret)
- return ret;
- ret = ops->set_coalesce_frames(
- priv->ae_handle,
- ec->rx_max_coalesced_frames);
+ rc2 = ops->set_coalesce_frames(priv->ae_handle,
+ ec->tx_max_coalesced_frames,
+ ec->rx_max_coalesced_frames);
- return ret;
+ if (rc1 || rc2)
+ return -EINVAL;
+
+ return 0;
}
/**
@@ -1253,12 +1251,10 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
ops = priv->ae_handle->dev->ops;
- /* currently hfunc can only be Toeplitz hash */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ netdev_err(netdev, "Invalid hfunc!\n");
return -EOPNOTSUPP;
- if (!indir)
- return 0;
+ }
return ops->set_rss(priv->ae_handle, indir, key, hfunc);
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 501eb2090ca6..e5221d95afe1 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -23,17 +23,9 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/spinlock_types.h>
#define MDIO_DRV_NAME "Hi-HNS_MDIO"
#define MDIO_BUS_NAME "Hisilicon MII Bus"
-#define MDIO_DRV_VERSION "1.3.0"
-#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
-#define MDIO_DRV_STRING MDIO_BUS_NAME
-#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME
-
-#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
-#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_TIMEOUT 1000000
@@ -64,9 +56,7 @@ struct hns_mdio_device {
#define MDIO_CMD_DEVAD_S 0
#define MDIO_CMD_PRTAD_M 0x1f
#define MDIO_CMD_PRTAD_S 5
-#define MDIO_CMD_OP_M 0x3
#define MDIO_CMD_OP_S 10
-#define MDIO_CMD_ST_M 0x3
#define MDIO_CMD_ST_S 12
#define MDIO_CMD_START_B 14
@@ -185,18 +175,20 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
static int hns_mdio_wait_ready(struct mii_bus *bus)
{
struct hns_mdio_device *mdio_dev = bus->priv;
+ u32 cmd_reg_value;
int i;
- u32 cmd_reg_value = 1;
/* wait for MDIO_COMMAND_REG's mdio_start == 0 */
/* after that we can do a read or write */
- for (i = 0; cmd_reg_value; i++) {
+ for (i = 0; i < MDIO_TIMEOUT; i++) {
cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
MDIO_COMMAND_REG,
MDIO_CMD_START_B);
- if (i == MDIO_TIMEOUT)
- return -ETIMEDOUT;
+ if (!cmd_reg_value)
+ break;
}
+ if ((i == MDIO_TIMEOUT) && cmd_reg_value)
+ return -ETIMEDOUT;
return 0;
}
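
hns_mdio_wait_ready() now bounds the busy-poll in the loop condition itself instead of testing the counter inside the body, so a stuck start bit can no longer spin forever. A compilable sketch of the same idiom; read_start_bit() is a hypothetical stub for MDIO_GET_REG_BIT():

#include <errno.h>
#include <stdio.h>

#define MDIO_TIMEOUT 1000000

/* Hypothetical stub for MDIO_GET_REG_BIT(): reports busy for three polls. */
static unsigned int read_start_bit(void)
{
	static int polls_left = 3;

	return polls_left-- > 0;
}

/* Bounded poll as in hns_mdio_wait_ready(): the loop condition, not a
 * counter check inside the body, guarantees termination. */
static int wait_ready(void)
{
	unsigned int busy = 1;
	int i;

	for (i = 0; i < MDIO_TIMEOUT; i++) {
		busy = read_start_bit();
		if (!busy)
			break;
	}

	return busy ? -ETIMEDOUT : 0;
}

int main(void)
{
	printf("rc=%d\n", wait_ready());
	return 0;
}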
diff --git a/drivers/net/ethernet/ibm/emac/Makefile b/drivers/net/ethernet/ibm/emac/Makefile
index eba21835d90d..98768ba0955a 100644
--- a/drivers/net/ethernet/ibm/emac/Makefile
+++ b/drivers/net/ethernet/ibm/emac/Makefile
@@ -8,4 +8,3 @@ ibm_emac-y := mal.o core.o phy.o
ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o
ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o
ibm_emac-$(CONFIG_IBM_EMAC_TAH) += tah.o
-ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index c44036d5761a..508923f39ccf 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1929,7 +1929,7 @@ static struct net_device_stats *emac_stats(struct net_device *ndev)
struct emac_instance *dev = netdev_priv(ndev);
struct emac_stats *st = &dev->stats;
struct emac_error_stats *est = &dev->estats;
- struct net_device_stats *nst = &dev->nstats;
+ struct net_device_stats *nst = &ndev->stats;
unsigned long flags;
DBG2(dev, "stats" NL);
@@ -3173,8 +3173,6 @@ static int emac_probe(struct platform_device *ofdev)
printk("%s: found %s PHY (0x%02x)\n", ndev->name,
dev->phy.def->name, dev->phy.address);
- emac_dbg_register(dev);
-
/* Life is good */
return 0;
@@ -3243,7 +3241,6 @@ static int emac_remove(struct platform_device *ofdev)
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
- emac_dbg_unregister(dev);
iounmap(dev->emacp);
if (dev->wol_irq)
@@ -3326,9 +3323,6 @@ static int __init emac_init(void)
printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
- /* Init debug stuff */
- emac_init_debug();
-
/* Build EMAC boot list */
emac_make_bootlist();
@@ -3373,7 +3367,6 @@ static void __exit emac_exit(void)
rgmii_exit();
zmii_exit();
mal_exit();
- emac_fini_debug();
/* Destroy EMAC boot list */
for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 0710a6685489..f10e156641d5 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -265,7 +265,6 @@ struct emac_instance {
/* Stats
*/
struct emac_error_stats estats;
- struct net_device_stats nstats;
struct emac_stats stats;
/* Misc
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
deleted file mode 100644
index a559f326bf63..000000000000
--- a/drivers/net/ethernet/ibm/emac/debug.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * drivers/net/ethernet/ibm/emac/debug.c
- *
- * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * Based on the arch/ppc version of the driver:
- *
- * Copyright (c) 2004, 2005 Zultys Technologies
- * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sysrq.h>
-#include <asm/io.h>
-
-#include "core.h"
-
-static DEFINE_SPINLOCK(emac_dbg_lock);
-
-static void emac_desc_dump(struct emac_instance *p)
-{
- int i;
- printk("** EMAC %s TX BDs **\n"
- " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
- p->ofdev->dev.of_node->full_name,
- p->tx_cnt, p->tx_slot, p->ack_slot);
- for (i = 0; i < NUM_TX_BUFF / 2; ++i)
- printk
- ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
- i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
- p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
- NUM_TX_BUFF / 2 + i,
- p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
- p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
- p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
- p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
-
- printk("** EMAC %s RX BDs **\n"
- " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
- " rx_sg_skb = 0x%p\n",
- p->ofdev->dev.of_node->full_name,
- p->rx_slot, p->commac.flags, p->rx_skb_size,
- p->rx_sync_size, p->rx_sg_skb);
- for (i = 0; i < NUM_RX_BUFF / 2; ++i)
- printk
- ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
- i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
- p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
- NUM_RX_BUFF / 2 + i,
- p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
- p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
- p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
- p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
-}
-
-static void emac_mac_dump(struct emac_instance *dev)
-{
- struct emac_regs __iomem *p = dev->emacp;
- const int xaht_regs = EMAC_XAHT_REGS(dev);
- u32 *gaht_base = emac_gaht_base(dev);
- u32 *iaht_base = emac_iaht_base(dev);
- int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC);
- int n;
-
- printk("** EMAC %s registers **\n"
- "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
- "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
- "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
- dev->ofdev->dev.of_node->full_name,
- in_be32(&p->mr0), in_be32(&p->mr1),
- in_be32(&p->tmr0), in_be32(&p->tmr1),
- in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
- in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
- in_be32(&p->vtci)
- );
-
- if (emac4sync)
- printk("MAR = %04x%08x MMAR = %04x%08x\n",
- in_be32(&p->u0.emac4sync.mahr),
- in_be32(&p->u0.emac4sync.malr),
- in_be32(&p->u0.emac4sync.mmahr),
- in_be32(&p->u0.emac4sync.mmalr)
- );
-
- for (n = 0; n < xaht_regs; n++)
- printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n));
-
- for (n = 0; n < xaht_regs; n++)
- printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n));
-
- printk("LSA = %04x%08x IPGVR = 0x%04x\n"
- "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
- "OCTX = 0x%08x OCRX = 0x%08x\n",
- in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
- in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
- in_be32(&p->octx), in_be32(&p->ocrx)
- );
-
- if (!emac4sync) {
- printk("IPCR = 0x%08x\n",
- in_be32(&p->u1.emac4.ipcr)
- );
- } else {
- printk("REVID = 0x%08x TPC = 0x%08x\n",
- in_be32(&p->u1.emac4sync.revid),
- in_be32(&p->u1.emac4sync.tpc)
- );
- }
-
- emac_desc_dump(dev);
-}
-
-static void emac_mal_dump(struct mal_instance *mal)
-{
- int i;
-
- printk("** MAL %s Registers **\n"
- "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
- "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
- "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
- mal->ofdev->dev.of_node->full_name,
- get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
- get_mal_dcrn(mal, MAL_IER),
- get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
- get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
- get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
- get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
- );
-
- printk("TX|");
- for (i = 0; i < mal->num_tx_chans; ++i) {
- if (i && !(i % 4))
- printk("\n ");
- printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
- }
- printk("\nRX|");
- for (i = 0; i < mal->num_rx_chans; ++i) {
- if (i && !(i % 4))
- printk("\n ");
- printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
- }
- printk("\n ");
- for (i = 0; i < mal->num_rx_chans; ++i) {
- u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
- if (i && !(i % 3))
- printk("\n ");
- printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
- }
- printk("\n");
-}
-
-static struct emac_instance *__emacs[4];
-static struct mal_instance *__mals[1];
-
-void emac_dbg_register(struct emac_instance *dev)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
- for (i = 0; i < ARRAY_SIZE(__emacs); i++)
- if (__emacs[i] == NULL) {
- __emacs[i] = dev;
- break;
- }
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void emac_dbg_unregister(struct emac_instance *dev)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
- for (i = 0; i < ARRAY_SIZE(__emacs); i++)
- if (__emacs[i] == dev) {
- __emacs[i] = NULL;
- break;
- }
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void mal_dbg_register(struct mal_instance *mal)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
- for (i = 0; i < ARRAY_SIZE(__mals); i++)
- if (__mals[i] == NULL) {
- __mals[i] = mal;
- break;
- }
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void mal_dbg_unregister(struct mal_instance *mal)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
- for (i = 0; i < ARRAY_SIZE(__mals); i++)
- if (__mals[i] == mal) {
- __mals[i] = NULL;
- break;
- }
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-void emac_dbg_dump_all(void)
-{
- unsigned int i;
- unsigned long flags;
-
- spin_lock_irqsave(&emac_dbg_lock, flags);
-
- for (i = 0; i < ARRAY_SIZE(__mals); ++i)
- if (__mals[i])
- emac_mal_dump(__mals[i]);
-
- for (i = 0; i < ARRAY_SIZE(__emacs); ++i)
- if (__emacs[i])
- emac_mac_dump(__emacs[i]);
-
- spin_unlock_irqrestore(&emac_dbg_lock, flags);
-}
-
-#if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key)
-{
- emac_dbg_dump_all();
-}
-
-static struct sysrq_key_op emac_sysrq_op = {
- .handler = emac_sysrq_handler,
- .help_msg = "emac(c)",
- .action_msg = "Show EMAC(s) status",
-};
-
-int __init emac_init_debug(void)
-{
- return register_sysrq_key('c', &emac_sysrq_op);
-}
-
-void __exit emac_fini_debug(void)
-{
- unregister_sysrq_key('c', &emac_sysrq_op);
-}
-
-#else
-int __init emac_init_debug(void)
-{
- return 0;
-}
-void __exit emac_fini_debug(void)
-{
-}
-#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
index 9c45efe4c8fe..5bdfc174a07e 100644
--- a/drivers/net/ethernet/ibm/emac/debug.h
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -25,32 +25,9 @@
#include "core.h"
#if defined(CONFIG_IBM_EMAC_DEBUG)
-
-struct emac_instance;
-struct mal_instance;
-
-void emac_dbg_register(struct emac_instance *dev);
-void emac_dbg_unregister(struct emac_instance *dev);
-void mal_dbg_register(struct mal_instance *mal);
-void mal_dbg_unregister(struct mal_instance *mal);
-int emac_init_debug(void) __init;
-void emac_fini_debug(void) __exit;
-void emac_dbg_dump_all(void);
-
# define DBG_LEVEL 1
-
#else
-
-# define emac_dbg_register(x) do { } while(0)
-# define emac_dbg_unregister(x) do { } while(0)
-# define mal_dbg_register(x) do { } while(0)
-# define mal_dbg_unregister(x) do { } while(0)
-# define emac_init_debug() do { } while(0)
-# define emac_fini_debug() do { } while(0)
-# define emac_dbg_dump_all() do { } while(0)
-
# define DBG_LEVEL 0
-
#endif
#define EMAC_DBG(d, name, fmt, arg...) \
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index cd3227b088b7..91b1a558f37d 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -695,8 +695,6 @@ static int mal_probe(struct platform_device *ofdev)
wmb();
platform_set_drvdata(ofdev, mal);
- mal_dbg_register(mal);
-
return 0;
fail6:
@@ -740,8 +738,6 @@ static int mal_remove(struct platform_device *ofdev)
mal_reset(mal);
- mal_dbg_unregister(mal);
-
dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 7acda04d034e..ed8780cca982 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -146,7 +146,6 @@ struct ibmveth_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
struct napi_struct napi;
- struct net_device_stats stats;
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b23d6545f835..4fcd2f0378ba 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -65,7 +65,6 @@
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
-#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
@@ -75,6 +74,7 @@
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
#include "ibmvnic.h"
@@ -89,7 +89,6 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
-static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -110,6 +109,11 @@ static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
+static void send_login(struct ibmvnic_adapter *adapter);
+static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
+static int ibmvnic_init(struct ibmvnic_adapter *);
+static void release_crq_queue(struct ibmvnic_adapter *);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -159,21 +163,6 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
return rc;
}
-/* net_device_ops functions */
-
-static void init_rx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rx_pool *rx_pool, int num, int index,
- int buff_size, int active)
-{
- netdev_dbg(adapter->netdev,
- "Initializing rx_pool %d, %d buffs, %d bytes each\n",
- index, num, buff_size);
- rx_pool->size = num;
- rx_pool->index = index;
- rx_pool->buff_size = buff_size;
- rx_pool->active = active;
-}
-
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
@@ -202,45 +191,12 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
{
struct device *dev = &adapter->vdev->dev;
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ if (!ltb->buff)
+ return;
+
if (!adapter->failover)
send_request_unmap(adapter, ltb->map_id);
-}
-
-static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rx_pool *pool)
-{
- struct device *dev = &adapter->vdev->dev;
- int i;
-
- pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
- if (!pool->free_map)
- return -ENOMEM;
-
- pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
- GFP_KERNEL);
-
- if (!pool->rx_buff) {
- dev_err(dev, "Couldn't alloc rx buffers\n");
- kfree(pool->free_map);
- return -ENOMEM;
- }
-
- if (alloc_long_term_buff(adapter, &pool->long_term_buff,
- pool->size * pool->buff_size)) {
- kfree(pool->free_map);
- kfree(pool->rx_buff);
- return -ENOMEM;
- }
-
- for (i = 0; i < pool->size; ++i)
- pool->free_map[i] = i;
-
- atomic_set(&pool->available, 0);
- pool->next_alloc = 0;
- pool->next_free = 0;
-
- return 0;
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
@@ -347,93 +303,195 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
}
}
-static void free_rx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rx_pool *pool)
+static void release_stats_token(struct ibmvnic_adapter *adapter)
{
- int i;
+ struct device *dev = &adapter->vdev->dev;
+
+ if (!adapter->stats_token)
+ return;
+
+ dma_unmap_single(dev, adapter->stats_token,
+ sizeof(struct ibmvnic_statistics),
+ DMA_FROM_DEVICE);
+ adapter->stats_token = 0;
+}
+
+static int init_stats_token(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ dma_addr_t stok;
+
+ stok = dma_map_single(dev, &adapter->stats,
+ sizeof(struct ibmvnic_statistics),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, stok)) {
+ dev_err(dev, "Couldn't map stats buffer\n");
+ return -1;
+ }
+
+ adapter->stats_token = stok;
+ return 0;
+}
- kfree(pool->free_map);
- pool->free_map = NULL;
+static void release_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rx_pool *rx_pool;
+ int rx_scrqs;
+ int i, j;
- if (!pool->rx_buff)
+ if (!adapter->rx_pool)
return;
- for (i = 0; i < pool->size; i++) {
- if (pool->rx_buff[i].skb) {
- dev_kfree_skb_any(pool->rx_buff[i].skb);
- pool->rx_buff[i].skb = NULL;
+ rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ for (i = 0; i < rx_scrqs; i++) {
+ rx_pool = &adapter->rx_pool[i];
+
+ kfree(rx_pool->free_map);
+ free_long_term_buff(adapter, &rx_pool->long_term_buff);
+
+ if (!rx_pool->rx_buff)
+ continue;
+
+ for (j = 0; j < rx_pool->size; j++) {
+ if (rx_pool->rx_buff[j].skb) {
+ dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+ rx_pool->rx_buff[j].skb = NULL;
+ }
}
+
+ kfree(rx_pool->rx_buff);
}
- kfree(pool->rx_buff);
- pool->rx_buff = NULL;
+
+ kfree(adapter->rx_pool);
+ adapter->rx_pool = NULL;
}
-static int ibmvnic_open(struct net_device *netdev)
+static int init_rx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_tx_pool *tx_pool;
- union ibmvnic_crq crq;
+ struct ibmvnic_rx_pool *rx_pool;
int rxadd_subcrqs;
u64 *size_array;
- int tx_subcrqs;
int i, j;
rxadd_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- tx_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->
- off_rxadd_buff_size));
- adapter->map_id = 1;
- adapter->napi = kcalloc(adapter->req_rx_queues,
- sizeof(struct napi_struct), GFP_KERNEL);
- if (!adapter->napi)
- goto alloc_napi_failed;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
- NAPI_POLL_WEIGHT);
- napi_enable(&adapter->napi[i]);
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+
+ adapter->rx_pool = kcalloc(rxadd_subcrqs,
+ sizeof(struct ibmvnic_rx_pool),
+ GFP_KERNEL);
+ if (!adapter->rx_pool) {
+ dev_err(dev, "Failed to allocate rx pools\n");
+ return -1;
}
- adapter->rx_pool =
- kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
- if (!adapter->rx_pool)
- goto rx_pool_arr_alloc_failed;
- send_map_query(adapter);
for (i = 0; i < rxadd_subcrqs; i++) {
- init_rx_pool(adapter, &adapter->rx_pool[i],
- adapter->req_rx_add_entries_per_subcrq, i,
- be64_to_cpu(size_array[i]), 1);
- if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
- dev_err(dev, "Couldn't alloc rx pool\n");
- goto rx_pool_alloc_failed;
+ rx_pool = &adapter->rx_pool[i];
+
+ netdev_dbg(adapter->netdev,
+ "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
+ i, adapter->req_rx_add_entries_per_subcrq,
+ be64_to_cpu(size_array[i]));
+
+ rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
+ rx_pool->index = i;
+ rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->active = 1;
+
+ rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
+ GFP_KERNEL);
+ if (!rx_pool->free_map) {
+ release_rx_pools(adapter);
+ return -1;
+ }
+
+ rx_pool->rx_buff = kcalloc(rx_pool->size,
+ sizeof(struct ibmvnic_rx_buff),
+ GFP_KERNEL);
+ if (!rx_pool->rx_buff) {
+ dev_err(dev, "Couldn't alloc rx buffers\n");
+ release_rx_pools(adapter);
+ return -1;
+ }
+
+ if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size * rx_pool->buff_size)) {
+ release_rx_pools(adapter);
+ return -1;
}
+
+ for (j = 0; j < rx_pool->size; ++j)
+ rx_pool->free_map[j] = j;
+
+ atomic_set(&rx_pool->available, 0);
+ rx_pool->next_alloc = 0;
+ rx_pool->next_free = 0;
+ }
+
+ return 0;
+}
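+
The new init_rx_pools() relies on release_rx_pools() being safe against partially built state, so every allocation failure unwinds through one idempotent teardown instead of the old ladder of goto labels in ibmvnic_open(). A stand-alone sketch of that pairing, with an illustrative struct pool rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative pool; not the driver's struct ibmvnic_rx_pool. */
struct pool {
	int *free_map;
	char *buffers;
};

/* Idempotent teardown: every pointer is freed and NULLed, so calling it
 * against half-initialized state is safe, as with release_rx_pools(). */
static void release_pool(struct pool *p)
{
	free(p->free_map);
	p->free_map = NULL;
	free(p->buffers);
	p->buffers = NULL;
}

static int init_pool(struct pool *p, int n)
{
	p->free_map = calloc(n, sizeof(int));
	if (!p->free_map)
		goto fail;

	p->buffers = calloc(n, 64);
	if (!p->buffers)
		goto fail;

	return 0;
fail:
	release_pool(p);	/* one unwind path instead of a label ladder */
	return -1;
}

int main(void)
{
	struct pool p = { 0 };

	printf("rc=%d\n", init_pool(&p, 16));
	release_pool(&p);
	return 0;
}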
+
+static void release_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_tx_pool *tx_pool;
+ int i, tx_scrqs;
+
+ if (!adapter->tx_pool)
+ return;
+
+ tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ for (i = 0; i < tx_scrqs; i++) {
+ tx_pool = &adapter->tx_pool[i];
+ kfree(tx_pool->tx_buff);
+ free_long_term_buff(adapter, &tx_pool->long_term_buff);
+ kfree(tx_pool->free_map);
}
- adapter->tx_pool =
- kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+ kfree(adapter->tx_pool);
+ adapter->tx_pool = NULL;
+}
+
+static int init_tx_pools(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_tx_pool *tx_pool;
+ int tx_subcrqs;
+ int i, j;
+
+ tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ adapter->tx_pool = kcalloc(tx_subcrqs,
+ sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
- goto tx_pool_arr_alloc_failed;
+ return -1;
+
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
- tx_pool->tx_buff =
- kcalloc(adapter->req_tx_entries_per_subcrq,
- sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
- if (!tx_pool->tx_buff)
- goto tx_pool_alloc_failed;
+ tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
+ sizeof(struct ibmvnic_tx_buff),
+ GFP_KERNEL);
+ if (!tx_pool->tx_buff) {
+ dev_err(dev, "tx pool buffer allocation failed\n");
+ release_tx_pools(adapter);
+ return -1;
+ }
if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
adapter->req_tx_entries_per_subcrq *
- adapter->req_mtu))
- goto tx_ltb_alloc_failed;
+ adapter->req_mtu)) {
+ release_tx_pools(adapter);
+ return -1;
+ }
- tx_pool->free_map =
- kcalloc(adapter->req_tx_entries_per_subcrq,
- sizeof(int), GFP_KERNEL);
- if (!tx_pool->free_map)
- goto tx_fm_alloc_failed;
+ tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
+ sizeof(int), GFP_KERNEL);
+ if (!tx_pool->free_map) {
+ release_tx_pools(adapter);
+ return -1;
+ }
for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
tx_pool->free_map[j] = j;
@@ -441,20 +499,183 @@ static int ibmvnic_open(struct net_device *netdev)
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
}
- adapter->bounce_buffer_size =
- (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
- adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
- GFP_KERNEL);
- if (!adapter->bounce_buffer)
- goto bounce_alloc_failed;
- adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
- adapter->bounce_buffer_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- dev_err(dev, "Couldn't map tx bounce buffer\n");
- goto bounce_map_failed;
+ return 0;
+}
+
+static void release_error_buffers(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_error_buff *error_buff, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->error_list_lock, flags);
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
+ list_del(&error_buff->list);
+ dma_unmap_single(dev, error_buff->dma, error_buff->len,
+ DMA_FROM_DEVICE);
+ kfree(error_buff->buff);
+ kfree(error_buff);
+ }
+ spin_unlock_irqrestore(&adapter->error_list_lock, flags);
+}
+
+static int ibmvnic_login(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ unsigned long timeout = msecs_to_jiffies(30000);
+ struct device *dev = &adapter->vdev->dev;
+
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs(adapter);
+
+ reinit_completion(&adapter->init_done);
+ send_cap_queries(adapter);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Capabilities query timeout\n");
+ return -1;
+ }
+ }
+
+ reinit_completion(&adapter->init_done);
+ send_login(adapter);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Login timeout\n");
+ return -1;
+ }
+ } while (adapter->renegotiate);
+
+ return 0;
+}
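+
ibmvnic_login() loops as long as the server flags renegotiation, redoing the capability exchange before each retry. A reduced sketch of the control flow, with stub query_caps()/do_login() helpers and a plain bool in place of the adapter state and the timed completion waits:

#include <stdbool.h>
#include <stdio.h>

static bool renegotiate;	/* set by a hypothetical response handler */

static int query_caps(void)
{
	return 0;	/* pretend the capability exchange succeeded */
}

static int do_login(void)
{
	static int asks_left = 1;

	renegotiate = asks_left-- > 0;	/* server asks to renegotiate once */
	return 0;
}

/* Control flow of ibmvnic_login(): redo the capability exchange whenever
 * the server flags renegotiation, then retry the login. */
static int login(void)
{
	do {
		if (renegotiate) {
			renegotiate = false;
			if (query_caps())
				return -1;
		}
		if (do_login())
			return -1;
	} while (renegotiate);

	return 0;
}

int main(void)
{
	printf("rc=%d\n", login());
	return 0;
}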
+
+static void release_resources(struct ibmvnic_adapter *adapter)
+{
+ release_tx_pools(adapter);
+ release_rx_pools(adapter);
+
+ release_stats_token(adapter);
+ release_error_buffers(adapter);
+}
+
+static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
+{
+ struct net_device *netdev = adapter->netdev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ union ibmvnic_crq crq;
+ bool resend;
+ int rc;
+
+ if (adapter->logical_link_state == link_state) {
+ netdev_dbg(netdev, "Link state already %d\n", link_state);
+ return 0;
+ }
+
+ netdev_err(netdev, "setting link state %d\n", link_state);
+ memset(&crq, 0, sizeof(crq));
+ crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
+ crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
+ crq.logical_link_state.link_state = link_state;
+
+ do {
+ resend = false;
+
+ reinit_completion(&adapter->init_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc) {
+ netdev_err(netdev, "Failed to set link state\n");
+ return rc;
+ }
+
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ netdev_err(netdev, "timeout setting link state\n");
+ return -1;
+ }
+
+ if (adapter->init_done_rc == 1) {
+ /* Partial success, delay and re-send */
+ mdelay(1000);
+ resend = true;
+ }
+ } while (resend);
+
+ return 0;
+}
+
+static int set_real_num_queues(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
+
+ rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
+ if (rc) {
+ netdev_err(netdev, "failed to set the number of tx queues\n");
+ return rc;
+ }
+
+ rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
+ if (rc)
+ netdev_err(netdev, "failed to set the number of rx queues\n");
+
+ return rc;
+}
+
+static int ibmvnic_open(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
+ int rc = 0;
+ int i;
+
+ if (adapter->is_closed) {
+ rc = ibmvnic_init(adapter);
+ if (rc)
+ return rc;
}
+
+ rc = ibmvnic_login(netdev);
+ if (rc)
+ return rc;
+
+ rc = set_real_num_queues(netdev);
+ if (rc)
+ return rc;
+
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev, "failed to initialize sub crq irqs\n");
+ return -1;
+ }
+
+ rc = init_stats_token(adapter);
+ if (rc)
+ return rc;
+
+ adapter->map_id = 1;
+ adapter->napi = kcalloc(adapter->req_rx_queues,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!adapter->napi)
+ goto ibmvnic_open_fail;
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&adapter->napi[i]);
+ }
+
+ send_map_query(adapter);
+
+ rc = init_rx_pools(netdev);
+ if (rc)
+ goto ibmvnic_open_fail;
+
+ rc = init_tx_pools(netdev);
+ if (rc)
+ goto ibmvnic_open_fail;
+
replenish_pools(adapter);
/* We're ready to receive frames, enable the sub-crq interrupts and
@@ -466,106 +687,63 @@ static int ibmvnic_open(struct net_device *netdev)
for (i = 0; i < adapter->req_tx_queues; i++)
enable_scrq_irq(adapter, adapter->tx_scrq[i]);
- memset(&crq, 0, sizeof(crq));
- crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
- crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
- crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
- ibmvnic_send_crq(adapter, &crq);
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
+ if (rc)
+ goto ibmvnic_open_fail;
netif_tx_start_all_queues(netdev);
+ adapter->is_closed = false;
return 0;
-bounce_map_failed:
- kfree(adapter->bounce_buffer);
-bounce_alloc_failed:
- i = tx_subcrqs - 1;
- kfree(adapter->tx_pool[i].free_map);
-tx_fm_alloc_failed:
- free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
-tx_ltb_alloc_failed:
- kfree(adapter->tx_pool[i].tx_buff);
-tx_pool_alloc_failed:
- for (j = 0; j < i; j++) {
- kfree(adapter->tx_pool[j].tx_buff);
- free_long_term_buff(adapter,
- &adapter->tx_pool[j].long_term_buff);
- kfree(adapter->tx_pool[j].free_map);
- }
- kfree(adapter->tx_pool);
- adapter->tx_pool = NULL;
-tx_pool_arr_alloc_failed:
- i = rxadd_subcrqs;
-rx_pool_alloc_failed:
- for (j = 0; j < i; j++) {
- free_rx_pool(adapter, &adapter->rx_pool[j]);
- free_long_term_buff(adapter,
- &adapter->rx_pool[j].long_term_buff);
- }
- kfree(adapter->rx_pool);
- adapter->rx_pool = NULL;
-rx_pool_arr_alloc_failed:
+ibmvnic_open_fail:
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
-alloc_napi_failed:
+ release_resources(adapter);
return -ENOMEM;
}
+static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ disable_irq(adapter->tx_scrq[i]->irq);
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ disable_irq(adapter->rx_scrq[i]->irq);
+ }
+}
+
static int ibmvnic_close(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- struct device *dev = &adapter->vdev->dev;
- union ibmvnic_crq crq;
+ int rc = 0;
int i;
adapter->closing = true;
+ disable_sub_crqs(adapter);
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
+ if (adapter->napi) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_disable(&adapter->napi[i]);
+ }
if (!adapter->failover)
netif_tx_stop_all_queues(netdev);
- if (adapter->bounce_buffer) {
- if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->bounce_buffer_dma,
- adapter->bounce_buffer_size,
- DMA_BIDIRECTIONAL);
- adapter->bounce_buffer_dma = DMA_ERROR_CODE;
- }
- kfree(adapter->bounce_buffer);
- adapter->bounce_buffer = NULL;
- }
-
- memset(&crq, 0, sizeof(crq));
- crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
- crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
- crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
- ibmvnic_send_crq(adapter, &crq);
-
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
- i++) {
- kfree(adapter->tx_pool[i].tx_buff);
- free_long_term_buff(adapter,
- &adapter->tx_pool[i].long_term_buff);
- kfree(adapter->tx_pool[i].free_map);
- }
- kfree(adapter->tx_pool);
- adapter->tx_pool = NULL;
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++) {
- free_rx_pool(adapter, &adapter->rx_pool[i]);
- free_long_term_buff(adapter,
- &adapter->rx_pool[i].long_term_buff);
- }
- kfree(adapter->rx_pool);
- adapter->rx_pool = NULL;
+ release_resources(adapter);
+ adapter->is_closed = true;
adapter->closing = false;
-
- return 0;
+ return rc;
}
/**
@@ -714,7 +892,6 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_bytes = 0;
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
- bool used_bounce = false;
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
@@ -731,9 +908,13 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
be32_to_cpu(adapter->login_rsp_buf->
off_txsubm_subcrqs));
if (adapter->migrated) {
+ if (!netif_subqueue_stopped(netdev, skb))
+ netif_stop_subqueue(netdev, queue_num);
+ dev_kfree_skb_any(skb);
+
tx_send_failed++;
tx_dropped++;
- ret = NETDEV_TX_BUSY;
+ ret = NETDEV_TX_OK;
goto out;
}
@@ -755,7 +936,6 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->index = index;
tx_buff->pool_index = queue_num;
tx_buff->last_frag = true;
- tx_buff->used_bounce = used_bounce;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -800,11 +980,13 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
sizeof(tx_buff->indir_arr),
DMA_TO_DEVICE);
if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+ dev_kfree_skb_any(skb);
+ tx_buff->skb = NULL;
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(dev, "tx: unable to map descriptor array\n");
tx_map_failed++;
tx_dropped++;
- ret = NETDEV_TX_BUSY;
+ ret = NETDEV_TX_OK;
goto out;
}
lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
@@ -823,15 +1005,20 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
else
tx_pool->consumer_index--;
+ dev_kfree_skb_any(skb);
+ tx_buff->skb = NULL;
+
+ if (lpar_rc == H_CLOSED)
+ netif_stop_subqueue(netdev, queue_num);
+
tx_send_failed++;
tx_dropped++;
- ret = NETDEV_TX_BUSY;
+ ret = NETDEV_TX_OK;
goto out;
}
- atomic_inc(&tx_scrq->used);
-
- if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+ if (atomic_inc_return(&tx_scrq->used)
+ >= adapter->req_tx_entries_per_subcrq) {
netdev_info(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}
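
Replacing atomic_inc() plus a separate atomic_read() with atomic_inc_return() makes the count-and-test a single atomic step, so two CPUs can no longer both read the stale pre-increment count and miss the "ring full" condition. A C11 sketch of the same pattern built on atomic_fetch_add():

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 4

static atomic_int used;
static int queue_stopped;

/* Count the slot and test the result in one atomic step, as the
 * atomic_inc_return() change above does. */
static void account_tx(void)
{
	if (atomic_fetch_add(&used, 1) + 1 >= RING_SIZE)
		queue_stopped = 1;
}

int main(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++)
		account_tx();
	printf("stopped=%d used=%d\n", queue_stopped, atomic_load(&used));
	return 0;
}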
@@ -975,7 +1162,15 @@ restart_poll:
skb = rx_buff->skb;
skb_copy_to_linear_data(skb, rx_buff->data + offset,
length);
- skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
+
+ /* VLAN Header has been stripped by the system firmware and
+ * needs to be inserted by the driver
+ */
+ if (adapter->rx_vlan_header_insertion &&
+ (flags & IBMVNIC_VLAN_STRIPPED))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ ntohs(next->rx_comp.vlan_tci));
+
/* free the entry */
next->rx_comp.first = 0;
remove_buff_from_pool(adapter, rx_buff);
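
In this hunk the firmware strips the 802.1Q tag and reports the TCI out of band, so the driver re-attaches it with __vlan_hwaccel_put_tag() only when the stripped flag is set. A user-space sketch of the conditional re-tagging; struct rx_meta and VLAN_STRIPPED are illustrative stand-ins for the sub-CRQ metadata:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_STRIPPED 0x1	/* illustrative flag bit */

/* Illustrative stand-in for the sub-CRQ rx metadata. */
struct rx_meta {
	uint16_t vlan_tci_be;	/* TCI as reported by the device, big-endian */
	uint8_t flags;
};

/* Re-attach the tag only when the device says it stripped one, mirroring
 * the __vlan_hwaccel_put_tag() call above. */
static void maybe_retag(const struct rx_meta *m, uint16_t *tci, int *tagged)
{
	if (m->flags & VLAN_STRIPPED) {
		*tci = ntohs(m->vlan_tci_be);
		*tagged = 1;
	} else {
		*tagged = 0;
	}
}

int main(void)
{
	struct rx_meta m = { htons(100), VLAN_STRIPPED };
	uint16_t tci = 0;
	int tagged;

	maybe_retag(&m, &tci, &tagged);
	printf("tagged=%d tci=%u\n", tagged, tci);
	return 0;
}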
@@ -1176,6 +1371,12 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
scrq->crq_num);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Failed to release sub-CRQ %16lx, rc = %ld\n",
+ scrq->crq_num, rc);
+ }
+
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)scrq->msgs, 2);
@@ -1189,12 +1390,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
struct ibmvnic_sub_crq_queue *scrq;
int rc;
- scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
+ scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
if (!scrq)
return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
- memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ scrq->msgs =
+ (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
if (!scrq->msgs) {
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
goto zero_page_failed;
@@ -1222,9 +1423,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
- scrq->cur = 0;
- atomic_set(&scrq->used, 0);
- scrq->rx_skb_top = NULL;
spin_lock_init(&scrq->lock);
netdev_dbg(adapter->netdev,
@@ -1249,49 +1447,40 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
int i;
if (adapter->tx_scrq) {
- for (i = 0; i < adapter->req_tx_queues; i++)
- if (adapter->tx_scrq[i]) {
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ if (!adapter->tx_scrq[i])
+ continue;
+
+ if (adapter->tx_scrq[i]->irq) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
irq_dispose_mapping(adapter->tx_scrq[i]->irq);
- release_sub_crq_queue(adapter,
- adapter->tx_scrq[i]);
+ adapter->tx_scrq[i]->irq = 0;
}
+
+ release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+ }
+
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
}
if (adapter->rx_scrq) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- if (adapter->rx_scrq[i]) {
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ if (!adapter->rx_scrq[i])
+ continue;
+
+ if (adapter->rx_scrq[i]->irq) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
irq_dispose_mapping(adapter->rx_scrq[i]->irq);
- release_sub_crq_queue(adapter,
- adapter->rx_scrq[i]);
+ adapter->rx_scrq[i]->irq = 0;
}
- kfree(adapter->rx_scrq);
- adapter->rx_scrq = NULL;
- }
-}
-
-static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
-{
- int i;
- if (adapter->tx_scrq) {
- for (i = 0; i < adapter->req_tx_queues; i++)
- if (adapter->tx_scrq[i])
- release_sub_crq_queue(adapter,
- adapter->tx_scrq[i]);
- adapter->tx_scrq = NULL;
- }
+ release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+ }
- if (adapter->rx_scrq) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- if (adapter->rx_scrq[i])
- release_sub_crq_queue(adapter,
- adapter->rx_scrq[i]);
+ kfree(adapter->rx_scrq);
adapter->rx_scrq = NULL;
}
}
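
The reworked release_sub_crqs() flattens the nesting with early continue, releases each irq before its queue, and now also covers the queues release_sub_crqs_no_irqs() used to handle. A stand-alone sketch of that shape, with struct scrq reduced to its irq field:

#include <stdio.h>
#include <stdlib.h>

/* Reduced to the one field the teardown cares about. */
struct scrq {
	int irq;
};

/* Shape of the reworked release_sub_crqs(): skip empty slots with
 * `continue`, release the irq before the queue, then free the array. */
static void release_all(struct scrq **q, int n)
{
	int i;

	if (!q)
		return;

	for (i = 0; i < n; i++) {
		if (!q[i])
			continue;

		if (q[i]->irq) {
			/* free_irq() / irq_dispose_mapping() would go here */
			q[i]->irq = 0;
		}
		free(q[i]);
	}
	free(q);
}

int main(void)
{
	struct scrq **q = calloc(3, sizeof(*q));

	if (!q)
		return 1;
	q[1] = calloc(1, sizeof(**q));
	if (q[1])
		q[1]->irq = 42;
	release_all(q, 3);
	puts("released");
	return 0;
}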
@@ -1358,7 +1547,6 @@ restart_loop:
continue;
txbuff->data_dma[j] = 0;
- txbuff->used_bounce = false;
}
/* if sub_crq was sent indirectly */
first = txbuff->indir_arr[0].generic.first;
@@ -1369,9 +1557,8 @@ restart_loop:
}
if (txbuff->last_frag) {
- atomic_dec(&scrq->used);
-
- if (atomic_read(&scrq->used) <=
+ if (atomic_sub_return(next->tx_comp.num_comps,
+ &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
netif_subqueue_stopped(adapter->netdev,
txbuff->skb)) {
@@ -1487,52 +1674,24 @@ req_tx_irq_failed:
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
}
- release_sub_crqs_no_irqs(adapter);
+ release_sub_crqs(adapter);
return rc;
}
-static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
+static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_sub_crq_queue **allqueues;
int registered_queues = 0;
- union ibmvnic_crq crq;
int total_queues;
int more = 0;
int i;
- if (!retry) {
- /* Sub-CRQ entries are 32 byte long */
- int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
-
- if (adapter->min_tx_entries_per_subcrq > entries_page ||
- adapter->min_rx_add_entries_per_subcrq > entries_page) {
- dev_err(dev, "Fatal, invalid entries per sub-crq\n");
- goto allqueues_failed;
- }
-
- /* Get the minimum between the queried max and the entries
- * that fit in our PAGE_SIZE
- */
- adapter->req_tx_entries_per_subcrq =
- adapter->max_tx_entries_per_subcrq > entries_page ?
- entries_page : adapter->max_tx_entries_per_subcrq;
- adapter->req_rx_add_entries_per_subcrq =
- adapter->max_rx_add_entries_per_subcrq > entries_page ?
- entries_page : adapter->max_rx_add_entries_per_subcrq;
-
- adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
- adapter->req_rx_queues = adapter->opt_rx_comp_queues;
- adapter->req_rx_add_queues = adapter->max_rx_add_queues;
-
- adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
- }
-
total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
- allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
+ allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
if (!allqueues)
- goto allqueues_failed;
+ return -1;
for (i = 0; i < total_queues; i++) {
allqueues[i] = init_sub_crq_queue(adapter);
@@ -1570,7 +1729,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
}
adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
- sizeof(*adapter->tx_scrq), GFP_ATOMIC);
+ sizeof(*adapter->tx_scrq), GFP_KERNEL);
if (!adapter->tx_scrq)
goto tx_failed;
@@ -1580,7 +1739,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
- sizeof(*adapter->rx_scrq), GFP_ATOMIC);
+ sizeof(*adapter->rx_scrq), GFP_KERNEL);
if (!adapter->rx_scrq)
goto rx_failed;
@@ -1589,6 +1748,51 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
adapter->rx_scrq[i]->scrq_num = i;
}
+ kfree(allqueues);
+ return 0;
+
+rx_failed:
+ kfree(adapter->tx_scrq);
+ adapter->tx_scrq = NULL;
+tx_failed:
+ for (i = 0; i < registered_queues; i++)
+ release_sub_crq_queue(adapter, allqueues[i]);
+ kfree(allqueues);
+ return -1;
+}
+
+static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
+{
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+
+ if (!retry) {
+ /* Sub-CRQ entries are 32 byte long */
+ int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+
+ if (adapter->min_tx_entries_per_subcrq > entries_page ||
+ adapter->min_rx_add_entries_per_subcrq > entries_page) {
+ dev_err(dev, "Fatal, invalid entries per sub-crq\n");
+ return;
+ }
+
+ /* Get the minimum between the queried max and the entries
+ * that fit in our PAGE_SIZE
+ */
+ adapter->req_tx_entries_per_subcrq =
+ adapter->max_tx_entries_per_subcrq > entries_page ?
+ entries_page : adapter->max_tx_entries_per_subcrq;
+ adapter->req_rx_add_entries_per_subcrq =
+ adapter->max_rx_add_entries_per_subcrq > entries_page ?
+ entries_page : adapter->max_rx_add_entries_per_subcrq;
+
+ adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
+ adapter->req_rx_queues = adapter->opt_rx_comp_queues;
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+
+ adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
+ }
+
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
@@ -1642,20 +1846,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
}
-
- kfree(allqueues);
-
- return;
-
-rx_failed:
- kfree(adapter->tx_scrq);
- adapter->tx_scrq = NULL;
-tx_failed:
- for (i = 0; i < registered_queues; i++)
- release_sub_crq_queue(adapter, allqueues[i]);
- kfree(allqueues);
-allqueues_failed:
- ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -1829,13 +2019,11 @@ static void send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
struct ibmvnic_login_buffer *login_buffer;
- struct ibmvnic_inflight_cmd *inflight_cmd;
struct device *dev = &adapter->vdev->dev;
dma_addr_t rsp_buffer_token;
dma_addr_t buffer_token;
size_t rsp_buffer_size;
union ibmvnic_crq crq;
- unsigned long flags;
size_t buffer_size;
__be64 *tx_list_p;
__be64 *rx_list_p;
@@ -1872,11 +2060,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
dev_err(dev, "Couldn't map login rsp buffer\n");
goto buf_rsp_map_failed;
}
- inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
- if (!inflight_cmd) {
- dev_err(dev, "Couldn't allocate inflight_cmd\n");
- goto inflight_alloc_failed;
- }
+
adapter->login_buf = login_buffer;
adapter->login_buf_token = buffer_token;
adapter->login_buf_sz = buffer_size;
@@ -1927,20 +2111,10 @@ static void send_login(struct ibmvnic_adapter *adapter)
crq.login.cmd = LOGIN;
crq.login.ioba = cpu_to_be32(buffer_token);
crq.login.len = cpu_to_be32(buffer_size);
-
- memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_add_tail(&inflight_cmd->list, &adapter->inflight);
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
-
ibmvnic_send_crq(adapter, &crq);
return;
-inflight_alloc_failed:
- dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
- DMA_FROM_DEVICE);
buf_rsp_map_failed:
kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
@@ -2064,6 +2238,10 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
+ atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
+
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
@@ -2242,77 +2420,22 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
kfree(error_buff);
}
-static void handle_dump_size_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- int len = be32_to_cpu(crq->request_dump_size_rsp.len);
- struct ibmvnic_inflight_cmd *inflight_cmd;
- struct device *dev = &adapter->vdev->dev;
- union ibmvnic_crq newcrq;
- unsigned long flags;
-
- /* allocate and map buffer */
- adapter->dump_data = kmalloc(len, GFP_KERNEL);
- if (!adapter->dump_data) {
- complete(&adapter->fw_done);
- return;
- }
-
- adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, adapter->dump_data_token)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't map dump data\n");
- kfree(adapter->dump_data);
- complete(&adapter->fw_done);
- return;
- }
-
- inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
- if (!inflight_cmd) {
- dma_unmap_single(dev, adapter->dump_data_token, len,
- DMA_FROM_DEVICE);
- kfree(adapter->dump_data);
- complete(&adapter->fw_done);
- return;
- }
-
- memset(&newcrq, 0, sizeof(newcrq));
- newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
- newcrq.request_dump.cmd = REQUEST_DUMP;
- newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
- newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
-
- memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_add_tail(&inflight_cmd->list, &adapter->inflight);
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
-
- ibmvnic_send_crq(adapter, &newcrq);
-}
-
-static void handle_error_indication(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
+static void request_error_information(struct ibmvnic_adapter *adapter,
+ union ibmvnic_crq *err_crq)
{
- int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
- struct ibmvnic_inflight_cmd *inflight_cmd;
struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
struct ibmvnic_error_buff *error_buff;
- union ibmvnic_crq new_crq;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ union ibmvnic_crq crq;
unsigned long flags;
-
- dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
- crq->error_indication.
- flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
- be32_to_cpu(crq->error_indication.error_id),
- be16_to_cpu(crq->error_indication.error_cause));
+ int rc, detail_len;
error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
if (!error_buff)
return;
+ detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
if (!error_buff->buff) {
kfree(error_buff);
@@ -2322,43 +2445,61 @@ static void handle_error_indication(union ibmvnic_crq *crq,
error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, error_buff->dma)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't map error buffer\n");
- kfree(error_buff->buff);
- kfree(error_buff);
- return;
- }
-
- inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
- if (!inflight_cmd) {
- dma_unmap_single(dev, error_buff->dma, detail_len,
- DMA_FROM_DEVICE);
+ netdev_err(netdev, "Couldn't map error buffer\n");
kfree(error_buff->buff);
kfree(error_buff);
return;
}
error_buff->len = detail_len;
- error_buff->error_id = crq->error_indication.error_id;
+ error_buff->error_id = err_crq->error_indication.error_id;
spin_lock_irqsave(&adapter->error_list_lock, flags);
list_add_tail(&error_buff->list, &adapter->errors);
spin_unlock_irqrestore(&adapter->error_list_lock, flags);
- memset(&new_crq, 0, sizeof(new_crq));
- new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
- new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
- new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
- new_crq.request_error_info.len = cpu_to_be32(detail_len);
- new_crq.request_error_info.error_id = crq->error_indication.error_id;
+ memset(&crq, 0, sizeof(crq));
+ crq.request_error_info.first = IBMVNIC_CRQ_CMD;
+ crq.request_error_info.cmd = REQUEST_ERROR_INFO;
+ crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
+ crq.request_error_info.len = cpu_to_be32(detail_len);
+ crq.request_error_info.error_id = err_crq->error_indication.error_id;
+
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc) {
+ netdev_err(netdev, "failed to request error information\n");
+ goto err_info_fail;
+ }
+
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ netdev_err(netdev, "timeout waiting for error information\n");
+ goto err_info_fail;
+ }
+
+ return;
+
+err_info_fail:
+ spin_lock_irqsave(&adapter->error_list_lock, flags);
+ list_del(&error_buff->list);
+ spin_unlock_irqrestore(&adapter->error_list_lock, flags);
+
+ kfree(error_buff->buff);
+ kfree(error_buff);
+}
- memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
+static void handle_error_indication(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_add_tail(&inflight_cmd->list, &adapter->inflight);
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
+ dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
+ crq->error_indication.flags
+ & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
+ be32_to_cpu(crq->error_indication.error_id),
+ be16_to_cpu(crq->error_indication.error_cause));
- ibmvnic_send_crq(adapter, &new_crq);
+ if (be32_to_cpu(crq->error_indication.error_id))
+ request_error_information(adapter, crq);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
@@ -2428,9 +2569,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs_no_irqs(adapter);
+ release_sub_crqs(adapter);
*req_value = be64_to_cpu(crq->request_capability_rsp.number);
- init_sub_crqs(adapter, 1);
+ ibmvnic_send_req_caps(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -2473,7 +2614,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
struct ibmvnic_login_buffer *login = adapter->login_buf;
- union ibmvnic_crq crq;
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
@@ -2508,11 +2648,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
}
complete(&adapter->init_done);
- memset(&crq, 0, sizeof(crq));
- crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
- crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
- ibmvnic_send_crq(adapter, &crq);
-
return 0;
}
@@ -2687,6 +2822,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
adapter->vlan_header_insertion);
break;
+ case RX_VLAN_HEADER_INSERTION:
+ adapter->rx_vlan_header_insertion =
+ be64_to_cpu(crq->query_capability.number);
+ netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
+ adapter->rx_vlan_header_insertion);
+ break;
case MAX_TX_SG_ENTRIES:
adapter->max_tx_sg_entries =
be64_to_cpu(crq->query_capability.number);
@@ -2743,524 +2884,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_crqs) == 0) {
adapter->wait_capability = false;
- init_sub_crqs(adapter, 0);
- /* We're done querying the capabilities, initialize sub-crqs */
- }
-}
-
-static void handle_control_ras_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- u8 correlator = crq->control_ras_rsp.correlator;
- struct device *dev = &adapter->vdev->dev;
- bool found = false;
- int i;
-
- if (crq->control_ras_rsp.rc.code) {
- dev_warn(dev, "Control ras failed rc=%d\n",
- crq->control_ras_rsp.rc.code);
- return;
- }
-
- for (i = 0; i < adapter->ras_comp_num; i++) {
- if (adapter->ras_comps[i].correlator == correlator) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_warn(dev, "Correlator not found on control_ras_rsp\n");
- return;
- }
-
- switch (crq->control_ras_rsp.op) {
- case IBMVNIC_TRACE_LEVEL:
- adapter->ras_comps[i].trace_level = crq->control_ras.level;
- break;
- case IBMVNIC_ERROR_LEVEL:
- adapter->ras_comps[i].error_check_level =
- crq->control_ras.level;
- break;
- case IBMVNIC_TRACE_PAUSE:
- adapter->ras_comp_int[i].paused = 1;
- break;
- case IBMVNIC_TRACE_RESUME:
- adapter->ras_comp_int[i].paused = 0;
- break;
- case IBMVNIC_TRACE_ON:
- adapter->ras_comps[i].trace_on = 1;
- break;
- case IBMVNIC_TRACE_OFF:
- adapter->ras_comps[i].trace_on = 0;
- break;
- case IBMVNIC_CHG_TRACE_BUFF_SZ:
- /* trace_buff_sz is 3 bytes, stuff it into an int */
- ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
- ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
- crq->control_ras_rsp.trace_buff_sz[0];
- ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
- crq->control_ras_rsp.trace_buff_sz[1];
- ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
- crq->control_ras_rsp.trace_buff_sz[2];
- break;
- default:
- dev_err(dev, "invalid op %d on control_ras_rsp",
- crq->control_ras_rsp.op);
- }
-}
-
-static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
- loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_fw_trace_entry *trace;
- int num = ras_comp_int->num;
- union ibmvnic_crq crq;
- dma_addr_t trace_tok;
-
- if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
- return 0;
-
- trace =
- dma_alloc_coherent(dev,
- be32_to_cpu(adapter->ras_comps[num].
- trace_buff_size), &trace_tok,
- GFP_KERNEL);
- if (!trace) {
- dev_err(dev, "Couldn't alloc trace buffer\n");
- return 0;
+ ibmvnic_send_req_caps(adapter, 0);
}
-
- memset(&crq, 0, sizeof(crq));
- crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
- crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
- crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
- crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
- crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
-
- init_completion(&adapter->fw_done);
- ibmvnic_send_crq(adapter, &crq);
- wait_for_completion(&adapter->fw_done);
-
- if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
- len =
- be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
- *ppos;
-
- copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
-
- dma_free_coherent(dev,
- be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
- trace, trace_tok);
- *ppos += len;
- return len;
-}
-
-static const struct file_operations trace_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = trace_read,
-};
-
-static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
- loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[5]; /* 1 or 0 plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
-
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t paused_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
-
- adapter->ras_comp_int[num].paused = val ? 1 : 0;
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator = adapter->ras_comps[num].correlator;
- crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
- ibmvnic_send_crq(adapter, &crq);
-
- return len;
-}
-
-static const struct file_operations paused_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = paused_read,
- .write = paused_write,
-};
-
-static ssize_t tracing_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[5]; /* 1 or 0 plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
-
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t tracing_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator = adapter->ras_comps[num].correlator;
- crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
-
- return len;
-}
-
-static const struct file_operations tracing_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = tracing_read,
- .write = tracing_write,
-};
-
-static ssize_t error_level_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[5]; /* decimal max char plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
-
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t error_level_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
-
- if (val > 9)
- val = 9;
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator = adapter->ras_comps[num].correlator;
- crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
- crq.control_ras.level = val;
- ibmvnic_send_crq(adapter, &crq);
-
- return len;
-}
-
-static const struct file_operations error_level_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = error_level_read,
- .write = error_level_write,
-};
-
-static ssize_t trace_level_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[5]; /* decimal max char plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
- if (val > 9)
- val = 9;
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator =
- adapter->ras_comps[ras_comp_int->num].correlator;
- crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
- crq.control_ras.level = val;
- ibmvnic_send_crq(adapter, &crq);
-
- return len;
-}
-
-static const struct file_operations trace_level_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = trace_level_read,
- .write = trace_level_write,
-};
-
-static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- int num = ras_comp_int->num;
- char buff[9]; /* decimal max int plus \n and \0 */
- int size;
-
- size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
- if (*ppos >= size)
- return 0;
-
- copy_to_user(user_buf, buff, size);
- *ppos += size;
- return size;
-}
-
-static ssize_t trace_buff_size_write(struct file *file,
- const char __user *user_buf, size_t len,
- loff_t *ppos)
-{
- struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
- struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
- union ibmvnic_crq crq;
- unsigned long val;
- char buff[9]; /* decimal max int plus \n and \0 */
-
- copy_from_user(buff, user_buf, sizeof(buff));
- val = kstrtoul(buff, 10, NULL);
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ras.first = IBMVNIC_CRQ_CMD;
- crq.control_ras.cmd = CONTROL_RAS;
- crq.control_ras.correlator =
- adapter->ras_comps[ras_comp_int->num].correlator;
- crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
- /* trace_buff_sz is 3 bytes, stuff an int into it */
- crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
- crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
- crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
- ibmvnic_send_crq(adapter, &crq);
-
- return len;
-}
-
-static const struct file_operations trace_size_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = trace_buff_size_read,
- .write = trace_buff_size_write,
-};
-
-static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- struct dentry *dir_ent;
- struct dentry *ent;
- int i;
-
- debugfs_remove_recursive(adapter->ras_comps_ent);
-
- adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
- adapter->debugfs_dir);
- if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
- dev_info(dev, "debugfs create ras_comps dir failed\n");
- return;
- }
-
- for (i = 0; i < adapter->ras_comp_num; i++) {
- dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
- adapter->ras_comps_ent);
- if (!dir_ent || IS_ERR(dir_ent)) {
- dev_info(dev, "debugfs create %s dir failed\n",
- adapter->ras_comps[i].name);
- continue;
- }
-
- adapter->ras_comp_int[i].adapter = adapter;
- adapter->ras_comp_int[i].num = i;
- adapter->ras_comp_int[i].desc_blob.data =
- &adapter->ras_comps[i].description;
- adapter->ras_comp_int[i].desc_blob.size =
- sizeof(adapter->ras_comps[i].description);
-
- /* Don't need to remember the dentries because the debugfs dir
- * gets removed recursively
- */
- ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
- &adapter->ras_comp_int[i].desc_blob);
- ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
- dir_ent, &adapter->ras_comp_int[i],
- &trace_size_ops);
- ent = debugfs_create_file("trace_level",
- S_IRUGO |
- (adapter->ras_comps[i].trace_level !=
- 0xFF ? S_IWUSR : 0),
- dir_ent, &adapter->ras_comp_int[i],
- &trace_level_ops);
- ent = debugfs_create_file("error_level",
- S_IRUGO |
- (adapter->
- ras_comps[i].error_check_level !=
- 0xFF ? S_IWUSR : 0),
- dir_ent, &adapter->ras_comp_int[i],
- &trace_level_ops);
- ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
- dir_ent, &adapter->ras_comp_int[i],
- &tracing_ops);
- ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
- dir_ent, &adapter->ras_comp_int[i],
- &paused_ops);
- ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
- &adapter->ras_comp_int[i],
- &trace_ops);
- }
-}
-
-static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
-{
- int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
- struct device *dev = &adapter->vdev->dev;
- union ibmvnic_crq newcrq;
-
- adapter->ras_comps = dma_alloc_coherent(dev, len,
- &adapter->ras_comps_tok,
- GFP_KERNEL);
- if (!adapter->ras_comps) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't alloc fw comps buffer\n");
- return;
- }
-
- adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
- sizeof(struct ibmvnic_fw_comp_internal),
- GFP_KERNEL);
- if (!adapter->ras_comp_int)
- dma_free_coherent(dev, len, adapter->ras_comps,
- adapter->ras_comps_tok);
-
- memset(&newcrq, 0, sizeof(newcrq));
- newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
- newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
- newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
- newcrq.request_ras_comps.len = cpu_to_be32(len);
- ibmvnic_send_crq(adapter, &newcrq);
-}
-
-static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
-{
- struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
- struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff, *tmp2;
- unsigned long flags;
- unsigned long flags2;
-
- spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
- switch (inflight_cmd->crq.generic.cmd) {
- case LOGIN:
- dma_unmap_single(dev, adapter->login_buf_token,
- adapter->login_buf_sz,
- DMA_BIDIRECTIONAL);
- dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz,
- DMA_BIDIRECTIONAL);
- kfree(adapter->login_rsp_buf);
- kfree(adapter->login_buf);
- break;
- case REQUEST_DUMP:
- complete(&adapter->fw_done);
- break;
- case REQUEST_ERROR_INFO:
- spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry_safe(error_buff, tmp2,
- &adapter->errors, list) {
- dma_unmap_single(dev, error_buff->dma,
- error_buff->len,
- DMA_FROM_DEVICE);
- kfree(error_buff->buff);
- list_del(&error_buff->list);
- kfree(error_buff);
- }
- spin_unlock_irqrestore(&adapter->error_list_lock,
- flags2);
- break;
- }
- list_del(&inflight_cmd->list);
- kfree(inflight_cmd);
- }
- spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
static void ibmvnic_xport_event(struct work_struct *work)
@@ -3271,7 +2896,6 @@ static void ibmvnic_xport_event(struct work_struct *work)
struct device *dev = &adapter->vdev->dev;
long rc;
- ibmvnic_free_inflight(adapter);
release_sub_crqs(adapter);
if (adapter->migrated) {
rc = ibmvnic_reenable_crq_queue(adapter);
@@ -3290,11 +2914,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_generic_crq *gen_crq = &crq->generic;
struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->vdev->dev;
+ u64 *u64_crq = (u64 *)crq;
long rc;
netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
- ((unsigned long int *)crq)[0],
- ((unsigned long int *)crq)[1]);
+ (unsigned long int)cpu_to_be64(u64_crq[0]),
+ (unsigned long int)cpu_to_be64(u64_crq[1]));
switch (gen_crq->first) {
case IBMVNIC_CRQ_INIT_RSP:
switch (gen_crq->cmd) {
@@ -3374,9 +2999,14 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
handle_login_rsp(crq, adapter);
break;
case LOGICAL_LINK_STATE_RSP:
- netdev_dbg(netdev, "Got Logical Link State Response\n");
+ netdev_dbg(netdev,
+ "Got Logical Link State Response, state: %d rc: %d\n",
+ crq->logical_link_state_rsp.link_state,
+ crq->logical_link_state_rsp.rc.code);
adapter->logical_link_state =
crq->logical_link_state_rsp.link_state;
+ adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
+ complete(&adapter->init_done);
break;
case LINK_STATE_INDICATION:
netdev_dbg(netdev, "Got Logical Link State Indication\n");
@@ -3401,14 +3031,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
netdev_dbg(netdev, "Got Statistics Response\n");
complete(&adapter->stats_done);
break;
- case REQUEST_DUMP_SIZE_RSP:
- netdev_dbg(netdev, "Got Request Dump Size Response\n");
- handle_dump_size_rsp(crq, adapter);
- break;
- case REQUEST_DUMP_RSP:
- netdev_dbg(netdev, "Got Request Dump Response\n");
- complete(&adapter->fw_done);
- break;
case QUERY_IP_OFFLOAD_RSP:
netdev_dbg(netdev, "Got Query IP offload Response\n");
handle_query_ip_offload_rsp(adapter);
@@ -3421,26 +3043,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
sizeof(adapter->ip_offload_ctrl),
DMA_TO_DEVICE);
- /* We're done with the queries, perform the login */
- send_login(adapter);
- break;
- case REQUEST_RAS_COMP_NUM_RSP:
- netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
- if (crq->request_ras_comp_num_rsp.rc.code == 10) {
- netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
- break;
- }
- adapter->ras_comp_num =
- be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
- handle_request_ras_comp_num_rsp(crq, adapter);
- break;
- case REQUEST_RAS_COMPS_RSP:
- netdev_dbg(netdev, "Got Request RAS Comps Response\n");
- handle_request_ras_comps_rsp(crq, adapter);
- break;
- case CONTROL_RAS_RSP:
- netdev_dbg(netdev, "Got Control RAS Response\n");
- handle_control_ras_rsp(crq, adapter);
+ complete(&adapter->init_done);
break;
case COLLECT_FW_TRACE_RSP:
netdev_dbg(netdev, "Got Collect firmware trace Response\n");
@@ -3455,12 +3058,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
struct ibmvnic_adapter *adapter = instance;
- unsigned long flags;
- spin_lock_irqsave(&adapter->crq.lock, flags);
- vio_disable_interrupts(adapter->vdev);
tasklet_schedule(&adapter->tasklet);
- spin_unlock_irqrestore(&adapter->crq.lock, flags);
return IRQ_HANDLED;
}
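
The debug-print hunk above casts the CRQ to u64 halves and feeds them through cpu_to_be64 before printing. A standalone sketch of why that makes the output endian-independent, using glibc's htobe64 as a stand-in for the kernel helper:

/* The CRQ sits in memory in big-endian wire order. A raw host-order
 * load prints different hex on little- and big-endian machines;
 * re-encoding the loaded value as big-endian prints the wire bytes
 * identically everywhere.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 16 bytes of CRQ as they sit in memory (wire order) */
	const uint8_t crq[16] = { 0x80, 0x01, 0, 0, 0, 0, 0, 0,
				  0, 0, 0, 0, 0, 0, 0, 0x99 };
	uint64_t half[2];

	memcpy(half, crq, sizeof(half));

	/* naive print: host-order values, vary with endianness */
	printf("raw:   %016llx %016llx\n",
	       (unsigned long long)half[0], (unsigned long long)half[1]);

	/* fixed print: 8001000000000000 0000000000000099 on any host */
	printf("fixed: %016llx %016llx\n",
	       (unsigned long long)htobe64(half[0]),
	       (unsigned long long)htobe64(half[1]));
	return 0;
}
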
@@ -3468,32 +3067,23 @@ static void ibmvnic_tasklet(void *data)
{
struct ibmvnic_adapter *adapter = data;
struct ibmvnic_crq_queue *queue = &adapter->crq;
- struct vio_dev *vdev = adapter->vdev;
union ibmvnic_crq *crq;
unsigned long flags;
bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- vio_disable_interrupts(vdev);
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
- vio_enable_interrupts(vdev);
- crq = ibmvnic_next_crq(adapter);
- if (crq) {
- vio_disable_interrupts(vdev);
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- } else {
- /* remain in tasklet until all
- * capabilities responses are received
- */
- if (!adapter->wait_capability)
- done = true;
- }
+
+ /* remain in tasklet until all
+ * capabilities responses are received
+ */
+ if (!adapter->wait_capability)
+ done = true;
}
/* if capabilities CRQs were sent in this tasklet, the following
* tasklet must wait until all responses are received
@@ -3547,12 +3137,15 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
return rc;
}
-static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
+static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_crq_queue *crq = &adapter->crq;
struct vio_dev *vdev = adapter->vdev;
long rc;
+ if (!crq->msgs)
+ return;
+
netdev_dbg(adapter->netdev, "Releasing CRQ\n");
free_irq(vdev->irq, adapter);
tasklet_kill(&adapter->tasklet);
@@ -3563,15 +3156,19 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_page((unsigned long)crq->msgs);
+ crq->msgs = NULL;
}
-static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
+static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_crq_queue *crq = &adapter->crq;
struct device *dev = &adapter->vdev->dev;
struct vio_dev *vdev = adapter->vdev;
int rc, retrc = -ENOMEM;
+ if (crq->msgs)
+ return 0;
+
crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
/* Should we allocate more than one page? */
@@ -3633,48 +3230,10 @@ reg_crq_failed:
dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
free_page((unsigned long)crq->msgs);
+ crq->msgs = NULL;
return retrc;
}
-/* debugfs for dump */
-static int ibmvnic_dump_show(struct seq_file *seq, void *v)
-{
- struct net_device *netdev = seq->private;
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- struct device *dev = &adapter->vdev->dev;
- union ibmvnic_crq crq;
-
- memset(&crq, 0, sizeof(crq));
- crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
- crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
-
- init_completion(&adapter->fw_done);
- ibmvnic_send_crq(adapter, &crq);
- wait_for_completion(&adapter->fw_done);
-
- seq_write(seq, adapter->dump_data, adapter->dump_data_size);
-
- dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
- DMA_BIDIRECTIONAL);
-
- kfree(adapter->dump_data);
-
- return 0;
-}
-
-static int ibmvnic_dump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ibmvnic_dump_show, inode->i_private);
-}
-
-static const struct file_operations ibmvnic_dump_ops = {
- .owner = THIS_MODULE,
- .open = ibmvnic_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void handle_crq_init_rsp(struct work_struct *work)
{
struct ibmvnic_adapter *adapter = container_of(work,
@@ -3702,26 +3261,6 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
}
- do {
- if (adapter->renegotiate) {
- adapter->renegotiate = false;
- release_sub_crqs_no_irqs(adapter);
-
- reinit_completion(&adapter->init_done);
- send_cap_queries(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done,
- timeout)) {
- dev_err(dev, "Passive init timeout\n");
- goto task_failed;
- }
- }
- } while (adapter->renegotiate);
- rc = init_sub_crq_irqs(adapter);
-
- if (rc)
- goto task_failed;
-
- netdev->real_num_tx_queues = adapter->req_tx_queues;
netdev->mtu = adapter->req_mtu - ETH_HLEN;
if (adapter->failover) {
@@ -3753,14 +3292,40 @@ task_failed:
dev_err(dev, "Passive initialization was not successful\n");
}
-static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
+ struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ rc = init_crq_queue(adapter);
+ if (rc) {
+ dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
+ return rc;
+ }
+
+ init_completion(&adapter->init_done);
+ ibmvnic_send_crq_init(adapter);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Initialization sequence timed out\n");
+ release_crq_queue(adapter);
+ return -1;
+ }
+
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev, "Initialization of sub crqs failed\n");
+ release_crq_queue(adapter);
+ }
+
+ return rc;
+}
+
+static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+{
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
- struct dentry *ent;
- char buf[17]; /* debugfs name buf */
int rc;
dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3798,91 +3363,27 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->stats_lock);
- rc = ibmvnic_init_crq_queue(adapter);
- if (rc) {
- dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
- goto free_netdev;
- }
-
INIT_LIST_HEAD(&adapter->errors);
- INIT_LIST_HEAD(&adapter->inflight);
spin_lock_init(&adapter->error_list_lock);
- spin_lock_init(&adapter->inflight_lock);
-
- adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
- sizeof(struct ibmvnic_statistics),
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(&dev->dev, "Couldn't map stats buffer\n");
- rc = -ENOMEM;
- goto free_crq;
- }
-
- snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
- ent = debugfs_create_dir(buf, NULL);
- if (!ent || IS_ERR(ent)) {
- dev_info(&dev->dev, "debugfs create directory failed\n");
- adapter->debugfs_dir = NULL;
- } else {
- adapter->debugfs_dir = ent;
- ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
- netdev, &ibmvnic_dump_ops);
- if (!ent || IS_ERR(ent)) {
- dev_info(&dev->dev,
- "debugfs create dump file failed\n");
- adapter->debugfs_dump = NULL;
- } else {
- adapter->debugfs_dump = ent;
- }
- }
- init_completion(&adapter->init_done);
- ibmvnic_send_crq_init(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout))
- return 0;
-
- do {
- if (adapter->renegotiate) {
- adapter->renegotiate = false;
- release_sub_crqs_no_irqs(adapter);
-
- reinit_completion(&adapter->init_done);
- send_cap_queries(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done,
- timeout))
- return 0;
- }
- } while (adapter->renegotiate);
-
- rc = init_sub_crq_irqs(adapter);
+ rc = ibmvnic_init(adapter);
if (rc) {
- dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
- goto free_debugfs;
+ free_netdev(netdev);
+ return rc;
}
- netdev->real_num_tx_queues = adapter->req_tx_queues;
netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ adapter->is_closed = false;
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto free_sub_crqs;
+ free_netdev(netdev);
+ return rc;
}
dev_info(&dev->dev, "ibmvnic registered\n");
return 0;
-
-free_sub_crqs:
- release_sub_crqs(adapter);
-free_debugfs:
- if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
- debugfs_remove_recursive(adapter->debugfs_dir);
-free_crq:
- ibmvnic_release_crq_queue(adapter);
-free_netdev:
- free_netdev(netdev);
- return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
@@ -3892,23 +3393,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
unregister_netdev(netdev);
+ release_resources(adapter);
release_sub_crqs(adapter);
-
- ibmvnic_release_crq_queue(adapter);
-
- if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
- debugfs_remove_recursive(adapter->debugfs_dir);
-
- dma_unmap_single(&dev->dev, adapter->stats_token,
- sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
-
- if (adapter->ras_comps)
- dma_free_coherent(&dev->dev,
- adapter->ras_comp_num *
- sizeof(struct ibmvnic_fw_component),
- adapter->ras_comps, adapter->ras_comps_tok);
-
- kfree(adapter->ras_comp_int);
+ release_crq_queue(adapter);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
@@ -3933,7 +3420,6 @@ static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
adapter = netdev_priv(netdev);
ret += PAGE_SIZE; /* the crq message queue */
- ret += adapter->bounce_buffer_size;
ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
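
The init_crq_queue()/release_crq_queue() hunks above guard on crq->msgs and NULL it after freeing, which makes both helpers safe to call repeatedly. A minimal userspace sketch of that idempotency pattern, with calloc/free standing in for get_zeroed_page() and the DMA mapping:

#include <stdio.h>
#include <stdlib.h>

struct crq_queue {
	void *msgs;	/* stands in for the DMA-mapped CRQ page */
	int size;
};

static int init_crq_queue(struct crq_queue *crq)
{
	if (crq->msgs)		/* already initialized: nothing to do */
		return 0;

	crq->msgs = calloc(1, 4096);
	if (!crq->msgs)
		return -1;
	crq->size = 4096 / 16;	/* 16-byte descriptors per page */
	return 0;
}

static void release_crq_queue(struct crq_queue *crq)
{
	if (!crq->msgs)		/* never initialized, or already released */
		return;

	free(crq->msgs);
	crq->msgs = NULL;	/* key step: a second release is a no-op */
}

int main(void)
{
	struct crq_queue crq = { 0 };

	init_crq_queue(&crq);
	init_crq_queue(&crq);		/* early return, no leak */
	release_crq_queue(&crq);
	release_crq_queue(&crq);	/* early return, no double free */
	printf("msgs = %p\n", crq.msgs);
	return 0;
}
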
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1993b42666f7..a69979f6f19d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -518,8 +518,8 @@ struct ibmvnic_change_mac_addr {
u8 first;
u8 cmd;
u8 mac_addr[6];
- struct ibmvnic_rc rc;
u8 reserved[4];
+ struct ibmvnic_rc rc;
} __packed __aligned(8);
struct ibmvnic_multicast_ctrl {
@@ -733,6 +733,7 @@ enum ibmvnic_capabilities {
REQ_MTU = 21,
MAX_MULTICAST_FILTERS = 22,
VLAN_HEADER_INSERTION = 23,
+ RX_VLAN_HEADER_INSERTION = 24,
MAX_TX_SG_ENTRIES = 25,
RX_SG_SUPPORTED = 26,
RX_SG_REQUESTED = 27,
@@ -772,20 +773,10 @@ enum ibmvnic_commands {
ERROR_INDICATION = 0x08,
REQUEST_ERROR_INFO = 0x09,
REQUEST_ERROR_RSP = 0x89,
- REQUEST_DUMP_SIZE = 0x0A,
- REQUEST_DUMP_SIZE_RSP = 0x8A,
- REQUEST_DUMP = 0x0B,
- REQUEST_DUMP_RSP = 0x8B,
LOGICAL_LINK_STATE = 0x0C,
LOGICAL_LINK_STATE_RSP = 0x8C,
REQUEST_STATISTICS = 0x0D,
REQUEST_STATISTICS_RSP = 0x8D,
- REQUEST_RAS_COMP_NUM = 0x0E,
- REQUEST_RAS_COMP_NUM_RSP = 0x8E,
- REQUEST_RAS_COMPS = 0x0F,
- REQUEST_RAS_COMPS_RSP = 0x8F,
- CONTROL_RAS = 0x10,
- CONTROL_RAS_RSP = 0x90,
COLLECT_FW_TRACE = 0x11,
COLLECT_FW_TRACE_RSP = 0x91,
LINK_STATE_INDICATION = 0x12,
@@ -806,8 +797,6 @@ enum ibmvnic_commands {
ACL_CHANGE_INDICATION = 0x1A,
ACL_QUERY = 0x1B,
ACL_QUERY_RSP = 0x9B,
- REQUEST_DEBUG_STATS = 0x1C,
- REQUEST_DEBUG_STATS_RSP = 0x9C,
QUERY_MAP = 0x1D,
QUERY_MAP_RSP = 0x9D,
REQUEST_MAP = 0x1E,
@@ -880,7 +869,6 @@ struct ibmvnic_tx_buff {
int index;
int pool_index;
bool last_frag;
- bool used_bounce;
union sub_crq indir_arr[6];
u8 hdr_data[140];
dma_addr_t indir_dma;
@@ -925,18 +913,6 @@ struct ibmvnic_error_buff {
__be32 error_id;
};
-struct ibmvnic_fw_comp_internal {
- struct ibmvnic_adapter *adapter;
- int num;
- struct debugfs_blob_wrapper desc_blob;
- int paused;
-};
-
-struct ibmvnic_inflight_cmd {
- union ibmvnic_crq crq;
- struct list_head list;
-};
-
struct ibmvnic_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
@@ -948,12 +924,8 @@ struct ibmvnic_adapter {
dma_addr_t ip_offload_ctrl_tok;
bool migrated;
u32 msg_enable;
- void *bounce_buffer;
- int bounce_buffer_size;
- dma_addr_t bounce_buffer_dma;
/* Statistics */
- struct net_device_stats net_stats;
struct ibmvnic_statistics stats;
dma_addr_t stats_token;
struct completion stats_done;
@@ -992,26 +964,12 @@ struct ibmvnic_adapter {
struct ibmvnic_tx_pool *tx_pool;
bool closing;
struct completion init_done;
+ int init_done_rc;
struct list_head errors;
spinlock_t error_list_lock;
- /* debugfs */
- struct dentry *debugfs_dir;
- struct dentry *debugfs_dump;
struct completion fw_done;
- char *dump_data;
- dma_addr_t dump_data_token;
- int dump_data_size;
- int ras_comp_num;
- struct ibmvnic_fw_component *ras_comps;
- struct ibmvnic_fw_comp_internal *ras_comp_int;
- dma_addr_t ras_comps_tok;
- struct dentry *ras_comps_ent;
-
- /* in-flight commands that allocate and/or map memory*/
- struct list_head inflight;
- spinlock_t inflight_lock;
/* partner capabilities */
u64 min_tx_queues;
@@ -1037,6 +995,7 @@ struct ibmvnic_adapter {
u64 req_mtu;
u64 max_multicast_filters;
u64 vlan_header_insertion;
+ u64 rx_vlan_header_insertion;
u64 max_tx_sg_entries;
u64 rx_sg_supported;
u64 rx_sg_requested;
@@ -1052,4 +1011,5 @@ struct ibmvnic_adapter {
struct work_struct ibmvnic_xport;
struct tasklet_struct tasklet;
bool failover;
+ bool is_closed;
};
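
The ibmvnic_change_mac_addr hunk moves rc behind reserved[4], so the return code occupies the last bytes of the 16-byte CRQ descriptor. A userspace sketch that checks the resulting layout; struct ibmvnic_rc is assumed here to be the driver's two-byte { code, detailed } pair:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ibmvnic_rc {
	uint8_t code;
	uint8_t detailed;
} __attribute__((packed));

struct ibmvnic_change_mac_addr {
	uint8_t first;
	uint8_t cmd;
	uint8_t mac_addr[6];
	uint8_t reserved[4];
	struct ibmvnic_rc rc;	/* now at offset 12, after reserved */
} __attribute__((packed, aligned(8)));

_Static_assert(offsetof(struct ibmvnic_change_mac_addr, rc) == 12,
	       "rc must land in the final word of the 16-byte CRQ");
_Static_assert(sizeof(struct ibmvnic_change_mac_addr) == 16,
	       "a CRQ descriptor is 16 bytes");

int main(void)
{
	printf("rc offset: %zu\n",
	       offsetof(struct ibmvnic_change_mac_addr, rc));
	return 0;
}
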
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1349b45f014d..1542a2158e96 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -235,17 +235,6 @@ config I40E_DCB
If unsure, say N.
-config I40E_FCOE
- bool "Fibre Channel over Ethernet (FCoE)"
- default n
- depends on I40E && DCB && FCOE
- ---help---
- Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
- in the driver. This will create new netdev for exclusive FCoE
- use with XL710 FCoE offloads enabled.
-
- If unsure, say N.
-
config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 975eeb885ca2..ec8aa4562cc9 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -103,104 +103,104 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
-static int e1000_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 supported, advertising;
if (hw->media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->autoneg_advertised;
+ advertising |= hw->autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy_addr;
-
- if (hw->mac_type == e1000_82543)
- ecmd->transceiver = XCVR_EXTERNAL;
- else
- ecmd->transceiver = XCVR_INTERNAL;
-
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy_addr;
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
- ecmd->port = PORT_FIBRE;
-
- if (hw->mac_type >= e1000_82545)
- ecmd->transceiver = XCVR_INTERNAL;
- else
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
if (er32(STATUS) & E1000_STATUS_LU) {
e1000_get_speed_and_duplex(hw, &adapter->link_speed,
&adapter->link_duplex);
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ cmd->base.speed = adapter->link_speed;
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
* and HALF_DUPLEX != DUPLEX_HALF
*/
if (adapter->link_duplex == FULL_DUPLEX)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+ cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) ||
hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* MDI-X => 1; MDI => 0 */
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+ cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
ETH_TP_MDI_X : ETH_TP_MDI);
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->mdix;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int e1000_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->media_type != e1000_media_type_copper)
return -EOPNOTSUPP;
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
return -EINVAL;
}
@@ -209,32 +209,31 @@ static int e1000_set_settings(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->autoneg = 1;
if (hw->media_type == e1000_media_type_fiber)
hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
else
- hw->autoneg_advertised = ecmd->advertising |
+ hw->autoneg_advertised = advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
- ecmd->advertising = hw->autoneg_advertised;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
clear_bit(__E1000_RESETTING, &adapter->flags);
return -EINVAL;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->mdix = AUTO_ALL_MODES;
else
- hw->mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -1875,8 +1874,6 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
@@ -1901,6 +1898,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = e1000_get_link_ksettings,
+ .set_link_ksettings = e1000_set_link_ksettings,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
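
The conversion above replaces the legacy get_settings/set_settings hooks with get_link_ksettings/set_link_ksettings and bridges the two representations via ethtool_convert_legacy_u32_to_link_mode() and its inverse. A rough sketch of what that pair does conceptually; the bitmap width and bit positions here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LINK_MODE_NBITS 96	/* illustrative bitmap width */
#define BITMAP_WORDS ((LINK_MODE_NBITS + 31) / 32)

/* legacy SUPPORTED_ and ADVERTISED_ flags occupy the low 32 bits */
static void legacy_u32_to_link_mode(uint32_t bitmap[BITMAP_WORDS],
				    uint32_t legacy)
{
	for (int i = 0; i < BITMAP_WORDS; i++)
		bitmap[i] = 0;
	bitmap[0] = legacy;
}

/* returns false when modes beyond the legacy range would be lost,
 * mirroring the bool result of the kernel helper
 */
static bool link_mode_to_legacy_u32(uint32_t *legacy,
				    const uint32_t bitmap[BITMAP_WORDS])
{
	*legacy = bitmap[0];
	for (int i = 1; i < BITMAP_WORDS; i++)
		if (bitmap[i])
			return false;
	return true;
}

int main(void)
{
	uint32_t modes[BITMAP_WORDS];
	uint32_t out;

	legacy_u32_to_link_mode(modes, 0xcf);	/* some legacy mode bits */
	printf("round-trip ok: %d, value 0x%08x\n",
	       link_mode_to_legacy_u32(&out, modes), out);
	return 0;
}
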
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 93fc6c67306b..bd8b05fe8258 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -131,7 +131,6 @@ static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
@@ -846,7 +845,6 @@ static const struct net_device_ops e1000_netdev_ops = {
.ndo_open = e1000_open,
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
- .ndo_get_stats = e1000_get_stats,
.ndo_set_rx_mode = e1000_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
.ndo_tx_timeout = e1000_tx_timeout,
@@ -3530,19 +3528,6 @@ static void e1000_reset_task(struct work_struct *work)
}
/**
- * e1000_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the watchdog.
- **/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
-{
- /* only return the current stats */
- return &netdev->stats;
-}
-
-/**
* e1000_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
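
Dropping .ndo_get_stats works because e1000_get_stats() only returned &netdev->stats, which is already the core's fallback when a driver registers neither .ndo_get_stats64 nor .ndo_get_stats. A much-simplified sketch of that dispatch (the real dev_get_stats() also converts to 64-bit counters and folds in per-cpu drop counts):

#include <stdio.h>

struct net_stats { unsigned long rx_packets, tx_packets; };

struct net_device;
struct net_device_ops {
	struct net_stats *(*ndo_get_stats)(struct net_device *dev);
};

struct net_device {
	const struct net_device_ops *netdev_ops;
	struct net_stats stats;	/* core-maintained default counters */
};

/* prefer the driver hook when present, else the default counters */
static struct net_stats *dev_get_stats(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops && ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
	return &dev->stats;	/* what e1000_get_stats() returned anyway */
}

int main(void)
{
	static const struct net_device_ops ops = { 0 };	/* no hook */
	struct net_device dev = { .netdev_ops = &ops,
				  .stats = { .rx_packets = 42 } };

	printf("rx_packets = %lu\n", dev_get_stats(&dev)->rx_packets);
	return 0;
}
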
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index a29b12e80855..c7c994eb410e 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -135,7 +135,8 @@ enum e1000_boards {
board_pchlan,
board_pch2lan,
board_pch_lpt,
- board_pch_spt
+ board_pch_spt,
+ board_pch_cnp
};
struct e1000_ps_page {
@@ -378,18 +379,22 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
* bits to count nanoseconds, leaving the rest for fractional nanoseconds.
*/
-#define INCVALUE_96MHz 125
-#define INCVALUE_SHIFT_96MHz 17
-#define INCPERIOD_SHIFT_96MHz 2
-#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz)
+#define INCVALUE_96MHZ 125
+#define INCVALUE_SHIFT_96MHZ 17
+#define INCPERIOD_SHIFT_96MHZ 2
+#define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ)
-#define INCVALUE_25MHz 40
-#define INCVALUE_SHIFT_25MHz 18
-#define INCPERIOD_25MHz 1
+#define INCVALUE_25MHZ 40
+#define INCVALUE_SHIFT_25MHZ 18
+#define INCPERIOD_25MHZ 1
-#define INCVALUE_24MHz 125
-#define INCVALUE_SHIFT_24MHz 14
-#define INCPERIOD_24MHz 3
+#define INCVALUE_24MHZ 125
+#define INCVALUE_SHIFT_24MHZ 14
+#define INCPERIOD_24MHZ 3
+
+#define INCVALUE_38400KHZ 26
+#define INCVALUE_SHIFT_38400KHZ 19
+#define INCPERIOD_38400KHZ 1
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
@@ -515,6 +520,7 @@ extern const struct e1000_info e1000_pch_info;
extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
+extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 7aff68a4a4df..e23dbd9190d6 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -117,55 +117,52 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
-static int e1000_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 speed;
+ u32 speed, supported, advertising;
if (hw->phy.media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
if (hw->phy.type == e1000_phy_ife)
- ecmd->supported &= ~SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_TP;
+ supported &= ~SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
+ advertising |= hw->phy.autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- ecmd->transceiver = XCVR_INTERNAL;
-
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
speed = SPEED_UNKNOWN;
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
if (netif_running(netdev)) {
if (netif_carrier_ok(netdev)) {
speed = adapter->link_speed;
- ecmd->duplex = adapter->link_duplex - 1;
+ cmd->base.duplex = adapter->link_duplex - 1;
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
@@ -179,30 +176,36 @@ static int e1000_get_settings(struct net_device *netdev,
speed = SPEED_10;
if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
}
}
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+ cmd->base.speed = speed;
+ cmd->base.autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* MDI-X => 2; MDI => 1; Invalid => 0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = hw->phy.is_mdix ?
+ ETH_TP_MDI_X : ETH_TP_MDI;
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->phy.mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
if (hw->phy.media_type != e1000_media_type_copper)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
@@ -262,12 +265,16 @@ err_inval:
return -EINVAL;
}
-static int e1000_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int ret_val = 0;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
pm_runtime_get_sync(netdev->dev.parent);
@@ -285,14 +292,14 @@ static int e1000_set_settings(struct net_device *netdev,
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = -EOPNOTSUPP;
goto out;
}
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
ret_val = -EINVAL;
goto out;
@@ -302,35 +309,35 @@ static int e1000_set_settings(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber)
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE | ADVERTISED_Autoneg;
else
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_TP | ADVERTISED_Autoneg;
- ecmd->advertising = hw->phy.autoneg_advertised;
+ advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
ret_val = -EINVAL;
goto out;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
/* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->phy.mdix = AUTO_ALL_MODES;
else
- hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -904,19 +911,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ /* fall through */
+ case e1000_pch_cnp:
mask |= BIT(18);
break;
default:
break;
}
- if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt))
+ if (mac->type >= e1000_pch_lpt)
wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
E1000_FWSM_WLOCK_MAC_SHIFT;
for (i = 0; i < mac->rar_entry_count; i++) {
- if ((mac->type == e1000_pch_lpt) ||
- (mac->type == e1000_pch_spt)) {
+ if (mac->type >= e1000_pch_lpt) {
/* Cannot test write-protected SHRAL[n] registers */
if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
continue;
@@ -1525,7 +1533,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 rctl, fext_nvm11, tarc0;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
@@ -1569,6 +1577,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch_spt:
+ case e1000_pch_cnp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
@@ -2313,8 +2322,6 @@ static int e1000e_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
@@ -2342,6 +2349,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_ts_info = e1000e_get_ts_info,
.get_eee = e1000e_get_eee,
.set_eee = e1000e_set_eee,
+ .get_link_ksettings = e1000_get_link_ksettings,
+ .set_link_ksettings = e1000_set_link_ksettings,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 4e733bf1a38e..66bd5060a65b 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -96,6 +96,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8
#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3
#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6
+#define E1000_DEV_ID_PCH_CNP_I219_LM6 0x15BD
+#define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE
+#define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB
+#define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC
#define E1000_REVISION_4 4
@@ -118,6 +122,7 @@ enum e1000_mac_type {
e1000_pch2lan,
e1000_pch_lpt,
e1000_pch_spt,
+ e1000_pch_cnp,
};
enum e1000_media_type {
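
Appending e1000_pch_cnp at the end of e1000_mac_type is what lets the ich8lan.c and netdev.c hunks below collapse chains of equality tests into range checks such as mac.type >= e1000_pch_lpt. A toy sketch of the idiom; the enum is abbreviated and the member names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* members are declared in hardware-generation order, so ordered
 * comparisons cover every later generation automatically
 */
enum mac_type {
	mac_pch2lan,
	mac_pch_lpt,
	mac_pch_spt,
	mac_pch_cnp,	/* appended last, preserving the ordering */
};

static bool has_ecc_workaround(enum mac_type type)
{
	/* replaces: type == mac_pch_lpt || type == mac_pch_spt || ... */
	return type >= mac_pch_lpt;
}

int main(void)
{
	for (int t = mac_pch2lan; t <= mac_pch_cnp; t++)
		printf("type %d: %s\n", t,
		       has_ecc_workaround((enum mac_type)t) ? "yes" : "no");
	return 0;
}
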
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f3aaca743ea3..68ea8b4555ab 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -237,7 +237,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (ret_val)
return false;
out:
- if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
/* Unforce SMBus mode in PHY */
@@ -333,6 +333,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -474,6 +475,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -607,7 +609,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_flash_sw;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
/* in SPT, gfpreg doesn't exist. NVM size is taken from the
* STRAP register. This is because in SPT the GbE Flash region
* is no longer accessed through the flash registers. Instead,
@@ -715,6 +717,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -732,7 +735,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
break;
}
- if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) {
+ if (mac->type >= e1000_pch_lpt) {
mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface =
@@ -1399,9 +1402,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* aggressive resulting in many collisions. To avoid this, increase
* the IPG and reduce Rx latency in the PHY.
*/
- if (((hw->mac.type == e1000_pch2lan) ||
- (hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) && link) {
+ if ((hw->mac.type >= e1000_pch2lan) && link) {
u16 speed, duplex;
e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
@@ -1412,7 +1413,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
tipg_reg |= 0xFF;
/* Reduce Rx latency in analog PHY */
emi_val = 0;
- } else if (hw->mac.type == e1000_pch_spt &&
+ } else if (hw->mac.type >= e1000_pch_spt &&
duplex == FULL_DUPLEX && speed != SPEED_1000) {
tipg_reg |= 0xC;
emi_val = 1;
@@ -1435,8 +1436,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
emi_addr = I217_RX_CONFIG;
ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
- if (hw->mac.type == e1000_pch_lpt ||
- hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
u16 phy_reg;
e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
@@ -1452,7 +1452,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
u16 data;
u16 ptr_gap;
@@ -1502,7 +1502,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* on power up.
* Set the Beacon Duration for I217 to 8 usec
*/
- if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
u32 mac_reg;
mac_reg = er32(FEXTNVM4);
@@ -1520,8 +1520,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
}
- if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
/* Set platform power management values for
* Latency Tolerance Reporting (LTR)
*/
@@ -1533,15 +1532,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
- /* FEXTNVM6 K1-off workaround */
- if (hw->mac.type == e1000_pch_spt) {
- u32 pcieanacfg = er32(PCIEANACFG);
+ if (hw->mac.type >= e1000_pch_lpt) {
u32 fextnvm6 = er32(FEXTNVM6);
- if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
- fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
- else
- fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+ if (hw->mac.type == e1000_pch_spt) {
+ /* FEXTNVM6 K1-off workaround - for SPT only */
+ u32 pcieanacfg = er32(PCIEANACFG);
+
+ if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+ else
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+ }
ew32(FEXTNVM6, fextnvm6);
}
@@ -1640,6 +1642,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2091,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3125,6 +3129,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
switch (hw->mac.type) {
case e1000_pch_spt:
+ case e1000_pch_cnp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -3380,7 +3385,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Clear FCERR and DAEL in hw status by writing 1 */
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
else
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
@@ -3399,7 +3404,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* Begin by setting Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
else
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
@@ -3423,7 +3428,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS,
hsfsts.regval & 0xFFFF);
else
@@ -3450,13 +3455,13 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
u32 i = 0;
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
else
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
else
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -3527,7 +3532,7 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* In SPT, only 32-bit access is supported,
* so this function should not be called.
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
return -E1000_ERR_NVM;
else
ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
@@ -3634,8 +3639,7 @@ static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val = -E1000_ERR_NVM;
u8 count = 0;
- if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
- hw->mac.type != e1000_pch_spt)
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
return -E1000_ERR_NVM;
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr);
@@ -4068,6 +4072,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
@@ -4153,7 +4158,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u8 count = 0;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
} else {
@@ -4173,7 +4178,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
/* In SPT, this register is in LAN memory space, not
* flash. Therefore, only 32-bit access is supported
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
else
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
@@ -4185,7 +4190,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
* not flash. Therefore, only 32-bit access is
* supported
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
else
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -4243,7 +4248,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u8 count = 0;
- if (hw->mac.type == e1000_pch_spt) {
+ if (hw->mac.type >= e1000_pch_spt) {
if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
}
@@ -4259,7 +4264,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
/* In SPT, this register is in LAN memory space, not
* flash. Therefore, only 32-bit access is supported
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
>> 16;
else
@@ -4272,7 +4277,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
* not flash. Therefore, only 32-bit access is
* supported
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
else
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -4464,14 +4469,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
hsflctl.regval =
er32flash(ICH_FLASH_HSFSTS) >> 16;
else
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
ew32flash(ICH_FLASH_HSFSTS,
hsflctl.regval << 16);
else
@@ -4894,8 +4899,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
ew32(RFCTL, reg);
/* Enable ECC on Lynxpoint */
- if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
reg = er32(PBECCSTS);
reg |= E1000_PBECCSTS_ECC_ENABLE;
ew32(PBECCSTS, reg);
@@ -5299,7 +5303,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
(device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(device_id == E1000_DEV_ID_PCH_I218_LM3) ||
(device_id == E1000_DEV_ID_PCH_I218_V3) ||
- (hw->mac.type == e1000_pch_spt)) {
+ (hw->mac.type >= e1000_pch_spt)) {
u32 fextnvm6 = er32(FEXTNVM6);
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
@@ -5865,7 +5869,8 @@ const struct e1000_info e1000_pch2_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
- | FLAG2_HAS_EEE,
+ | FLAG2_HAS_EEE
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
@@ -5914,3 +5919,23 @@ const struct e1000_info e1000_pch_spt_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_cnp_info = {
+ .mac = e1000_pch_cnp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
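
The repeated conversions above from "== e1000_pch_spt" to ">= e1000_pch_spt" only hold because the e1000_mac_type enumerators are declared in hardware-generation order, so e1000_pch_cnp (and anything newer) compares greater than SPT and automatically inherits its 32-bit flash-access and errata paths. A minimal standalone sketch of the idea, using hypothetical enumerator names rather than the driver's real values:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's e1000_mac_type enumerators;
 * only their relative order matters, and it must follow hardware
 * generations for the ">=" comparisons to stay correct.
 */
enum mac_type { pch_lpt, pch_spt, pch_cnp };

/* SPT-and-newer parts require 32-bit flash register access */
static int needs_32bit_flash_access(enum mac_type type)
{
	return type >= pch_spt;
}

int main(void)
{
	printf("lpt=%d spt=%d cnp=%d\n",
	       needs_32bit_flash_access(pch_lpt),
	       needs_32bit_flash_access(pch_spt),
	       needs_32bit_flash_access(pch_cnp));
	return 0;
}

The trade-off of this pattern is that any future mac type added to the enum silently opts in to every ">=" workaround, which is exactly the behavior the CNP enablement wants here.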
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e9af89ad039c..b3679728caac 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -71,6 +71,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch2lan] = &e1000_pch2_info,
[board_pch_lpt] = &e1000_pch_lpt_info,
[board_pch_spt] = &e1000_pch_spt_info,
+ [board_pch_cnp] = &e1000_pch_cnp_info,
};
struct e1000_reg_info {
@@ -1791,8 +1792,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* Reset on uncorrectable ECC error */
- if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt))) {
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -1872,8 +1872,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* Reset on uncorrectable ECC error */
- if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt))) {
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -2241,8 +2240,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC);
- } else if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ } else if (hw->mac.type >= e1000_pch_lpt) {
ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
@@ -3000,8 +2998,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
hw->mac.ops.config_collision_dist(hw);
- /* SPT Si errata workaround to avoid data corruption */
- if (hw->mac.type == e1000_pch_spt) {
+ /* SPT and CNP Si errata workaround to avoid data corruption */
+ if (hw->mac.type >= e1000_pch_spt) {
u32 reg_val;
reg_val = er32(IOSFPC);
@@ -3497,8 +3495,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
/* Make sure clock is enabled on I217/I218/I219 before checking
* the frequency
*/
- if (((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) &&
+ if ((hw->mac.type >= e1000_pch_lpt) &&
!(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
@@ -3511,37 +3508,58 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
+ /* Stable 96MHz frequency */
+ incperiod = INCPERIOD_96MHZ;
+ incvalue = INCVALUE_96MHZ;
+ shift = INCVALUE_SHIFT_96MHZ;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
+ break;
case e1000_pch_lpt:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
- incperiod = INCPERIOD_96MHz;
- incvalue = INCVALUE_96MHz;
- shift = INCVALUE_SHIFT_96MHz;
- adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ incperiod = INCPERIOD_96MHZ;
+ incvalue = INCVALUE_96MHZ;
+ shift = INCVALUE_SHIFT_96MHZ;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
} else {
/* Stable 25MHz frequency */
- incperiod = INCPERIOD_25MHz;
- incvalue = INCVALUE_25MHz;
- shift = INCVALUE_SHIFT_25MHz;
+ incperiod = INCPERIOD_25MHZ;
+ incvalue = INCVALUE_25MHZ;
+ shift = INCVALUE_SHIFT_25MHZ;
adapter->cc.shift = shift;
}
break;
case e1000_pch_spt:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
- incperiod = INCPERIOD_24MHz;
- incvalue = INCVALUE_24MHz;
- shift = INCVALUE_SHIFT_24MHz;
+ incperiod = INCPERIOD_24MHZ;
+ incvalue = INCVALUE_24MHZ;
+ shift = INCVALUE_SHIFT_24MHZ;
adapter->cc.shift = shift;
break;
}
return -EINVAL;
+ case e1000_pch_cnp:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHZ;
+ incvalue = INCVALUE_24MHZ;
+ shift = INCVALUE_SHIFT_24MHZ;
+ adapter->cc.shift = shift;
+ } else {
+ /* Stable 38400KHz frequency */
+ incperiod = INCPERIOD_38400KHZ;
+ incvalue = INCVALUE_38400KHZ;
+ shift = INCVALUE_SHIFT_38400KHZ;
+ adapter->cc.shift = shift;
+ }
+ break;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
- incperiod = INCPERIOD_25MHz;
- incvalue = INCVALUE_25MHz;
- shift = INCVALUE_SHIFT_25MHz;
+ incperiod = INCPERIOD_25MHZ;
+ incvalue = INCVALUE_25MHZ;
+ shift = INCVALUE_SHIFT_25MHZ;
adapter->cc.shift = shift;
break;
default:
@@ -4032,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
+ case e1000_pch_cnp:
fc->refresh_time = 0x0400;
if (adapter->netdev->mtu <= ETH_DATA_LEN) {
@@ -4076,7 +4095,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
}
- if (hw->mac.type == e1000_pch_spt)
+ if (hw->mac.type >= e1000_pch_spt)
e1000_flush_desc_rings(adapter);
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
@@ -4151,7 +4170,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
phy_data &= ~IGP02E1000_PM_SPD;
e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
}
- if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) {
+ if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) {
u32 reg;
/* Fextnvm7 @ 0xe4[2] = 1 */
@@ -4285,7 +4304,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
if (!pci_channel_offline(adapter->pdev)) {
if (reset)
e1000e_reset(adapter);
- else if (hw->mac.type == e1000_pch_spt)
+ else if (hw->mac.type >= e1000_pch_spt)
e1000_flush_desc_rings(adapter);
}
e1000_clean_tx_ring(adapter->tx_ring);
@@ -4973,8 +4992,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgpdc += er32(MGTPDC);
/* Correctable ECC Errors */
- if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ if (hw->mac.type >= e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -6348,8 +6366,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
- } else if ((hw->mac.type == e1000_pch_lpt) ||
- (hw->mac.type == e1000_pch_spt)) {
+ } else if (hw->mac.type >= e1000_pch_lpt) {
if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
@@ -7508,6 +7525,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
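
The four new CNP device IDs reach e1000_pch_cnp_info through the usual two-table dispatch: each PCI table entry carries a board enum in driver_data, and the probe routine uses it to index e1000_info_tbl, so adding a board is one table entry plus one info struct. A hedged standalone sketch with placeholder names (the driver's real tables carry ops and flags, not just a name):

#include <stdio.h>

enum board { board_spt, board_cnp };	/* illustrative, not the driver's */

struct board_info { const char *name; };

static const struct board_info spt_info = { "pch_spt" };
static const struct board_info cnp_info = { "pch_cnp" };

static const struct board_info *info_tbl[] = {
	[board_spt] = &spt_info,
	[board_cnp] = &cnp_info,
};

/* probe() receives driver_data from the matched PCI table entry */
static void probe(unsigned long driver_data)
{
	printf("binding %s\n", info_tbl[driver_data]->name);
}

int main(void)
{
	probe(board_cnp);
	return 0;
}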
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 34cc3be0df8e..b366885487a8 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -301,8 +301,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- if (((hw->mac.type != e1000_pch_lpt) &&
- (hw->mac.type != e1000_pch_spt)) ||
+ case e1000_pch_cnp:
+ if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
break;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 52b979443cde..689c413b7782 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -65,14 +65,16 @@ enum fm10k_ring_state_t {
__FM10K_TX_DETECT_HANG,
__FM10K_HANG_CHECK_ARMED,
__FM10K_TX_XPS_INIT_DONE,
+ /* This must be last and is used to calculate BITMAP size */
+ __FM10K_TX_STATE_SIZE__,
};
#define check_for_tx_hang(ring) \
- test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+ test_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
#define set_check_for_tx_hang(ring) \
- set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+ set_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
#define clear_check_for_tx_hang(ring) \
- clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
+ clear_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
struct fm10k_tx_buffer {
struct fm10k_tx_desc *next_to_watch;
@@ -126,7 +128,7 @@ struct fm10k_ring {
struct fm10k_rx_buffer *rx_buffer;
};
u32 __iomem *tail;
- unsigned long state;
+ DECLARE_BITMAP(state, __FM10K_TX_STATE_SIZE__);
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
@@ -249,18 +251,46 @@ struct fm10k_udp_port {
/* one work queue for entire driver */
extern struct workqueue_struct *fm10k_workqueue;
+/* The following enumeration contains flags which indicate or enable modified
+ * driver behaviors. To avoid race conditions, the flags are stored in
+ * a BITMAP in the fm10k_intfc structure. The BITMAP should be accessed using
+ * atomic *_bit() operations.
+ */
+enum fm10k_flags_t {
+ FM10K_FLAG_RESET_REQUESTED,
+ FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ FM10K_FLAG_SWPRI_CONFIG,
+ /* __FM10K_FLAGS_SIZE__ is used to calculate the size of
+ * interface->flags and must be the last value in this
+ * enumeration.
+ */
+ __FM10K_FLAGS_SIZE__
+};
+
+enum fm10k_state_t {
+ __FM10K_RESETTING,
+ __FM10K_DOWN,
+ __FM10K_SERVICE_SCHED,
+ __FM10K_SERVICE_REQUEST,
+ __FM10K_SERVICE_DISABLE,
+ __FM10K_MBX_LOCK,
+ __FM10K_LINK_DOWN,
+ __FM10K_UPDATING_STATS,
+ /* This value must be last and determines the BITMAP size */
+ __FM10K_STATE_SIZE__,
+};
+
struct fm10k_intfc {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */
struct pci_dev *pdev;
- unsigned long state;
+ DECLARE_BITMAP(state, __FM10K_STATE_SIZE__);
+
+ /* Access flag values using atomic *_bit() operations */
+ DECLARE_BITMAP(flags, __FM10K_FLAGS_SIZE__);
- u32 flags;
-#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
-#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
-#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
-#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3))
int xcast_mode;
/* Tx fast path data */
@@ -352,22 +382,12 @@ struct fm10k_intfc {
u16 vid;
};
-enum fm10k_state_t {
- __FM10K_RESETTING,
- __FM10K_DOWN,
- __FM10K_SERVICE_SCHED,
- __FM10K_SERVICE_DISABLE,
- __FM10K_MBX_LOCK,
- __FM10K_LINK_DOWN,
- __FM10K_UPDATING_STATS,
-};
-
static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
{
/* busy loop if we cannot obtain the lock as some calls
* such as ndo_set_rx_mode may be made in atomic context
*/
- while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state))
+ while (test_and_set_bit(__FM10K_MBX_LOCK, interface->state))
udelay(20);
}
@@ -375,12 +395,12 @@ static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface)
{
/* flush memory to make sure state is correct */
smp_mb__before_atomic();
- clear_bit(__FM10K_MBX_LOCK, &interface->state);
+ clear_bit(__FM10K_MBX_LOCK, interface->state);
}
static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface)
{
- return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state);
+ return !test_and_set_bit(__FM10K_MBX_LOCK, interface->state);
}
/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */
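
The u32-to-bitmap conversion above closes a classic lost-update race: "interface->flags |= FM10K_FLAG_..." compiles to a non-atomic read-modify-write, so two CPUs can each read the old word and one update vanishes, whereas set_bit()/clear_bit() on a DECLARE_BITMAP are atomic per bit. A userspace sketch of the same contrast using C11 atomics (the kernel bit helpers, not these, are what the driver actually uses):

#include <stdatomic.h>
#include <stdio.h>

enum { FLAG_RESET_REQUESTED, FLAG_SWPRI_CONFIG };	/* illustrative */

static _Atomic unsigned long flags;	/* stands in for DECLARE_BITMAP() */

static void set_flag(int bit)		/* like set_bit() */
{
	atomic_fetch_or(&flags, 1UL << bit);
}

static int test_and_clear_flag(int bit)	/* like test_and_clear_bit() */
{
	unsigned long old = atomic_fetch_and(&flags, ~(1UL << bit));

	return !!(old & (1UL << bit));
}

int main(void)
{
	set_flag(FLAG_RESET_REQUESTED);
	printf("%d %d\n",
	       test_and_clear_flag(FLAG_RESET_REQUESTED),	/* 1 */
	       test_and_clear_flag(FLAG_RESET_REQUESTED));	/* 0 */
	return 0;
}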
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 0c84fef750f4..c7234f35f8ff 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -562,7 +562,7 @@ static int fm10k_set_ringparam(struct net_device *netdev,
return 0;
}
- while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
+ while (test_and_set_bit(__FM10K_RESETTING, interface->state))
usleep_range(1000, 2000);
if (!netif_running(interface->netdev)) {
@@ -648,7 +648,7 @@ err_setup:
fm10k_up(interface);
vfree(temp_ring);
clear_reset:
- clear_bit(__FM10K_RESETTING, &interface->state);
+ clear_bit(__FM10K_RESETTING, interface->state);
return err;
}
@@ -716,7 +716,8 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fall through */
case UDP_V4_FLOW:
- if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fall through */
case SCTP_V4_FLOW:
@@ -732,7 +733,8 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V6_FLOW:
- if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
@@ -764,12 +766,13 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-#define UDP_RSS_FLAGS (FM10K_FLAG_RSS_FIELD_IPV4_UDP | \
- FM10K_FLAG_RSS_FIELD_IPV6_UDP)
static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
struct ethtool_rxnfc *nfc)
{
- u32 flags = interface->flags;
+ int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags);
+ int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags);
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -793,10 +796,12 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- flags &= ~FM10K_FLAG_RSS_FIELD_IPV4_UDP;
+ clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- flags |= FM10K_FLAG_RSS_FIELD_IPV4_UDP;
+ set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags);
break;
default:
return -EINVAL;
@@ -808,10 +813,12 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- flags &= ~FM10K_FLAG_RSS_FIELD_IPV6_UDP;
+ clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- flags |= FM10K_FLAG_RSS_FIELD_IPV6_UDP;
+ set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags);
break;
default:
return -EINVAL;
@@ -835,28 +842,41 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return -EINVAL;
}
- /* if we changed something we need to update flags */
- if (flags != interface->flags) {
+ /* If something changed we need to update the MRQC register. Note that
+ * test_bit() is guaranteed to return strictly 0 or 1, so testing for
+ * equality is safe.
+ */
+ if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags)) ||
+ (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags))) {
struct fm10k_hw *hw = &interface->hw;
+ bool warn = false;
u32 mrqc;
- if ((flags & UDP_RSS_FLAGS) &&
- !(interface->flags & UDP_RSS_FLAGS))
- netif_warn(interface, drv, interface->netdev,
- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
-
- interface->flags = flags;
-
/* Perform hash on these packet types */
mrqc = FM10K_MRQC_IPV4 |
FM10K_MRQC_TCP_IPV4 |
FM10K_MRQC_IPV6 |
FM10K_MRQC_TCP_IPV6;
- if (flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
+ interface->flags)) {
mrqc |= FM10K_MRQC_UDP_IPV4;
- if (flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+ warn = true;
+ }
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
+ interface->flags)) {
mrqc |= FM10K_MRQC_UDP_IPV6;
+ warn = true;
+ }
+
+ /* If we enable UDP RSS, display a warning that this may cause
+ * fragmented UDP packets to arrive out of order.
+ */
+ if (warn)
+ netif_warn(interface, drv, interface->netdev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
}
@@ -939,7 +959,7 @@ static void fm10k_self_test(struct net_device *dev,
memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
- if (FM10K_REMOVED(hw)) {
+ if (FM10K_REMOVED(hw->hw_addr)) {
netif_err(interface, drv, dev,
"Interface removed - test blocked\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5bb233a9614c..9dffaba85ae6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -34,7 +34,7 @@ const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright (c) 2013 - 2016 Intel Corporation.";
+ "Copyright(c) 2013 - 2017 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
@@ -1175,13 +1175,13 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
/* update completed stats and continue */
tx_ring->tx_stats.tx_done_old = tx_done;
/* reset the countdown */
- clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
+ clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
return false;
}
/* make sure it is true for two checks in a row */
- return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
+ return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
}
/**
@@ -1191,9 +1191,9 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
/* Do the reset outside of interrupt context */
- if (!test_bit(__FM10K_DOWN, &interface->state)) {
+ if (!test_bit(__FM10K_DOWN, interface->state)) {
interface->tx_timeout_count++;
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
fm10k_service_event_schedule(interface);
}
}
@@ -1214,7 +1214,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
- if (test_bit(__FM10K_DOWN, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state))
return true;
tx_buffer = &tx_ring->tx_buffer[i];
@@ -1344,7 +1344,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__FM10K_DOWN, &interface->state)) {
+ !test_bit(__FM10K_DOWN, interface->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
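
fm10k_check_tx_hang() above uses a two-strike scheme: the first suspicious check only arms __FM10K_HANG_CHECK_ARMED (test_and_set_bit returns the prior value, i.e. false), a hang is reported only when a second consecutive check finds the bit already set, and any forward progress clears it. A single-threaded sketch of that state machine, with a plain bool standing in for the atomic ring-state bit:

#include <stdbool.h>
#include <stdio.h>

static bool armed;

/* returns true only on the second consecutive suspicious check */
static bool check_tx_hang(bool made_progress)
{
	if (made_progress) {
		armed = false;		/* reset the countdown */
		return false;
	}
	if (armed)
		return true;		/* two strikes in a row: hang */
	armed = true;			/* first strike: arm only */
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_tx_hang(false),	/* 0: armed */
	       check_tx_hang(true),	/* 0: progress, disarmed */
	       check_tx_hang(false));	/* 0: armed again */
	return 0;
}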
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 01db688cf539..24f2f6f86f5a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -737,6 +737,23 @@ static void fm10k_tx_timeout(struct net_device *netdev)
}
}
+/**
+ * fm10k_host_mbx_ready - Check PF interface's mailbox readiness
+ * @interface: board private structure
+ *
+ * This function checks if the PF interface's mailbox is ready before queueing
+ * mailbox messages for transmission. This will prevent filling the TX mailbox
+ * queue when the receiver is not ready. VF interfaces are exempt from this
+ * check, since applying it to VFs would block all PF-VF mailbox messages
+ * sent from the VF to the PF at initialization.
+ **/
+static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface)
+{
+ struct fm10k_hw *hw = &interface->hw;
+
+ return (hw->mac.type == fm10k_mac_vf || interface->host_ready);
+}
+
static int fm10k_uc_vlan_unsync(struct net_device *netdev,
const unsigned char *uc_addr)
{
@@ -745,12 +762,15 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err;
+ int err = -EHOSTDOWN;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
- err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0);
+ if (fm10k_host_mbx_ready(interface))
+ err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr,
+ vid, set, 0);
+
if (err)
return err;
@@ -766,12 +786,14 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err;
+ int err = -EHOSTDOWN;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
- err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
+ if (fm10k_host_mbx_ready(interface))
+ err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
+
if (err)
return err;
@@ -822,7 +844,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
/* Do not throw an error if the interface is down. We will sync once
* we come up
*/
- if (test_bit(__FM10K_DOWN, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state))
return 0;
fm10k_mbx_lock(interface);
@@ -834,9 +856,13 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
goto err_out;
}
- /* update our base MAC address */
- err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr,
- vid, set, 0);
+ /* update our base MAC address if host's mailbox is ready */
+ if (fm10k_host_mbx_ready(interface))
+ err = hw->mac.ops.update_uc_addr(hw, interface->glort,
+ hw->mac.addr, vid, set, 0);
+ else
+ err = -EHOSTDOWN;
+
if (err)
goto err_out;
@@ -907,12 +933,15 @@ static int __fm10k_uc_sync(struct net_device *dev,
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- /* update table with current entries */
+ /* update table with current entries if host's mailbox is ready */
+ if (!fm10k_host_mbx_ready(interface))
+ return -EHOSTDOWN;
+
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = hw->mac.ops.update_uc_addr(hw, glort, addr,
- vid, sync, 0);
+ vid, sync, 0);
if (err)
return err;
}
@@ -970,7 +999,10 @@ static int __fm10k_mc_sync(struct net_device *dev,
struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
- /* update table with current entries */
+ /* update table with current entries if host's mailbox is ready */
+ if (!fm10k_host_mbx_ready(interface))
+ return 0;
+
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
@@ -1018,8 +1050,10 @@ static void fm10k_set_rx_mode(struct net_device *dev)
if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_clear_unused_vlans(interface);
- /* update xcast mode */
- hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode);
+ /* update xcast mode if host's mailbox is ready */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_xcast_mode(hw, interface->glort,
+ xcast_mode);
/* record updated xcast mode state */
interface->xcast_mode = xcast_mode;
@@ -1054,8 +1088,10 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
fm10k_mbx_lock(interface);
- /* Enable logical port */
- hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true);
+ /* Enable logical port if host's mailbox is ready */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_lport_state(hw, glort,
+ interface->glort_count, true);
/* update VLAN table */
hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0,
@@ -1069,12 +1105,18 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_vlan(hw, vid, 0, true);
- hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
- vid, true, 0);
+
+ /* Update unicast entries if host's mailbox is ready */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
+ vid, true, 0);
}
- /* update xcast mode before synchronizing addresses */
- hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
+ /* update xcast mode before synchronizing addresses if host's mailbox
+ * is ready
+ */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */
__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
@@ -1096,9 +1138,12 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
fm10k_mbx_lock(interface);
- /* clear the logical port state on lower device */
- hw->mac.ops.update_lport_state(hw, interface->glort,
- interface->glort_count, false);
+ /* clear the logical port state on lower device if host's mailbox is
+ * ready
+ */
+ if (fm10k_host_mbx_ready(interface))
+ hw->mac.ops.update_lport_state(hw, interface->glort,
+ interface->glort_count, false);
fm10k_mbx_unlock(interface);
@@ -1115,8 +1160,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
* @netdev: network interface device structure
* @stats: storage space for 64bit statistics
*
- * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
- * function replaces fm10k_get_stats for kernels which support it.
+ * Obtain 64bit statistics in a way that is safe for both 32bit and 64bit
+ * architectures.
*/
static void fm10k_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
@@ -1207,7 +1252,7 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc)
goto err_open;
/* flag to indicate SWPRI has yet to be updated */
- interface->flags |= FM10K_FLAG_SWPRI_CONFIG;
+ set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
return 0;
err_open:
@@ -1226,7 +1271,9 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return fm10k_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return fm10k_setup_tc(dev, tc->mqprio->num_tc);
}
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
@@ -1317,8 +1364,13 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
fm10k_mbx_lock(interface);
glort = l2_accel->dglort + 1 + i;
- hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI);
- hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0);
+
+ if (fm10k_host_mbx_ready(interface)) {
+ hw->mac.ops.update_xcast_mode(hw, glort,
+ FM10K_XCAST_MODE_MULTI);
+ hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr,
+ 0, true, 0);
+ }
fm10k_mbx_unlock(interface);
@@ -1352,8 +1404,13 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
fm10k_mbx_lock(interface);
glort = l2_accel->dglort + 1 + i;
- hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE);
- hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0);
+
+ if (fm10k_host_mbx_ready(interface)) {
+ hw->mac.ops.update_xcast_mode(hw, glort,
+ FM10K_XCAST_MODE_NONE);
+ hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr,
+ 0, false, 0);
+ }
fm10k_mbx_unlock(interface);
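
All of the fm10k_netdev.c hunks above apply one guard pattern: attempt a PF mailbox operation only when fm10k_host_mbx_ready() says the switch is listening, and otherwise either return -EHOSTDOWN or skip silently where failure is harmless. A minimal userspace sketch, with a stub standing in for the mac ops call:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct intfc { bool is_vf; bool host_ready; };

/* mirrors fm10k_host_mbx_ready(): VFs bypass the check so PF-VF
 * initialization traffic is never blocked */
static bool host_mbx_ready(const struct intfc *i)
{
	return i->is_vf || i->host_ready;
}

static int update_uc_addr_stub(void)
{
	return 0;	/* stands in for hw->mac.ops.update_uc_addr() */
}

static int send_addr_update(const struct intfc *i)
{
	int err = -EHOSTDOWN;

	if (host_mbx_ready(i))
		err = update_uc_addr_stub();
	return err;
}

int main(void)
{
	struct intfc pf = { .is_vf = false, .host_ready = false };

	printf("%d\n", send_addr_update(&pf));	/* negative: -EHOSTDOWN */
	return 0;
}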
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e372a5823480..3e26d27ad213 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/aer.h>
#include "fm10k.h"
@@ -92,18 +93,29 @@ static int fm10k_hw_ready(struct fm10k_intfc *interface)
void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
- if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
- !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
+ if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
+ !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) {
+ clear_bit(__FM10K_SERVICE_REQUEST, interface->state);
queue_work(fm10k_workqueue, &interface->service_task);
+ } else {
+ set_bit(__FM10K_SERVICE_REQUEST, interface->state);
+ }
}
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
- WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
+ WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state));
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
- clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
+ clear_bit(__FM10K_SERVICE_SCHED, interface->state);
+
+ /* If a service event was requested since we started, immediately
+ * re-schedule now. This ensures a request is not dropped and left
+ * waiting until the next timer event.
+ */
+ if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
+ fm10k_service_event_schedule(interface);
}
/**
@@ -136,7 +148,7 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
if (~value) {
interface->hw.hw_addr = interface->uc_addr;
netif_device_attach(netdev);
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
netdev_warn(netdev, "PCIe link restored, device now attached\n");
return;
}
@@ -158,7 +170,7 @@ static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
/* put off any impending NetWatchDogTimeout */
netif_trans_update(netdev);
- while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
+ while (test_and_set_bit(__FM10K_RESETTING, interface->state))
usleep_range(1000, 2000);
rtnl_lock();
@@ -241,7 +253,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
rtnl_unlock();
- clear_bit(__FM10K_RESETTING, &interface->state);
+ clear_bit(__FM10K_RESETTING, interface->state);
return err;
err_open:
@@ -253,7 +265,7 @@ reinit_err:
rtnl_unlock();
- clear_bit(__FM10K_RESETTING, &interface->state);
+ clear_bit(__FM10K_RESETTING, interface->state);
return err;
}
@@ -272,11 +284,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
- if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
+ if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
+ interface->flags))
return;
- interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;
-
netdev_err(interface->netdev, "Reset interface\n");
fm10k_reinit(interface);
@@ -295,7 +306,7 @@ static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
int i;
/* clear flag indicating update is needed */
- interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;
+ clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
/* these registers are only available on the PF */
if (hw->mac.type != fm10k_mac_pf)
@@ -316,14 +327,14 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
struct fm10k_hw *hw = &interface->hw;
s32 err;
- if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
+ if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
interface->host_ready = false;
if (time_is_after_jiffies(interface->link_down_event))
return;
- clear_bit(__FM10K_LINK_DOWN, &interface->state);
+ clear_bit(__FM10K_LINK_DOWN, interface->state);
}
- if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
+ if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
if (rtnl_trylock()) {
fm10k_configure_swpri_map(interface);
rtnl_unlock();
@@ -335,7 +346,7 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
if (err && time_is_before_jiffies(interface->last_reset))
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/* free the lock */
fm10k_mbx_unlock(interface);
@@ -411,7 +422,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
int i;
/* ensure only one thread updates stats at a time */
- if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+ if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
return;
/* do not allow stats update via service task for next second */
@@ -492,7 +503,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
- clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+ clear_bit(__FM10K_UPDATING_STATS, interface->state);
}
/**
@@ -522,7 +533,7 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
* controller to flush Tx.
*/
if (some_tx_pending)
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
}
/**
@@ -532,8 +543,8 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
/* if interface is down do nothing */
- if (test_bit(__FM10K_DOWN, &interface->state) ||
- test_bit(__FM10K_RESETTING, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state) ||
+ test_bit(__FM10K_RESETTING, interface->state))
return;
if (interface->host_ready)
@@ -563,8 +574,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
int i;
/* If we're down or resetting, just bail */
- if (test_bit(__FM10K_DOWN, &interface->state) ||
- test_bit(__FM10K_RESETTING, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state) ||
+ test_bit(__FM10K_RESETTING, interface->state))
return;
/* rate limit tx hang checks to only once every 2 seconds */
@@ -663,7 +674,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
FM10K_PFVTCTL_FTAG_DESC_ENABLE);
/* Initialize XPS */
- if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) &&
+ if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
ring->q_vector)
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
@@ -743,6 +754,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* disable queue to avoid issues while updating state */
rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
rxqctl &= ~FM10K_RXQCTL_ENABLE;
+ fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
fm10k_write_flush(hw);
/* possible poll here to verify ring resources have been cleaned */
@@ -863,9 +875,9 @@ static void fm10k_configure_dglort(struct fm10k_intfc *interface)
FM10K_MRQC_IPV6 |
FM10K_MRQC_TCP_IPV6;
- if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags))
mrqc |= FM10K_MRQC_UDP_IPV4;
- if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
+ if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags))
mrqc |= FM10K_MRQC_UDP_IPV6;
fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
@@ -980,7 +992,7 @@ void fm10k_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
- if (test_bit(__FM10K_DOWN, &interface->state))
+ if (test_bit(__FM10K_DOWN, interface->state))
return;
for (i = 0; i < interface->num_q_vectors; i++)
@@ -1167,13 +1179,13 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
}
if (err == FM10K_ERR_RESET_REQUESTED)
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/* if switch toggled state we should reset GLORTs */
if (eicr & FM10K_EICR_SWITCHNOTREADY) {
/* force link down for at least 4 seconds */
interface->link_down_event = jiffies + (4 * HZ);
- set_bit(__FM10K_LINK_DOWN, &interface->state);
+ set_bit(__FM10K_LINK_DOWN, interface->state);
/* reset dglort_map back to no config */
hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
@@ -1246,12 +1258,12 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
/* MAC was changed so we need reset */
if (is_valid_ether_addr(hw->mac.perm_addr) &&
!ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/* VLAN override was changed, or default VLAN changed */
if ((vlan_override != hw->mac.vlan_override) ||
(default_vid != hw->mac.default_vid))
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
return 0;
}
@@ -1325,7 +1337,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
if (!err && hw->swapi.status) {
/* force link down for a reasonable delay */
interface->link_down_event = jiffies + (2 * HZ);
- set_bit(__FM10K_LINK_DOWN, &interface->state);
+ set_bit(__FM10K_LINK_DOWN, interface->state);
/* reset dglort_map back to no config */
hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
@@ -1356,7 +1368,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
/* we need to reset if port count was just updated */
if (dglort_map != hw->mac.dglort_map)
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
return 0;
}
@@ -1395,7 +1407,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
/* we need to reset if default VLAN was just updated */
if (pvid != hw->mac.default_vid)
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
hw->mac.default_vid = pvid;
@@ -1623,10 +1635,10 @@ void fm10k_up(struct fm10k_intfc *interface)
hw->mac.ops.update_int_moderator(hw);
/* enable statistics capture again */
- clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+ clear_bit(__FM10K_UPDATING_STATS, interface->state);
/* clear down bit to indicate we are ready to go */
- clear_bit(__FM10K_DOWN, &interface->state);
+ clear_bit(__FM10K_DOWN, interface->state);
/* enable polling cleanups */
fm10k_napi_enable_all(interface);
@@ -1660,7 +1672,7 @@ void fm10k_down(struct fm10k_intfc *interface)
int err, i = 0, count = 0;
/* signal that we are down to the interrupt handler and service task */
- if (test_and_set_bit(__FM10K_DOWN, &interface->state))
+ if (test_and_set_bit(__FM10K_DOWN, interface->state))
return;
/* call carrier off first to avoid false dev_watchdog timeouts */
@@ -1680,7 +1692,7 @@ void fm10k_down(struct fm10k_intfc *interface)
fm10k_update_stats(interface);
/* prevent updating statistics while we're down */
- while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+ while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
usleep_range(1000, 2000);
/* skip waiting for TX DMA if we lost PCIe link */
@@ -1849,8 +1861,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
memcpy(interface->rssrk, rss_key, sizeof(rss_key));
/* Start off interface as being down */
- set_bit(__FM10K_DOWN, &interface->state);
- set_bit(__FM10K_UPDATING_STATS, &interface->state);
+ set_bit(__FM10K_DOWN, interface->state);
+ set_bit(__FM10K_UPDATING_STATS, interface->state);
return 0;
}
@@ -2027,7 +2039,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* must ensure it is disabled since we haven't yet requested the timer
* or work item.
*/
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ set_bit(__FM10K_SERVICE_DISABLE, interface->state);
err = fm10k_mbx_request_irq(interface);
if (err)
@@ -2068,7 +2080,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fm10k_iov_configure(pdev, 0);
/* clear the service task disable bit to allow service task to start */
- clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
return 0;
@@ -2106,7 +2118,7 @@ static void fm10k_remove(struct pci_dev *pdev)
del_timer_sync(&interface->service_timer);
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ set_bit(__FM10K_SERVICE_DISABLE, interface->state);
cancel_work_sync(&interface->service_task);
/* free netdev, this may bounce the interrupts due to setup_tc */
@@ -2145,7 +2157,7 @@ static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
* stopped. We stop the watchdog task until after we resume software
* activity.
*/
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ set_bit(__FM10K_SERVICE_DISABLE, interface->state);
cancel_work_sync(&interface->service_task);
fm10k_prepare_for_reset(interface);
@@ -2171,10 +2183,10 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
/* force link to stay down for a second to prevent link flutter */
interface->link_down_event = jiffies + (HZ);
- set_bit(__FM10K_LINK_DOWN, &interface->state);
+ set_bit(__FM10K_LINK_DOWN, interface->state);
/* clear the service task disable bit to allow service task to start */
- clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
fm10k_service_event_schedule(interface);
return err;
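
The new __FM10K_SERVICE_REQUEST bit fixes a lost-wakeup window: a request arriving while the service task is already running used to be dropped until the next timer tick. The pattern is to record the request when scheduling is refused and re-schedule from the completion path. Sketched below with plain bools for brevity (the driver uses the atomic bit operations shown in the hunks above):

#include <stdbool.h>
#include <stdio.h>

static bool sched, request;

static void queue_work(void) { printf("work queued\n"); }

static void service_event_schedule(void)
{
	if (!sched) {
		sched = true;
		request = false;
		queue_work();
	} else {
		request = true;	/* remember: someone still needs a run */
	}
}

static void service_event_complete(void)
{
	sched = false;
	/* re-queue immediately instead of waiting for the timer tick */
	if (request)
		service_event_schedule();
}

int main(void)
{
	service_event_schedule();	/* first run queued */
	service_event_schedule();	/* arrives mid-run: only sets request */
	service_event_complete();	/* re-queues the pending request */
	return 0;
}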
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 3b3c63e54ed6..3da482c3d68d 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -28,6 +28,9 @@
# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
#
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
obj-$(CONFIG_I40E) += i40e.o
i40e-objs := i40e_main.o \
@@ -45,4 +48,3 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 82d8040fa418..cdde3cc28fb5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -56,9 +56,6 @@
#include <linux/ptp_clock_kernel.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
-#ifdef I40E_FCOE
-#include "i40e_fcoe.h"
-#endif
#include "i40e_client.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
@@ -85,10 +82,6 @@
(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
-#ifdef I40E_FCOE
-#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
-#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
-#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
@@ -98,14 +91,6 @@
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
-/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4)
-#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
-
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
@@ -140,7 +125,6 @@ enum i40e_state_t {
__I40E_CONFIG_BUSY,
__I40E_CONFIG_DONE,
__I40E_DOWN,
- __I40E_NEEDS_RESTART,
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
@@ -153,15 +137,28 @@ enum i40e_state_t {
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
- __I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
__I40E_PTP_TX_IN_PROGRESS,
__I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED,
__I40E_RESET_FAILED,
- __I40E_PORT_TX_SUSPENDED,
+ __I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_STATE_SIZE__,
+};
+
+/* VSI state flags */
+enum i40e_vsi_state_t {
+ __I40E_VSI_DOWN,
+ __I40E_VSI_NEEDS_RESTART,
+ __I40E_VSI_SYNCING_FILTERS,
+ __I40E_VSI_OVERFLOW_PROMISC,
+ __I40E_VSI_REINIT_REQUESTED,
+ __I40E_VSI_DOWN_REQUESTED,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_VSI_STATE_SIZE__,
};
enum i40e_interrupt_policy {
@@ -202,17 +199,32 @@ enum i40e_fd_stat_idx {
#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
+/* The following structure contains the data parsed from the user-defined
+ * field of the ethtool_rx_flow_spec structure.
+ */
+struct i40e_rx_flow_userdef {
+ bool flex_filter;
+ u16 flex_word;
+ u16 flex_offset;
+};
+
struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
u8 flow_type;
u8 ip4_proto;
/* TX packet view of src and dst */
- __be32 dst_ip[4];
- __be32 src_ip[4];
+ __be32 dst_ip;
+ __be32 src_ip;
__be16 src_port;
__be16 dst_port;
__be32 sctp_v_tag;
+
+ /* Flexible data to match within the packet payload */
+ __be16 flex_word;
+ u16 flex_offset;
+ bool flex_filter;
+
/* filter control */
u16 q_index;
u8 flex_off;
@@ -244,15 +256,85 @@ struct i40e_tc_configuration {
};
struct i40e_udp_port_config {
- __be16 index;
+ /* AdminQ command interface expects port number in Host byte order */
+ u16 port;
u8 type;
};
+/* macros related to FLX_PIT */
+#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
+ I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK)
+#define I40E_FLEX_SET_DST_WORD(dst) (((dst) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK)
+#define I40E_FLEX_SET_SRC_WORD(src) (((src) << \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK)
+#define I40E_FLEX_PREP_VAL(dst, fsize, src) (I40E_FLEX_SET_DST_WORD(dst) | \
+ I40E_FLEX_SET_FSIZE(fsize) | \
+ I40E_FLEX_SET_SRC_WORD(src))
+
+#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \
+ I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+
+#define I40E_MAX_FLEX_SRC_OFFSET 0x1F
+
+/* macros related to GLQF_ORT */
+#define I40E_ORT_SET_IDX(idx) (((idx) << \
+ I40E_GLQF_ORT_PIT_INDX_SHIFT) & \
+ I40E_GLQF_ORT_PIT_INDX_MASK)
+
+#define I40E_ORT_SET_COUNT(count) (((count) << \
+ I40E_GLQF_ORT_FIELD_CNT_SHIFT) & \
+ I40E_GLQF_ORT_FIELD_CNT_MASK)
+
+#define I40E_ORT_SET_PAYLOAD(payload) (((payload) << \
+ I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) & \
+ I40E_GLQF_ORT_FLX_PAYLOAD_MASK)
+
+#define I40E_ORT_PREP_VAL(idx, count, payload) (I40E_ORT_SET_IDX(idx) | \
+ I40E_ORT_SET_COUNT(count) | \
+ I40E_ORT_SET_PAYLOAD(payload))
+
+#define I40E_L3_GLQF_ORT_IDX 34
+#define I40E_L4_GLQF_ORT_IDX 35
+
+/* Flex PIT register index */
+#define I40E_FLEX_PIT_IDX_START_L2 0
+#define I40E_FLEX_PIT_IDX_START_L3 3
+#define I40E_FLEX_PIT_IDX_START_L4 6
+
+#define I40E_FLEX_PIT_TABLE_SIZE 3
+
+#define I40E_FLEX_DEST_UNUSED 63
+
+#define I40E_FLEX_INDEX_ENTRIES 8
+
+/* Flex MASK to disable all flexible entries */
+#define I40E_FLEX_INPUT_MASK (I40E_FLEX_50_MASK | I40E_FLEX_51_MASK | \
+ I40E_FLEX_52_MASK | I40E_FLEX_53_MASK | \
+ I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
+ I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+
+struct i40e_flex_pit {
+ struct list_head list;
+ u16 src_offset;
+ u8 pit_index;
+};
+
/* struct that defines the Ethernet device */
struct i40e_pf {
struct pci_dev *pdev;
struct i40e_hw hw;
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
struct msix_entry *msix_entries;
bool fc_autoneg_status;
@@ -262,10 +344,6 @@ struct i40e_pf {
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num VFs requested for this VF */
u16 num_vf_qps; /* num queue pairs per VF */
-#ifdef I40E_FCOE
- u16 num_fcoe_qps; /* num fcoe queues this PF has set up */
- u16 num_fcoe_msix; /* num queue vectors per fcoe pool */
-#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
@@ -285,7 +363,23 @@ struct i40e_pf {
u32 fd_flush_cnt;
u32 fd_add_err;
u32 fd_atr_cnt;
- u32 fd_tcp_rule;
+
+ /* Book-keeping of side-band filter count per flow-type.
+ * This is used to detect and handle input set changes for
+ * respective flow-type.
+ */
+ u16 fd_tcp4_filter_cnt;
+ u16 fd_udp4_filter_cnt;
+ u16 fd_sctp4_filter_cnt;
+ u16 fd_ip4_filter_cnt;
+
+ /* Flexible filter table values that need to be programmed into
+ * hardware, which expects L3 and L4 to be programmed separately. We
+ * need to ensure that the values are in ascending order and don't have
+ * duplicates, so we track the L3 and L4 values in separate lists.
+ */
+ struct list_head l3_flex_pit_list;
+ struct list_head l4_flex_pit_list;
struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
u16 pending_udp_bitmap;
@@ -307,21 +401,15 @@ struct i40e_pf {
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
-#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9)
#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
-#ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
-#endif /* I40E_FCOE */
-#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
-#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24)
#define I40E_FLAG_PTP BIT_ULL(25)
#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27)
@@ -348,16 +436,13 @@ struct i40e_pf {
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52)
#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53)
-#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54)
+#define I40E_FLAG_CLIENT_RESET BIT_ULL(54)
#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
+#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56)
+#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57)
+#define I40E_FLAG_LEGACY_RX BIT_ULL(58)
- /* tracks features that get auto disabled by errors */
- u64 auto_disable_flags;
-
-#ifdef I40E_FCOE
- struct i40e_fcoe fcoe;
-
-#endif /* I40E_FCOE */
+ struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
@@ -412,8 +497,6 @@ struct i40e_pf {
*/
u16 dcbx_cap;
- u32 fcoe_hmc_filt_num;
- u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
struct ptp_clock *ptp_clock;
@@ -517,7 +600,7 @@ struct i40e_vsi {
bool stat_offsets_loaded;
u32 current_netdev_flags;
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
@@ -533,16 +616,10 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
-#ifdef I40E_FCOE
- struct i40e_fcoe_stats fcoe_stats;
- struct i40e_fcoe_stats fcoe_stats_offsets;
- bool fcoe_stat_offsets_loaded;
-#endif
u32 tx_restart;
u32 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
- u64 tx_lost_interrupt;
u32 rx_buf_failed;
u32 rx_page_failed;
@@ -628,9 +705,6 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
-#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */
- unsigned long hung_detected; /* Set/Reset for hung_detection logic */
-
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
@@ -696,27 +770,49 @@ static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
}
/**
- * i40e_rx_is_programming_status - check for programming status descriptor
- * @qw: the first quad word of the program status descriptor
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the PF struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+ return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
+/**
+ * i40e_read_fd_input_set - reads value of flow director input set register
+ * @pf: pointer to the PF struct
+ * @addr: register addr
*
- * The value of in the descriptor length field indicate if this
- * is a programming status descriptor for flow director or FCoE
- * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise
- * it is a packet descriptor.
+ * This function reads the value of the flow director input set register
+ * specified by 'addr' (which is specific to the flow-type).
**/
-static inline bool i40e_rx_is_programming_status(u64 qw)
+static inline u64 i40e_read_fd_input_set(struct i40e_pf *pf, u16 addr)
{
- return I40E_RX_PROG_STATUS_DESC_LENGTH ==
- (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
+ u64 val;
+
+ val = i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1));
+ val <<= 32;
+ val += i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0));
+
+ return val;
}
/**
- * i40e_get_fd_cnt_all - get the total FD filter space available
+ * i40e_write_fd_input_set - writes value into flow director input set register
* @pf: pointer to the PF struct
+ * @addr: register addr
+ * @val: value to be written
+ *
+ * This function writes the specified value to the register specified by
+ * 'addr'. This register is the input set register for the given flow-type.
**/
-static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
+ u16 addr, u64 val)
{
- return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+ i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1),
+ (u32)(val >> 32));
+ i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0),
+ (u32)(val & 0xFFFFFFFFULL));
}
/* needed by i40e_ethtool.c */
@@ -725,7 +821,7 @@ void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
@@ -773,17 +869,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi);
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
- struct i40e_vsi_context *ctxt,
- u8 enabled_tc, bool is_add);
-#endif
void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len);
int i40e_vsi_start_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
@@ -813,8 +906,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
-int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
- enum i40e_client_type type);
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
* @vsi: pointer to a vsi
@@ -838,20 +930,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
-#ifdef I40E_FCOE
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
- struct rtnl_link_stats64 *storage);
-int i40e_set_mac(struct net_device *netdev, void *p);
-void i40e_set_rx_mode(struct net_device *netdev);
-#endif
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev);
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid);
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid);
-#endif
int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
@@ -865,25 +944,6 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc);
-void i40e_netpoll(struct net_device *netdev);
-int i40e_fcoe_enable(struct net_device *netdev);
-int i40e_fcoe_disable(struct net_device *netdev);
-int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
-u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
-void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
-void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-void i40e_init_pf_fcoe(struct i40e_pf *pf);
-int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
-void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
-int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb);
-void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, u8 prog_id);
-#endif /* I40E_FCOE */
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 56fb27298936..ba04988e0598 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -850,8 +850,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- usleep_range(1000, 2000);
- total_delay++;
+ udelay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
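Because the poll now uses udelay(50), total_delay accumulates microseconds instead of loop iterations, so the timeout constant must move to the same unit; the i40e_adminq.h hunk below makes that change. A standalone sketch of the arithmetic, showing the budget is still 250 ms:

	/* Standalone sketch: 250 ms expressed as 50 us polling steps. */
	#include <assert.h>

	int main(void)
	{
		unsigned int total_delay = 0;
		const unsigned int timeout = 250000;	/* I40E_ASQ_CMD_TIMEOUT, usecs */

		while (total_delay < timeout)
			total_delay += 50;		/* one udelay(50) per poll */

		assert(total_delay / 1000 == 250);	/* unchanged 250 ms budget */
		return 0;
	}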
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index d92aad38afdc..2349fbe04bd2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -151,7 +151,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 451f48b7540a..5eb04114e13f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,10 +185,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ /* Pipeline Personalization Profile */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -563,6 +576,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
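A sketch of how a caller might pack the new Set WoL Filter descriptor using the shift/mask definitions above; the helper name is hypothetical and the AdminQ submission itself is elided:

	/* Sketch only: program slot 'index' as a magic-packet WoL filter. */
	static void i40e_example_fill_wol_filter(struct i40e_aqc_set_wol_filter *cmd,
						 u16 index)
	{
		u16 idx = (index << I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) &
			  I40E_AQC_SET_WOL_FILTER_INDEX_MASK;

		/* bit 15 of filter_index selects a magic-packet filter */
		cmd->filter_index = cpu_to_le16(idx |
				I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK);
		cmd->cmd_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER |
				I40E_AQC_SET_WOL_FILTER_ACTION_SET);
		cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER_ACTION_VALID);
	}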
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -645,6 +708,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -696,6 +761,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -1369,6 +1435,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+/* Pipeline Personalization Profile */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ppp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_PPP_GET_CONF 0x1
+#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1844,11 +1940,12 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
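The renamed power_desc byte keeps the power class in its two low bits, and the new I40E_AQ_PWR_CLASS_MASK makes the extraction explicit. A one-line sketch of the intended decode (the helper name is illustrative):

	/* Sketch: decode the link power class from the descriptor byte. */
	static u8 i40e_example_power_class(u8 power_desc)
	{
		return power_desc & I40E_AQ_PWR_CLASS_MASK;	/* CLASS_1..CLASS_4 */
	}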
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index d570219efd9f..c3b81a97558e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -32,16 +32,10 @@
#include "i40e_client.h"
static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
-
+static struct i40e_client *registered_client;
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
-static LIST_HEAD(i40e_clients);
-static DEFINE_MUTEX(i40e_client_mutex);
-
-static LIST_HEAD(i40e_client_instances);
-static DEFINE_MUTEX(i40e_client_instance_mutex);
-
static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u32 vf_id, u8 *msg, u16 len);
@@ -67,28 +61,6 @@ static struct i40e_ops i40e_lan_ops = {
};
/**
- * i40e_client_type_to_vsi_type - convert client type to vsi type
- * @client_type: the i40e_client type
- *
- * returns the related vsi type value
- **/
-static
-enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
-{
- switch (type) {
- case I40E_CLIENT_IWARP:
- return I40E_VSI_IWARP;
-
- case I40E_CLIENT_VMDQ2:
- return I40E_VSI_VMDQ2;
-
- default:
- pr_err("i40e: Client type unknown\n");
- return I40E_VSI_TYPE_UNKNOWN;
- }
-}
-
-/**
* i40e_client_get_params - Get the params that can change at runtime
* @vsi: the VSI with the message
 * @params: client param struct
@@ -134,31 +106,22 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
void
i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!vsi)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->virtchnl_receive) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance virtual channel receive routine\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == vsi->back) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->virtchnl_receive) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance virtual channel receive routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n");
- continue;
- }
- cdev->client->ops->virtchnl_receive(&cdev->lan_info,
- cdev->client,
- vf_id, msg, len);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n");
+ return;
+ }
+ cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client,
+ vf_id, msg, len);
}
/**
@@ -169,39 +132,30 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
**/
void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
struct i40e_params params;
- if (!vsi)
+ if (!cdev || !cdev->client)
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == vsi->back) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance l2_param_change routine\n");
- continue;
- }
+ if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ return;
+ }
memset(&params, 0, sizeof(params));
i40e_client_get_params(vsi, &params);
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
- continue;
- }
- cdev->lan_info.params = params;
- cdev->client->ops->l2_param_change(&cdev->lan_info,
- cdev->client,
- &params);
- }
- }
- mutex_unlock(&i40e_client_instance_mutex);
+ memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params));
+ cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client,
+ &params);
}
/**
- * i40e_client_release_qvlist
+ * i40e_client_release_qvlist - release MSI-X vector mapping for client
* @ldev: pointer to L2 context.
*
**/
@@ -237,26 +191,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
**/
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!vsi)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close routine\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.netdev == vsi->netdev) {
- if (!cdev->client ||
- !cdev->client->ops || !cdev->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance close routine\n");
- continue;
- }
- cdev->client->ops->close(&cdev->lan_info, cdev->client,
- reset);
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
- i40e_client_release_qvlist(&cdev->lan_info);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ i40e_client_release_qvlist(&cdev->lan_info);
}
/**
@@ -268,30 +215,20 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
**/
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!pf)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->vf_reset) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF reset routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_reset) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF reset routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
- continue;
- }
- cdev->client->ops->vf_reset(&cdev->lan_info,
- cdev->client, vf_id);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id);
}
/**
@@ -303,30 +240,21 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
**/
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!pf)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->vf_enable) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF enable routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_enable) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF enable routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
- continue;
- }
- cdev->client->ops->vf_enable(&cdev->lan_info,
- cdev->client, num_vfs);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs);
}
/**
@@ -337,37 +265,25 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
 * If there is a client attached to this PF, call
* its vf_capable routine
**/
-int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
- enum i40e_client_type type)
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
int capable = false;
- if (!pf)
- return false;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_capable ||
- !(cdev->client->type == type)) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF capability routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n");
- continue;
- }
- capable = cdev->client->ops->vf_capable(&cdev->lan_info,
- cdev->client,
- vf_id);
- break;
- }
+ if (!cdev || !cdev->client)
+ goto out;
+ if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
+ dev_info(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
+ goto out;
}
- mutex_unlock(&i40e_client_instance_mutex);
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
+ goto out;
+
+ capable = cdev->client->ops->vf_capable(&cdev->lan_info,
+ cdev->client,
+ vf_id);
+out:
return capable;
}
@@ -377,27 +293,19 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
* @client: pointer to a client struct in the client list.
* @existing: if there was already an existing instance
*
- * Returns cdev ptr on success or if already exists, NULL on failure
**/
-static
-struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
- struct i40e_client *client,
- bool *existing)
+static void i40e_client_add_instance(struct i40e_pf *pf)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = NULL;
struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
- *existing = true;
- goto out;
- }
- }
+ if (!registered_client || pf->cinst)
+ return;
+
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
- goto out;
+ return;
cdev->lan_info.pf = (void *)pf;
cdev->lan_info.netdev = vsi->netdev;
@@ -417,7 +325,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
kfree(cdev);
cdev = NULL;
- goto out;
+ return;
}
cdev->lan_info.msix_count = pf->num_iwarp_msix;
@@ -430,41 +338,20 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
else
dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
- cdev->client = client;
- INIT_LIST_HEAD(&cdev->list);
- list_add(&cdev->list, &i40e_client_instances);
-out:
- mutex_unlock(&i40e_client_instance_mutex);
- return cdev;
+ cdev->client = registered_client;
+ pf->cinst = cdev;
}
/**
* i40e_client_del_instance - removes a client instance from the list
* @pf: pointer to the board struct
*
- * Returns 0 on success or non-0 on error
**/
static
-int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
+void i40e_client_del_instance(struct i40e_pf *pf)
{
- struct i40e_client_instance *cdev, *tmp;
- int ret = -ENODEV;
-
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
- if ((cdev->lan_info.pf != pf) || (cdev->client != client))
- continue;
-
- dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
- list_del(&cdev->list);
- kfree(cdev);
- ret = 0;
- break;
- }
- mutex_unlock(&i40e_client_instance_mutex);
- return ret;
+ kfree(pf->cinst);
+ pf->cinst = NULL;
}
/**
@@ -473,67 +360,50 @@ int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
+ struct i40e_client *client = registered_client;
struct i40e_client_instance *cdev;
- struct i40e_client *client;
- bool existing = false;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
return;
pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ cdev = pf->cinst;
/* If we're down or resetting, just bail */
- if (test_bit(__I40E_DOWN, &pf->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
- /* Check client state and instantiate client if client registered */
- mutex_lock(&i40e_client_mutex);
- list_for_each_entry(client, &i40e_clients, list) {
- /* first check client is registered */
- if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
- continue;
-
- /* Do we also need the LAN VSI to be up, to create instance */
- if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
- /* check if L2 VSI is up, if not we are not ready */
- if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
- continue;
- } else {
- dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
- client->name);
- }
-
- /* Add the client instance to the instance list */
- cdev = i40e_client_add_instance(pf, client, &existing);
- if (!cdev)
- continue;
-
- if (!existing) {
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.bus_id, pf->hw.bus.device,
- pf->hw.bus.func);
- }
+ if (!client || !cdev)
+ return;
- mutex_lock(&i40e_client_instance_mutex);
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- /* Send an Open request to the client */
- if (client->ops && client->ops->open)
- ret = client->ops->open(&cdev->lan_info,
- client);
- if (!ret) {
- set_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state);
- } else {
- /* remove client instance */
- i40e_client_del_instance(pf, client);
+ /* Here we handle client opens. If the client is down, but
+ * the netdev is up, then open the client.
+ */
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ client->ops && client->ops->open) {
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ ret = client->ops->open(&cdev->lan_info, client);
+ if (ret) {
+ /* Remove failed client instance */
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ i40e_client_del_instance(pf);
}
}
- mutex_unlock(&i40e_client_instance_mutex);
+ } else {
+ /* Likewise for client close. If the client is up, but the netdev
+ * is down, then close the client.
+ */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ client->ops && client->ops->close) {
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ client->ops->close(&cdev->lan_info, client, false);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ }
}
- mutex_unlock(&i40e_client_mutex);
}
/**
@@ -566,6 +436,12 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.bus_id,
pf->hw.bus.device, pf->hw.bus.func);
+ /* If a client has already been registered, we need to add an instance
+ * of it to our new LAN device.
+ */
+ if (registered_client)
+ i40e_client_add_instance(pf);
+
/* Since in some cases register may have happened before a device gets
* added, we can schedule a subtask to go initiate the clients if
* they can be launched at probe time.
@@ -589,6 +465,9 @@ int i40e_lan_del_device(struct i40e_pf *pf)
struct i40e_device *ldev, *tmp;
int ret = -ENODEV;
+ /* First, remove any client instance. */
+ i40e_client_del_instance(pf);
+
mutex_lock(&i40e_device_mutex);
list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
if (ldev->pf == pf) {
@@ -601,7 +480,6 @@ int i40e_lan_del_device(struct i40e_pf *pf)
break;
}
}
-
mutex_unlock(&i40e_device_mutex);
return ret;
}
@@ -610,22 +488,24 @@ int i40e_lan_del_device(struct i40e_pf *pf)
* i40e_client_release - release client specific resources
* @client: pointer to the registered client
*
- * Return 0 on success or < 0 on error
**/
-static int i40e_client_release(struct i40e_client *client)
+static void i40e_client_release(struct i40e_client *client)
{
- struct i40e_client_instance *cdev, *tmp;
+ struct i40e_client_instance *cdev;
+ struct i40e_device *ldev;
struct i40e_pf *pf;
- int ret = 0;
- LIST_HEAD(cdevs_tmp);
-
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
- if (strncmp(cdev->client->name, client->name,
- I40E_CLIENT_STR_LENGTH))
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
+ pf = ldev->pf;
+ cdev = pf->cinst;
+ if (!cdev)
continue;
- pf = (struct i40e_pf *)cdev->lan_info.pf;
+
+ while (test_and_set_bit(__I40E_SERVICE_SCHED,
+ pf->state))
+ usleep_range(500, 1000);
+
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
@@ -637,18 +517,13 @@ static int i40e_client_release(struct i40e_client *client)
"Client %s instance for PF id %d closed\n",
client->name, pf->hw.pf_id);
}
- /* delete the client instance from the list */
- list_move(&cdev->list, &cdevs_tmp);
+ /* delete the client instance */
+ i40e_client_del_instance(pf);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
}
- mutex_unlock(&i40e_client_instance_mutex);
-
- /* free the client device and release its vsi */
- list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
- kfree(cdev);
- }
- return ret;
+ mutex_unlock(&i40e_device_mutex);
}
/**
@@ -664,6 +539,7 @@ static void i40e_client_prepare(struct i40e_client *client)
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
pf = ldev->pf;
+ i40e_client_add_instance(pf);
/* Start the client subtask */
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
@@ -785,15 +661,15 @@ static void i40e_client_request_reset(struct i40e_info *ldev,
switch (reset_level) {
case I40E_CLIENT_RESET_LEVEL_PF:
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
case I40E_CLIENT_RESET_LEVEL_CORE:
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
default:
dev_warn(&pf->pdev->dev,
- "Client %s instance for PF id %d request an unsupported reset: %d.\n",
- client->name, pf->hw.pf_id, reset_level);
+ "Client for PF id %d requested an unsupported reset: %d.\n",
+ pf->hw.pf_id, reset_level);
break;
}
@@ -852,8 +728,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
} else {
update = false;
dev_warn(&pf->pdev->dev,
- "Client %s instance for PF id %d request an unsupported Config: %x.\n",
- client->name, pf->hw.pf_id, flag);
+ "Client for PF id %d request an unsupported Config: %x.\n",
+ pf->hw.pf_id, flag);
}
if (update) {
@@ -878,7 +754,6 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
int i40e_register_client(struct i40e_client *client)
{
int ret = 0;
- enum i40e_vsi_type vsi_type;
if (!client) {
ret = -EIO;
@@ -891,11 +766,9 @@ int i40e_register_client(struct i40e_client *client)
goto out;
}
- mutex_lock(&i40e_client_mutex);
- if (i40e_client_is_registered(client)) {
+ if (registered_client) {
pr_info("i40e: Client %s has already been registered!\n",
client->name);
- mutex_unlock(&i40e_client_mutex);
ret = -EEXIST;
goto out;
}
@@ -908,22 +781,11 @@ int i40e_register_client(struct i40e_client *client)
client->version.major, client->version.minor,
client->version.build,
i40e_client_interface_version_str);
- mutex_unlock(&i40e_client_mutex);
ret = -EIO;
goto out;
}
- vsi_type = i40e_client_type_to_vsi_type(client->type);
- if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
- pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
- client->name, client->type);
- mutex_unlock(&i40e_client_mutex);
- ret = -EIO;
- goto out;
- }
- list_add(&client->list, &i40e_clients);
- set_bit(__I40E_CLIENT_REGISTERED, &client->state);
- mutex_unlock(&i40e_client_mutex);
+ registered_client = client;
i40e_client_prepare(client);
@@ -943,29 +805,21 @@ int i40e_unregister_client(struct i40e_client *client)
{
int ret = 0;
- /* When a unregister request comes through we would have to send
- * a close for each of the client instances that were opened.
- * client_release function is called to handle this.
- */
- mutex_lock(&i40e_client_mutex);
- if (!client || i40e_client_release(client)) {
- ret = -EIO;
- goto out;
- }
-
- /* TODO: check if device is in reset, or if that matters? */
- if (!i40e_client_is_registered(client)) {
+ if (registered_client != client) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
ret = -ENODEV;
goto out;
}
- clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
- list_del(&client->list);
- pr_info("i40e: Unregistered client %s with return code %d\n",
- client->name, ret);
+ registered_client = NULL;
+ /* When an unregister request comes through, we have to send
+ * a close for each of the client instances that were opened.
+ * The client_release function is called to handle this.
+ */
+ i40e_client_release(client);
+
+ pr_info("i40e: Unregistered client %s\n", client->name);
out:
- mutex_unlock(&i40e_client_mutex);
return ret;
}
EXPORT_SYMBOL(i40e_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 528bd79b05fe..15b21a5315b5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -57,11 +57,6 @@ enum i40e_client_instance_state {
__I40E_CLIENT_INSTANCE_OPENED,
};
-enum i40e_client_type {
- I40E_CLIENT_IWARP,
- I40E_CLIENT_VMDQ2
-};
-
struct i40e_ops;
struct i40e_client;
@@ -214,7 +209,8 @@ struct i40e_client {
u32 flags;
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
- enum i40e_client_type type;
+ u8 type;
+#define I40E_CLIENT_IWARP 0
const struct i40e_client_ops *ops; /* client ops provided by the client */
};
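With the enum gone, a client fills the plain u8 type field with I40E_CLIENT_IWARP and goes through i40e_register_client(), which now tracks at most one registered_client. A minimal registration sketch; the ops table is hypothetical and the version fields (checked against the interface version at registration) are elided for brevity:

	/* Sketch only: skeleton of an iWARP client under the single-client model. */
	static struct i40e_client example_client = {
		.name = "example_iwarp",
		.type = I40E_CLIENT_IWARP,
		.ops = &example_client_ops,	/* hypothetical ops table */
	};

	static int __init example_init(void)
	{
		/* Returns -EEXIST if another client is already registered. */
		return i40e_register_client(&example_client);
	}

	static void __exit example_exit(void)
	{
		i40e_unregister_client(&example_client);
	}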
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index ece57d6a6e23..24f020655291 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1088,33 +1088,6 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}
-#ifdef I40E_FCOE
-
-/**
- * i40e_get_san_mac_addr - get SAN MAC address
- * @hw: pointer to the HW structure
- * @mac_addr: pointer to SAN MAC address
- *
- * Reads the adapter's SAN MAC address from NVM
- **/
-i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
-{
- struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
- u16 flags = 0;
-
- status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
- if (status)
- return status;
-
- if (flags & I40E_AQC_SAN_ADDR_VALID)
- ether_addr_copy(mac_addr, addrs.pf_san_mac);
- else
- status = I40E_ERR_INVALID_MAC_ADDR;
-
- return status;
-}
-#endif
/**
* i40e_read_pba_string - Reads part number string from EEPROM
@@ -4990,7 +4963,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -5049,7 +5024,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -5065,3 +5042,215 @@ do_retry:
if (status || use_register)
wr32(hw, reg_addr, reg_val);
}
+
+/**
+ * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ppp_resp *resp;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->profile_track_id = cpu_to_le32(track_id);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @flags: applied-profile report flags (I40E_AQC_GET_PPP_*)
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_find_segment_in_package - find a specific segment in a package
+ * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_write_profile - write a profile segment to the device
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ u32 dev_cnt;
+ u32 vendor_dev_id;
+ u32 *nvm;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 i;
+
+ if (!track_id) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
+ if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ nvm = (u32 *)&profile->device_table[dev_cnt];
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec = (struct i40e_profile_section_header *)((u8 *)profile +
+ sec_tbl->section_offset[i]);
+
+ /* Skip 'AQ', 'note' and 'name' sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write profile */
+ status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: offset %d, info %d",
+ offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_add_pinfo_to_list - add profile info to the applied-profile list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_PPP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+
+ status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
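Taken together, the new helpers form the pipeline personalization profile download path: find the I40E segment in a package image, stream its MMIO sections to firmware with i40e_aq_write_ppp(), then record the profile via an info section. A sketch of a caller, assuming the package blob is already in memory; the wrapper name and error choices are illustrative:

	/* Sketch only: end-to-end PPP load, error handling abbreviated. */
	static i40e_status i40e_example_load_ppp(struct i40e_hw *hw,
						 struct i40e_package_header *pkg,
						 u32 track_id)
	{
		u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
			     sizeof(struct i40e_profile_info)];
		struct i40e_profile_segment *profile;
		i40e_status status;

		profile = (struct i40e_profile_segment *)
			  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
		if (!profile)
			return I40E_ERR_PARAM;

		status = i40e_write_profile(hw, profile, track_id);
		if (status)
			return status;

		return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
	}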
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 267ad2588255..8f326f87a815 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -158,9 +158,12 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev,
- " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
- vsi->state, vsi->flags,
- vsi->netdev_registered, vsi->current_netdev_flags);
+ " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
+ vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
+ for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
+ dev_info(&pf->pdev->dev,
+ " state[%d] = %08lx\n",
+ i, vsi->state[i]);
if (vsi == pf->vsi[pf->lan_vsi])
dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
@@ -174,7 +177,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
}
dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
vsi->active_filters, vsi->promisc_threshold,
- (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
+ (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
"ON" : "OFF"));
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
@@ -384,6 +387,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
+ if (vsi->type == I40E_VSI_SRIOV)
+ dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
dev_info(&pf->pdev->dev,
" info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
vsi->info.valid_sections, vsi->info.switch_id);
@@ -484,25 +489,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->bw_ets_limit_credits[i],
vsi->bw_ets_max_quanta[i]);
}
-#ifdef I40E_FCOE
- if (vsi->type == I40E_VSI_FCOE) {
- dev_info(&pf->pdev->dev,
- " fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
- vsi->fcoe_stats.rx_fcoe_packets,
- vsi->fcoe_stats.rx_fcoe_dwords,
- vsi->fcoe_stats.rx_fcoe_dropped);
- dev_info(&pf->pdev->dev,
- " fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
- vsi->fcoe_stats.tx_fcoe_packets,
- vsi->fcoe_stats.tx_fcoe_dwords);
- dev_info(&pf->pdev->dev,
- " fcoe_stats: bad_crc = %llu, last_error = %llu\n",
- vsi->fcoe_stats.fcoe_bad_fccrc,
- vsi->fcoe_stats.fcoe_last_error);
- dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n",
- vsi->fcoe_stats.fcoe_ddp_count);
- }
-#endif
}
/**
@@ -713,6 +699,47 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
}
}
+/**
+ * i40e_dbg_dump_vf - dump VF info
+ * @pf: the i40e_pf created in command write
+ * @vf_id: the vf_id from the user
+ **/
+static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
+{
+ struct i40e_vf *vf;
+ struct i40e_vsi *vsi;
+
+ if (!pf->num_alloc_vfs) {
+ dev_info(&pf->pdev->dev, "no VFs allocated\n");
+ } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
+ vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
+ dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
+ vf->num_mdd_events,
+ vf->num_invalid_msgs,
+ vf->num_valid_msgs);
+ } else {
+ dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
+ }
+}
+
+/**
+ * i40e_dbg_dump_vf_all - dump VF info for all VFs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
+{
+ int i;
+
+ if (!pf->num_alloc_vfs)
+ dev_info(&pf->pdev->dev, "no VFs enabled!\n");
+ else
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ i40e_dbg_dump_vf(pf, i);
+}
+
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
@@ -731,6 +758,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
struct i40e_vsi *vsi;
int vsi_seid;
int veb_seid;
+ int vf_id;
int cnt;
/* don't allow partial writes */
@@ -933,6 +961,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_dbg_dump_veb_seid(pf, vsi_seid);
else
i40e_dbg_dump_veb_all(pf);
+ } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
+ cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
+ if (cnt > 0)
+ i40e_dbg_dump_vf(pf, vf_id);
+ else
+ i40e_dbg_dump_vf_all(pf);
} else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
int ring_id, desc_n;
if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
@@ -1128,6 +1162,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
dev_info(&pf->pdev->dev, "dump reset stats\n");
dev_info(&pf->pdev->dev, "dump port\n");
+ dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
dev_info(&pf->pdev->dev,
"dump debug fwdata <cluster_id> <table_id> <index>\n");
}
@@ -1674,7 +1709,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
} else if (!vsi->netdev) {
dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
vsi_seid);
- } else if (test_bit(__I40E_DOWN, &vsi->state)) {
+ } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
vsi_seid);
} else if (rtnl_trylock()) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a22e26200bcc..7a8eb486b9ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -89,7 +89,6 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
- I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
@@ -162,19 +161,6 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};
-#ifdef I40E_FCOE
-static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
- I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
- I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
- I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
- I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
- I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
- I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
- I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
- I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
-};
-
-#endif /* I40E_FCOE */
#define I40E_QUEUE_STATS_LEN(n) \
(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
* 2 /* Tx and Rx together */ \
@@ -182,17 +168,9 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
-#ifdef I40E_FCOE
-#define I40E_FCOE_STATS_LEN ARRAY_SIZE(i40e_gstrings_fcoe_stats)
-#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
- I40E_FCOE_STATS_LEN + \
- I40E_MISC_STATS_LEN + \
- I40E_QUEUE_STATS_LEN((n)))
-#else
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
-#endif /* I40E_FCOE */
#define I40E_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
@@ -228,22 +206,37 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
-static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "MFP",
- "LinkPolling",
- "flow-director-atr",
- "veb-stats",
- "hw-atr-eviction",
+struct i40e_priv_flags {
+ char flag_string[ETH_GSTRING_LEN];
+ u64 flag;
+ bool read_only;
};
-#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
+#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
+ .flag_string = _name, \
+ .flag = _flag, \
+ .read_only = _read_only, \
+}
+
+static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
+ /* NOTE: MFP setting cannot be changed */
+ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+ I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
+ I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
+ I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
+ I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0),
+ I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+};
+
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
/* Private flags with a global effect, restricted to PF 0 */
-static const char i40e_gl_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "vf-true-promisc-support",
+static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
+ I40E_PRIV_FLAG("vf-true-promisc-support",
+ I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
};
-#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_priv_flags_strings)
+#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
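Tying each flag name to its I40E_FLAG_* bit (plus a read-only marker for MFP) lets the get/set paths walk the table instead of hard-coding string positions. A sketch of the lookup this enables; the function name is illustrative and the real get_priv_flags/set_priv_flags conversion happens later in this patch:

	/* Sketch: assemble the ethtool priv-flags bitmap from the table. */
	static u32 i40e_example_get_priv_flags(struct i40e_pf *pf)
	{
		u32 i, ret_flags = 0;

		for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
			const struct i40e_priv_flags *priv =
				&i40e_gstrings_priv_flags[i];

			if (pf->flags & priv->flag)
				ret_flags |= BIT(i);	/* bit order == table order */
		}

		return ret_flags;
	}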
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
@@ -387,7 +380,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
*
**/
static void i40e_get_settings_link_up(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd,
+ struct ethtool_link_ksettings *cmd,
struct net_device *netdev,
struct i40e_pf *pf)
{
@@ -395,90 +388,96 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
u32 link_speed = hw_link_info->link_speed;
u32 e_advertising = 0x0;
u32 e_supported = 0x0;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_40GBASE_CR4_CU:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_40000baseCR4_Full;
break;
case I40E_PHY_TYPE_XLAUI:
case I40E_PHY_TYPE_XLPPI:
case I40E_PHY_TYPE_40GBASE_AOC:
- ecmd->supported = SUPPORTED_40000baseCR4_Full;
+ supported = SUPPORTED_40000baseCR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_SR4:
- ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ supported = SUPPORTED_40000baseSR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_LR4:
- ecmd->supported = SUPPORTED_40000baseLR4_Full;
+ supported = SUPPORTED_40000baseLR4_Full;
break;
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
case I40E_PHY_TYPE_1000BASE_SX:
case I40E_PHY_TYPE_1000BASE_LX:
- ecmd->supported = SUPPORTED_10000baseT_Full;
+ supported = SUPPORTED_10000baseT_Full;
if (hw_link_info->module_type[2] &
I40E_MODULE_TYPE_1000BASE_SX ||
hw_link_info->module_type[2] &
I40E_MODULE_TYPE_1000BASE_LX) {
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ advertising |= ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
+ advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
case I40E_PHY_TYPE_SFI:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_10GBASE_AOC:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = SUPPORTED_10000baseT_Full;
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = SUPPORTED_10000baseT_Full;
break;
case I40E_PHY_TYPE_SGMII:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
}
break;
case I40E_PHY_TYPE_40GBASE_KR4:
@@ -486,25 +485,25 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
- ecmd->supported |= SUPPORTED_40000baseKR4_Full |
- SUPPORTED_20000baseKR2_Full |
- SUPPORTED_10000baseKR_Full |
- SUPPORTED_10000baseKX4_Full |
- SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
- ADVERTISED_20000baseKR2_Full |
- ADVERTISED_10000baseKR_Full |
- ADVERTISED_10000baseKX4_Full |
- ADVERTISED_1000baseKX_Full |
- ADVERTISED_Autoneg;
+ supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_10000baseKR_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
break;
case I40E_PHY_TYPE_25GBASE_KR:
case I40E_PHY_TYPE_25GBASE_CR:
case I40E_PHY_TYPE_25GBASE_SR:
case I40E_PHY_TYPE_25GBASE_LR:
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
+ supported = SUPPORTED_Autoneg;
+ advertising = ADVERTISED_Autoneg;
 /* TODO: add speeds when ethtool is ready to support */
break;
default:
@@ -520,38 +519,43 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
i40e_phy_type_to_ethtool(pf, &e_supported,
&e_advertising);
- ecmd->supported = ecmd->supported & e_supported;
- ecmd->advertising = ecmd->advertising & e_advertising;
+ supported = supported & e_supported;
+ advertising = advertising & e_advertising;
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
- ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ cmd->base.speed = SPEED_40000;
break;
case I40E_LINK_SPEED_25GB:
#ifdef SPEED_25000
- ethtool_cmd_speed_set(ecmd, SPEED_25000);
+ cmd->base.speed = SPEED_25000;
#else
netdev_info(netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
case I40E_LINK_SPEED_20GB:
- ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ cmd->base.speed = SPEED_20000;
break;
case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
}
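
For readers coming from the old API: the ethtool_convert_legacy_u32_to_link_mode() calls above map the legacy SUPPORTED_*/ADVERTISED_* u32 bit-for-bit onto the low 32 bits of the new link-mode bitmap, which is exactly why speeds such as 25G could not be expressed before this conversion. A minimal stand-alone sketch of that round trip (illustrative only; the kernel helpers operate on a longer bitmap):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of ethtool_convert_legacy_u32_to_link_mode(): each legacy
     * bit becomes the same-numbered bit of the link-mode bitmap. */
    static void legacy_u32_to_link_mode(uint64_t *modes, uint32_t legacy)
    {
            modes[0] = legacy;              /* bits 0..31 map 1:1 */
    }

    /* Sketch of the reverse helper: the conversion is lossy once any
     * mode above bit 31 is set, which is why newer speeds need the
     * bitmap API. */
    static int link_mode_to_legacy_u32(uint32_t *legacy,
                                       const uint64_t *modes)
    {
            *legacy = (uint32_t)modes[0];
            return (modes[0] >> 32) ? -1 : 0;
    }

    int main(void)
    {
            uint64_t modes[1] = { 0 };
            uint32_t out;

            /* 0x1000 is SUPPORTED_10000baseT_Full in the legacy encoding */
            legacy_u32_to_link_mode(modes, 0x1000u);
            if (!link_mode_to_legacy_u32(&out, modes))
                    printf("round trip ok: 0x%x\n", out);
            return 0;
    }
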
/**
@@ -562,18 +566,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
* Reports link settings that can be determined when link is down
**/
static void i40e_get_settings_link_down(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd,
+ struct ethtool_link_ksettings *cmd,
struct i40e_pf *pf)
{
+ u32 supported, advertising;
+
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- i40e_phy_type_to_ethtool(pf, &ecmd->supported,
- &ecmd->advertising);
+ i40e_phy_type_to_ethtool(pf, &supported, &advertising);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
/* With no link, speed and duplex are unknown */
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
/**
@@ -583,74 +593,85 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
*
* Reports speed/duplex settings based on media_type
**/
-static int i40e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+ u32 advertising;
if (link_up)
- i40e_get_settings_link_up(hw, ecmd, netdev, pf);
+ i40e_get_settings_link_up(hw, cmd, netdev, pf);
else
- i40e_get_settings_link_down(hw, ecmd, pf);
+ i40e_get_settings_link_down(hw, cmd, pf);
/* Now set the settings that don't rely on link being up/down */
/* Set autoneg settings */
- ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
switch (hw->phy.media_type) {
case I40E_MEDIA_TYPE_BACKPLANE:
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_Backplane;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_Backplane;
- ecmd->port = PORT_NONE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Backplane);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Backplane);
+ cmd->base.port = PORT_NONE;
break;
case I40E_MEDIA_TYPE_BASET:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ cmd->base.port = PORT_TP;
break;
case I40E_MEDIA_TYPE_DA:
case I40E_MEDIA_TYPE_CX4:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_DA;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ cmd->base.port = PORT_DA;
break;
case I40E_MEDIA_TYPE_FIBER:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ cmd->base.port = PORT_FIBRE;
break;
case I40E_MEDIA_TYPE_UNKNOWN:
default:
- ecmd->port = PORT_OTHER;
+ cmd->base.port = PORT_OTHER;
break;
}
- /* Set transceiver */
- ecmd->transceiver = XCVR_EXTERNAL;
-
/* Set flow control settings */
- ecmd->supported |= SUPPORTED_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
- ecmd->advertising |= ADVERTISED_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
break;
case I40E_FC_TX_PAUSE:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
break;
case I40E_FC_RX_PAUSE:
- ecmd->advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, cmd->link_modes.advertising);
+
+ advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
break;
}
@@ -664,8 +685,8 @@ static int i40e_get_settings(struct net_device *netdev,
*
* Set speed/duplex per media_types advertised/forced
**/
-static int i40e_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
@@ -673,12 +694,15 @@ static int i40e_set_settings(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
- struct ethtool_cmd safe_ecmd;
+ struct ethtool_link_ksettings safe_cmd;
+ struct ethtool_link_ksettings copy_cmd;
i40e_status status = 0;
bool change = false;
+ int timeout = 50;
int err = 0;
- u8 autoneg;
+ u32 autoneg;
u32 advertise;
+ u32 tmp;
/* Changing port settings is not supported if this isn't the
* port's controlling PF
@@ -706,33 +730,47 @@ static int i40e_set_settings(struct net_device *netdev,
return -EOPNOTSUPP;
}
+ /* copy the cmd to copy_cmd to avoid modifying the original */
+ memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings));
+
/* get our own copy of the bits to check against */
- memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
- i40e_get_settings(netdev, &safe_ecmd);
+ memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings));
+ i40e_get_link_ksettings(netdev, &safe_cmd);
- /* save autoneg and speed out of ecmd */
- autoneg = ecmd->autoneg;
- advertise = ecmd->advertising;
+ /* save autoneg and speed out of cmd */
+ autoneg = cmd->base.autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(&advertise,
+ cmd->link_modes.advertising);
/* set autoneg and speed back to what they currently are */
- ecmd->autoneg = safe_ecmd.autoneg;
- ecmd->advertising = safe_ecmd.advertising;
+ copy_cmd.base.autoneg = safe_cmd.base.autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(
+ &tmp, safe_cmd.link_modes.advertising);
+ ethtool_convert_legacy_u32_to_link_mode(
+ copy_cmd.link_modes.advertising, tmp);
+
+ copy_cmd.base.cmd = safe_cmd.base.cmd;
- ecmd->cmd = safe_ecmd.cmd;
- /* If ecmd and safe_ecmd are not the same now, then they are
+ /* If copy_cmd and safe_cmd are not the same now, then they are
* trying to set something that we do not support
*/
- if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
+ if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
return -EOPNOTSUPP;
- while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
usleep_range(1000, 2000);
+ }
/* Get the current phy config */
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
NULL);
- if (status)
- return -EAGAIN;
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
/* Copy abilities to config in case autoneg is not
* set below
@@ -745,9 +783,11 @@ static int i40e_set_settings(struct net_device *netdev,
/* If autoneg was not already enabled */
if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
/* If autoneg is not supported, return error */
- if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+ if (!ethtool_link_ksettings_test_link_mode(
+ &safe_cmd, supported, Autoneg)) {
netdev_info(netdev, "Autoneg not supported on this phy\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
/* Autoneg is allowed to change */
config.abilities = abilities.abilities |
@@ -760,11 +800,13 @@ static int i40e_set_settings(struct net_device *netdev,
/* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
- if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+ if (ethtool_link_ksettings_test_link_mode(
+ &safe_cmd, supported, Autoneg) &&
hw->phy.link_info.phy_type !=
I40E_PHY_TYPE_10GBASE_T) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
/* Autoneg is allowed to change */
config.abilities = abilities.abilities &
@@ -773,8 +815,12 @@ static int i40e_set_settings(struct net_device *netdev,
}
}
- if (advertise & ~safe_ecmd.supported)
- return -EINVAL;
+ ethtool_convert_link_mode_to_legacy_u32(&tmp,
+ safe_cmd.link_modes.supported);
+ if (advertise & ~tmp) {
+ err = -EINVAL;
+ goto done;
+ }
if (advertise & ADVERTISED_100baseT_Full)
config.link_speed |= I40E_LINK_SPEED_100MB;
@@ -830,7 +876,8 @@ static int i40e_set_settings(struct net_device *netdev,
netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
- return -EAGAIN;
+ err = -EAGAIN;
+ goto done;
}
status = i40e_update_link_info(hw);
@@ -843,6 +890,9 @@ static int i40e_set_settings(struct net_device *netdev,
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
}
+done:
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
+
return err;
}
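
The test_and_set_bit() loop added above is a common bounded-spin pattern: try to take the configuration lock, sleep 1-2 ms between attempts, and give up with -EBUSY after roughly 50 tries. A stand-alone sketch of the shape (the atomics and sleep lengths here are illustrative, not the kernel primitives):

    #define _POSIX_C_SOURCE 199309L
    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_flag config_busy = ATOMIC_FLAG_INIT;

    /* Returns 0 with the flag held, or -1 (think -EBUSY) on timeout. */
    static int take_config_lock(void)
    {
            int timeout = 50;

            while (atomic_flag_test_and_set(&config_busy)) {
                    if (!--timeout)
                            return -1;
                    /* stand-in for usleep_range(1000, 2000) */
                    nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
            }
            return 0;
    }

    int main(void)
    {
            if (!take_config_lock()) {
                    printf("lock acquired\n");
                    atomic_flag_clear(&config_busy); /* clear_bit() analogue */
            }
            return 0;
    }
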
@@ -937,7 +987,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
}
/* If we have link and don't have autoneg */
- if (!test_bit(__I40E_DOWN, &pf->state) &&
+ if (!test_bit(__I40E_DOWN, pf->state) &&
!(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
/* Send message that it might not necessarily work*/
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
@@ -989,10 +1039,10 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
- if (!test_bit(__I40E_DOWN, &pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state)) {
/* Give it a little more time to try to come back */
msleep(75);
- if (!test_bit(__I40E_DOWN, &pf->state))
+ if (!test_bit(__I40E_DOWN, pf->state))
return i40e_nway_reset(netdev);
}
@@ -1089,8 +1139,8 @@ static int i40e_get_eeprom(struct net_device *netdev,
/* make sure it is the right magic for NVMUpdate */
if ((eeprom->magic >> 16) != hw->device_id)
errno = -EINVAL;
- else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
errno = -EBUSY;
else
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
@@ -1165,6 +1215,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
struct i40e_hw *hw = &np->vsi->back->hw;
u32 val;
+#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
+ if (hw->mac.type == I40E_MAC_X722) {
+ val = X722_EEPROM_SCOPE_LIMIT + 1;
+ return val;
+ }
val = (rd32(hw, I40E_GLPCI_LBARCTRL)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
@@ -1191,8 +1246,8 @@ static int i40e_set_eeprom(struct net_device *netdev,
/* check for NVMUpdate access method */
else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
errno = -EINVAL;
- else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
errno = -EBUSY;
else
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
@@ -1252,6 +1307,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 new_rx_count, new_tx_count;
+ int timeout = 50;
int i, err = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1276,8 +1332,12 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
- while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
usleep_range(1000, 2000);
+ }
if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */
@@ -1425,7 +1485,7 @@ free_tx:
}
done:
- clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
return err;
}
@@ -1483,13 +1543,6 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
-#ifdef I40E_FCOE
- for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
- p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
-#endif
rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
@@ -1577,13 +1630,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_misc_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
-#ifdef I40E_FCOE
- for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_fcoe_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
-#endif
for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
p += ETH_GSTRING_LEN;
@@ -1648,12 +1694,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(data, i40e_priv_flags_strings,
- I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
- data += I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN;
- if (pf->hw.pf_id == 0)
- memcpy(data, i40e_gl_priv_flags_strings,
- I40E_GL_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+ if (pf->hw.pf_id != 0)
+ break;
+ for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gl_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
break;
default:
break;
@@ -1774,7 +1826,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
int i;
for (i = 0; i < pf->num_alloc_vfs; i++)
- if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
+ if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states))
return true;
return false;
}
@@ -1795,7 +1847,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* Offline tests */
netif_info(pf, drv, netdev, "offline testing starting\n");
- set_bit(__I40E_TESTING, &pf->state);
+ set_bit(__I40E_TESTING, pf->state);
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
@@ -1805,7 +1857,7 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, &pf->state);
+ clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
@@ -1819,7 +1871,7 @@ static void i40e_diag_test(struct net_device *netdev,
* link then the following link test would have
* to be moved to before the reset
*/
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1834,8 +1886,8 @@ static void i40e_diag_test(struct net_device *netdev,
if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, &pf->state);
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+ clear_bit(__I40E_TESTING, pf->state);
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
if (if_running)
i40e_open(netdev);
@@ -2285,6 +2337,102 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
}
/**
+ * i40e_check_mask - Check whether a mask field is set
+ * @mask: the full mask value
+ * @field: mask of the field to check
+ *
+ * If the given mask is fully set, return a positive value. If the mask for
+ * the field is fully unset, return zero. Otherwise return a negative error code.
+ **/
+static int i40e_check_mask(u64 mask, u64 field)
+{
+ u64 value = mask & field;
+
+ if (value == field)
+ return 1;
+ else if (!value)
+ return 0;
+ else
+ return -1;
+}
+
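
The tri-state convention is easiest to see with concrete values. A stand-alone sketch of the same logic (the 16-bit field mask below is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Same logic as i40e_check_mask(): 1 = field fully masked,
     * 0 = field fully unmasked, -1 = partial (unsupported). */
    static int check_mask(uint64_t mask, uint64_t field)
    {
            uint64_t value = mask & field;

            if (value == field)
                    return 1;
            else if (!value)
                    return 0;
            else
                    return -1;
    }

    int main(void)
    {
            uint64_t field = 0xFFFFULL;     /* hypothetical 16-bit field */

            printf("%d\n", check_mask(0xFFFFFFFFULL, field)); /* 1: fully set   */
            printf("%d\n", check_mask(0x0ULL, field));        /* 0: fully unset */
            printf("%d\n", check_mask(0x00FFULL, field));     /* -1: partial    */
            return 0;
    }
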
+/**
+ * i40e_parse_rx_flow_user_data - Deconstruct user-defined data
+ * @fsp: pointer to rx flow specification
+ * @data: pointer to userdef data structure for storage
+ *
+ * Read the user-defined data and deconstruct the value into a structure. No
+ * other code should read the user-defined data, so as to ensure that every
+ * place consistently reads the value correctly.
+ *
+ * The user-defined field is a 64-bit big-endian value, which we
+ * deconstruct by reading bits or bit fields from it. Single bit flags shall
+ * be defined starting from the highest bits, while small bit field values
+ * shall be defined starting from the lowest bits.
+ *
+ * Returns 0 if the data is valid, and non-zero if the userdef data is invalid
+ * and the filter should be rejected. The data structure will always be
+ * modified even if FLOW_EXT is not set.
+ *
+ **/
+static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *data)
+{
+ u64 value, mask;
+ int valid;
+
+ /* Zero memory first so it's always consistent. */
+ memset(data, 0, sizeof(*data));
+
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ value = be64_to_cpu(*((__be64 *)fsp->h_ext.data));
+ mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data));
+
+#define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0)
+#define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16)
+#define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0)
+
+ valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
+ if (valid < 0) {
+ return -EINVAL;
+ } else if (valid) {
+ data->flex_word = value & I40E_USERDEF_FLEX_WORD;
+ data->flex_offset =
+ (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
+ data->flex_filter = true;
+ }
+
+ return 0;
+}
+
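
Given the layout documented above -- flex word in bits 15:0, byte offset in bits 31:16 -- the user-def value for a given match can be computed as follows. This is a stand-alone sketch; the ethtool invocation in the comment is illustrative, not a tested command line:

    #include <stdint.h>
    #include <stdio.h>

    #define USERDEF_FLEX_WORD   0x000000000000FFFFULL  /* bits 15:0  */
    #define USERDEF_FLEX_OFFSET 0x00000000FFFF0000ULL  /* bits 31:16 */

    int main(void)
    {
            /* Match the 16-bit word 0x4321 at byte offset 12 of the
             * payload; an administrator might pass something like:
             *   ethtool -N eth0 flow-type tcp4 ... user-def 0xc4321
             */
            uint64_t value = (12ULL << 16) | 0x4321ULL;

            printf("user-def = 0x%llx (word 0x%04llx at offset %llu)\n",
                   (unsigned long long)value,
                   (unsigned long long)(value & USERDEF_FLEX_WORD),
                   (unsigned long long)((value & USERDEF_FLEX_OFFSET) >> 16));
            return 0;
    }
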
+/**
+ * i40e_fill_rx_flow_user_data - Fill in user-defined data field
+ * @fsp: pointer to rx_flow specification
+ * @data: pointer to the userdef data structure to read
+ *
+ * Reads the userdef data structure and properly fills in the user defined
+ * fields of the rx_flow_spec.
+ **/
+static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *data)
+{
+ u64 value = 0, mask = 0;
+
+ if (data->flex_filter) {
+ value |= data->flex_word;
+ value |= (u64)data->flex_offset << 16;
+ mask |= I40E_USERDEF_FLEX_FILTER;
+ }
+
+ if (value || mask)
+ fsp->flow_type |= FLOW_EXT;
+
+ *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value);
+ *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask);
+}
+
+/**
* i40e_get_ethtool_fdir_all - Populates the rule count of a command
* @pf: Pointer to the physical function struct
* @cmd: The command to get or set Rx flow classification rules
@@ -2335,8 +2483,11 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct i40e_rx_flow_userdef userdef = {0};
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
+ u64 input_set;
+ u16 index;
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
@@ -2359,8 +2510,48 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
*/
fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
+
+ switch (rule->flow_type) {
+ case SCTP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ break;
+ case TCP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ break;
+ case UDP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ break;
+ case IP_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ break;
+ default:
+ /* If we have stored a filter with a flow type not listed here
+ * it is almost certainly a driver bug. WARN(), and then
+ * assign the input_set as if all fields are enabled to avoid
+ * reading unassigned memory.
+ */
+ WARN(1, "Missing input set index for flow_type %d\n",
+ rule->flow_type);
+ input_set = 0xFFFFFFFFFFFFFFFFULL;
+ goto no_input_set;
+ }
+
+ input_set = i40e_read_fd_input_set(pf, index);
+
+no_input_set:
+ if (input_set & I40E_L3_SRC_MASK)
+ fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
+
+ if (input_set & I40E_L3_DST_MASK)
+ fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
+
+ if (input_set & I40E_L4_SRC_MASK)
+ fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
+
+ if (input_set & I40E_L4_DST_MASK)
+ fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -2372,11 +2563,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
if (vsi && vsi->type == I40E_VSI_SRIOV) {
- fsp->h_ext.data[1] = htonl(vsi->vf_id);
- fsp->m_ext.data[1] = htonl(0x1);
+ /* VFs are zero-indexed by the driver, but ethtool
+ * expects them to be one-indexed, so add one here
+ */
+ u64 ring_vf = vsi->vf_id + 1;
+
+ ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fsp->ring_cookie |= ring_vf;
}
}
+ if (rule->flex_filter) {
+ userdef.flex_filter = true;
+ userdef.flex_word = be16_to_cpu(rule->flex_word);
+ userdef.flex_offset = rule->flex_offset;
+ }
+
+ i40e_fill_rx_flow_user_data(fsp, &userdef);
+
return 0;
}
@@ -2574,24 +2778,6 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
}
/**
- * i40e_match_fdir_input_set - Match a new filter against an existing one
- * @rule: The filter already added
- * @input: The new filter to comapre against
- *
- * Returns true if the two input set match
- **/
-static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
- struct i40e_fdir_filter *input)
-{
- if ((rule->dst_ip[0] != input->dst_ip[0]) ||
- (rule->src_ip[0] != input->src_ip[0]) ||
- (rule->dst_port != input->dst_port) ||
- (rule->src_port != input->src_port))
- return false;
- return true;
-}
-
-/**
* i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
* @vsi: Pointer to the targeted VSI
* @input: The filter to update or NULL to indicate deletion
@@ -2626,22 +2812,22 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
/* if there is an old rule occupying our place remove it */
if (rule && (rule->fd_id == sw_idx)) {
- if (input && !i40e_match_fdir_input_set(rule, input))
- err = i40e_add_del_fdir(vsi, rule, false);
- else if (!input)
- err = i40e_add_del_fdir(vsi, rule, false);
+ /* Remove this rule, since we're either deleting it, or
+ * replacing it.
+ */
+ err = i40e_add_del_fdir(vsi, rule, false);
hlist_del(&rule->fdir_node);
kfree(rule);
pf->fdir_pf_active_filters--;
}
- /* If no input this was a delete, err should be 0 if a rule was
- * successfully found and removed from the list else -EINVAL
+ /* If we weren't given an input, this is a delete, so just return the
+ * error code indicating if there was an entry at the requested slot
*/
if (!input)
return err;
- /* initialize node and set software index */
+ /* Otherwise, install the new rule as requested */
INIT_HLIST_NODE(&input->fdir_node);
/* add filter to the list */
@@ -2658,6 +2844,69 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
}
/**
+ * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table
+ * @pf: pointer to PF structure
+ *
+ * This function searches the list of filters and determines which FLX_PIT
+ * entries are still required. It will prune any entries which are no longer
+ * in use after the deletion.
+ **/
+static void i40e_prune_flex_pit_list(struct i40e_pf *pf)
+{
+ struct i40e_flex_pit *entry, *tmp;
+ struct i40e_fdir_filter *rule;
+
+ /* First, we'll check the l3 table */
+ list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) {
+ bool found = false;
+
+ hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
+ if (rule->flow_type != IP_USER_FLOW)
+ continue;
+ if (rule->flex_filter &&
+ rule->flex_offset == entry->src_offset) {
+ found = true;
+ break;
+ }
+ }
+
+ /* If we didn't find the filter, then we can prune this entry
+ * from the list.
+ */
+ if (!found) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ /* Followed by the L4 table */
+ list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) {
+ bool found = false;
+
+ hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
+ /* Skip this filter if it's L3, since we already
+ * checked those in the above loop
+ */
+ if (rule->flow_type == IP_USER_FLOW)
+ continue;
+ if (rule->flex_filter &&
+ rule->flex_offset == entry->src_offset) {
+ found = true;
+ break;
+ }
+ }
+
+ /* If we didn't find the filter, then we can prune this entry
+ * from the list.
+ */
+ if (!found) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+}
+
+/**
* i40e_del_fdir_entry - Deletes a Flow Director filter entry
* @vsi: Pointer to the targeted VSI
* @cmd: The command to get or set Rx flow classification rules
@@ -2675,20 +2924,700 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
int ret = 0;
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return -EBUSY;
ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
+ i40e_prune_flex_pit_list(pf);
+
i40e_fdir_check_and_reenable(pf);
return ret;
}
/**
+ * i40e_unused_pit_index - Find an unused PIT index for given list
+ * @pf: the PF data structure
+ *
+ * Find the first unused flexible PIT index entry. We search both the L3 and
+ * L4 flexible PIT lists so that the returned index is unique and unused by
+ * either currently programmed L3 or L4 filters. We use a bitmap as storage
+ * to track which indexes are already in use.
+ **/
+static u8 i40e_unused_pit_index(struct i40e_pf *pf)
+{
+ unsigned long available_index = 0xFF;
+ struct i40e_flex_pit *entry;
+
+ /* We need to make sure that the new index isn't in use by either L3
+ * or L4 filters so that IP_USER_FLOW filters can program both L3 and
+ * L4 to use the same index.
+ */
+
+ list_for_each_entry(entry, &pf->l4_flex_pit_list, list)
+ clear_bit(entry->pit_index, &available_index);
+
+ list_for_each_entry(entry, &pf->l3_flex_pit_list, list)
+ clear_bit(entry->pit_index, &available_index);
+
+ return find_first_bit(&available_index, 8);
+}
+
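
The bitmap trick above, sketched stand-alone: start with all eight index bits set, clear every index found on either list, and the lowest bit still set is the first free index (find_first_bit() in the kernel, approximated with a loop here). The in-use indexes below are invented:

    #include <stdio.h>

    int main(void)
    {
            unsigned long available = 0xFF; /* 8 possible PIT indexes, all free */
            int used[] = { 0, 1, 3 };       /* hypothetical in-use indexes      */
            int i;

            for (i = 0; i < 3; i++)
                    available &= ~(1UL << used[i]);

            /* Equivalent of find_first_bit(&available, 8) */
            for (i = 0; i < 8; i++)
                    if (available & (1UL << i))
                            break;

            printf("first unused PIT index: %d\n", i); /* prints 2 */
            return 0;
    }
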
+/**
+ * i40e_find_flex_offset - Find an existing flex src_offset
+ * @flex_pit_list: L3 or L4 flex PIT list
+ * @src_offset: new src_offset to find
+ *
+ * Searches the flex_pit_list for an existing offset and returns it if found.
+ * If the offset is not yet programmed, this returns an ERR_PTR when there is
+ * no space left to add a new offset, otherwise it returns NULL.
+ **/
+static
+struct i40e_flex_pit *i40e_find_flex_offset(struct list_head *flex_pit_list,
+ u16 src_offset)
+{
+ struct i40e_flex_pit *entry;
+ int size = 0;
+
+ /* Search for the src_offset first. If we find a matching entry
+ * already programmed, we can simply re-use it.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ size++;
+ if (entry->src_offset == src_offset)
+ return entry;
+ }
+
+ /* If we haven't found an entry yet, then the provided src offset has
+ * not yet been programmed. We will program the src offset later on,
+ * but we need to indicate whether there is enough space to do so
+ * here. We'll make use of ERR_PTR for this purpose.
+ */
+ if (size >= I40E_FLEX_PIT_TABLE_SIZE)
+ return ERR_PTR(-ENOSPC);
+
+ return NULL;
+}
+
+/**
+ * i40e_add_flex_offset - Add src_offset to flex PIT table list
+ * @flex_pit_list: L3 or L4 flex PIT list
+ * @src_offset: new src_offset to add
+ * @pit_index: the PIT index to program
+ *
+ * This function programs the new src_offset to the list. It is expected that
+ * i40e_find_flex_offset has already been tried and returned NULL, indicating
+ * that this offset is not programmed, and that the list has enough space to
+ * store another offset.
+ *
+ * Returns 0 on success, and negative value on error.
+ **/
+static int i40e_add_flex_offset(struct list_head *flex_pit_list,
+ u16 src_offset,
+ u8 pit_index)
+{
+ struct i40e_flex_pit *new_pit, *entry;
+
+ new_pit = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!new_pit)
+ return -ENOMEM;
+
+ new_pit->src_offset = src_offset;
+ new_pit->pit_index = pit_index;
+
+ /* We need to insert this item such that the list is sorted by
+ * src_offset in ascending order.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ if (new_pit->src_offset < entry->src_offset) {
+ list_add_tail(&new_pit->list, &entry->list);
+ return 0;
+ }
+
+ /* If we found an entry with our offset already programmed we
+ * can simply return here, after freeing the memory. However,
+ * if the pit_index does not match we need to report an error.
+ */
+ if (new_pit->src_offset == entry->src_offset) {
+ int err = 0;
+
+ /* If the PIT index is not the same we can't re-use
+ * the entry, so we must report an error.
+ */
+ if (new_pit->pit_index != entry->pit_index)
+ err = -EINVAL;
+
+ kfree(new_pit);
+ return err;
+ }
+ }
+
+ /* If we reached here, then we haven't yet added the item. This means
+ * that we should add the item at the end of the list.
+ */
+ list_add_tail(&new_pit->list, flex_pit_list);
+ return 0;
+}
+
+/**
+ * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
+ * @pf: Pointer to the PF structure
+ * @flex_pit_list: list of flexible src offsets in use
+ * @flex_pit_start: index to the first entry for this section of the table
+ *
+ * In order to handle flexible data, the hardware uses a table of values
+ * called the FLX_PIT table. This table is used to indicate which sections of
+ * the input correspond to what PIT index values. Unfortunately, hardware is
+ * very restrictive about programming this table. Entries must be ordered by
+ * src_offset in ascending order, without duplicates. Additionally, unused
+ * entries must be set to the unused index value, and must have valid size and
+ * length according to the src_offset ordering.
+ *
+ * This function will reprogram the FLX_PIT register from a book-keeping
+ * structure that we guarantee is already ordered correctly, and has no more
+ * than 3 entries.
+ *
+ * To make things easier, we only support flexible values of one word length,
+ * rather than allowing variable length flexible values.
+ **/
+static void __i40e_reprogram_flex_pit(struct i40e_pf *pf,
+ struct list_head *flex_pit_list,
+ int flex_pit_start)
+{
+ struct i40e_flex_pit *entry = NULL;
+ u16 last_offset = 0;
+ int i = 0, j = 0;
+
+ /* First, loop over the list of flex PIT entries, and reprogram the
+ * registers.
+ */
+ list_for_each_entry(entry, flex_pit_list, list) {
+ /* We have to be careful when programming values for the
+ * largest SRC_OFFSET value. It is possible that adding
+ * additional empty values at the end would overflow the space
+ * for the SRC_OFFSET in the FLX_PIT register. To avoid this,
+ * we check here and add the empty values prior to adding the
+ * largest value.
+ *
+ * To determine this, we will use a loop from i+1 to 3, which
+ * will determine whether the unused entries would have valid
+ * SRC_OFFSET. Note that there cannot be extra entries past
+ * this value, because the only valid values would have been
+ * larger than I40E_MAX_FLEX_SRC_OFFSET, and thus would not
+ * have been added to the list in the first place.
+ */
+ for (j = i + 1; j < 3; j++) {
+ u16 offset = entry->src_offset + j;
+ int index = flex_pit_start + i;
+ u32 value = I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+ 1,
+ offset - 3);
+
+ if (offset > I40E_MAX_FLEX_SRC_OFFSET) {
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(index),
+ value);
+ i++;
+ }
+ }
+
+ /* Now, we can program the actual value into the table */
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(flex_pit_start + i),
+ I40E_FLEX_PREP_VAL(entry->pit_index + 50,
+ 1,
+ entry->src_offset));
+ i++;
+ }
+
+ /* In order to program the last entries in the table, we need to
+ * determine the valid offset. If the list is empty, we'll just start
+ * with 0. Otherwise, we'll start with the last item offset and add 1.
+ * This ensures that all entries have valid sizes. If we don't do this
+ * correctly, the hardware will disable flexible field parsing.
+ */
+ if (!list_empty(flex_pit_list))
+ last_offset = list_prev_entry(entry, list)->src_offset + 1;
+
+ for (; i < 3; i++, last_offset++) {
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_PRTQF_FLX_PIT(flex_pit_start + i),
+ I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+ 1,
+ last_offset));
+ }
+}
+
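
The ordering constraint handled above reduces to: program the in-use entries in ascending src_offset order, then pad the remainder of the 3-slot section with strictly increasing dummy offsets so the hardware still sees a valid, ordered table. A register-free sketch of that fill pattern (it omits the mid-loop overflow special case, and printf() stands in for i40e_write_rx_ctl()):

    #include <stdio.h>

    int main(void)
    {
            int used_offsets[] = { 4, 9 }; /* hypothetical programmed offsets */
            int nused = 2, i;
            int last = 0;

            /* Real entries first, in ascending src_offset order */
            for (i = 0; i < nused; i++) {
                    printf("slot %d: offset %d (in use)\n", i, used_offsets[i]);
                    last = used_offsets[i] + 1;
            }

            /* Pad the rest of the 3-entry section with increasing unused
             * offsets so every register keeps a valid size and order. */
            for (; i < 3; i++, last++)
                    printf("slot %d: offset %d (unused)\n", i, last);

            return 0;
    }
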
+/**
+ * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change
+ * @pf: pointer to the PF structure
+ *
+ * This function reprograms both the L3 and L4 FLX_PIT tables. See the
+ * internal helper function for implementation details.
+ **/
+static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
+{
+ __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list,
+ I40E_FLEX_PIT_IDX_START_L3);
+
+ __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list,
+ I40E_FLEX_PIT_IDX_START_L4);
+
+ /* We also need to program the L3 and L4 GLQF ORT register */
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_GLQF_ORT(I40E_L3_GLQF_ORT_IDX),
+ I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L3,
+ 3, 1));
+
+ i40e_write_rx_ctl(&pf->hw,
+ I40E_GLQF_ORT(I40E_L4_GLQF_ORT_IDX),
+ I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4,
+ 3, 1));
+}
+
+/**
+ * i40e_flow_str - Converts a flow_type into a human readable string
+ * @flow_type: the flow type from a flow specification
+ *
+ * Currently only flow types we support are included here, and the string
+ * value attempts to match what ethtool would use to configure this flow type.
+ **/
+static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ return "tcp4";
+ case UDP_V4_FLOW:
+ return "udp4";
+ case SCTP_V4_FLOW:
+ return "sctp4";
+ case IP_USER_FLOW:
+ return "ip4";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index
+ * @pit_index: PIT index to convert
+ *
+ * Returns the mask for a given PIT index. Will return 0 if the pit_index is
+ * out of range.
+ **/
+static u64 i40e_pit_index_to_mask(int pit_index)
+{
+ switch (pit_index) {
+ case 0:
+ return I40E_FLEX_50_MASK;
+ case 1:
+ return I40E_FLEX_51_MASK;
+ case 2:
+ return I40E_FLEX_52_MASK;
+ case 3:
+ return I40E_FLEX_53_MASK;
+ case 4:
+ return I40E_FLEX_54_MASK;
+ case 5:
+ return I40E_FLEX_55_MASK;
+ case 6:
+ return I40E_FLEX_56_MASK;
+ case 7:
+ return I40E_FLEX_57_MASK;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * i40e_print_input_set - Show changes between two input sets
+ * @vsi: the vsi being configured
+ * @old: the old input set
+ * @new: the new input set
+ *
+ * Print the difference between old and new input sets by showing which series
+ * of words are toggled on or off. Only displays the bits we actually support
+ * changing.
+ **/
+static void i40e_print_input_set(struct i40e_vsi *vsi, u64 old, u64 new)
+{
+ struct i40e_pf *pf = vsi->back;
+ bool old_value, new_value;
+ int i;
+
+ old_value = !!(old & I40E_L3_SRC_MASK);
+ new_value = !!(new & I40E_L3_SRC_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L3_DST_MASK);
+ new_value = !!(new & I40E_L3_DST_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L4_SRC_MASK);
+ new_value = !!(new & I40E_L4_SRC_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_L4_DST_MASK);
+ new_value = !!(new & I40E_L4_DST_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ old_value = !!(old & I40E_VERIFY_TAG_MASK);
+ new_value = !!(new & I40E_VERIFY_TAG_MASK);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n",
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+
+ /* Show change of flexible filter entries */
+ for (i = 0; i < I40E_FLEX_INDEX_ENTRIES; i++) {
+ u64 flex_mask = i40e_pit_index_to_mask(i);
+
+ old_value = !!(old & flex_mask);
+ new_value = !!(new & flex_mask);
+ if (old_value != new_value)
+ netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n",
+ i,
+ old_value ? "ON" : "OFF",
+ new_value ? "ON" : "OFF");
+ }
+
+ netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n",
+ old);
+ netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n",
+ new);
+}
+
+/**
+ * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid
+ * @vsi: pointer to the targeted VSI
+ * @fsp: pointer to Rx flow specification
+ * @userdef: userdefined data from flow specification
+ *
+ * Ensures that a given ethtool_rx_flow_spec has a valid mask. Some support
+ * for partial matches exists with a few limitations. First, hardware only
+ * supports masking by word boundary (2 bytes) and not per individual bit.
+ * Second, hardware is limited to using one mask for a flow type and cannot
+ * use a separate mask for each filter.
+ *
+ * To support these limitations, if we already have a configured filter for
+ * the specified type, this function enforces that new filters of the type
+ * match the configured input set. Otherwise, if we do not have a filter of
+ * the specified type, we allow the input set to be updated to match the
+ * desired filter.
+ *
+ * To help administrators understand why a filter wasn't accepted, we print a
+ * diagnostic message showing how the input set would change, and warn that
+ * preexisting filters must be deleted first if that change is required.
+ *
+ * Returns 0 on successful input set match, and a negative return code on
+ * failure.
+ **/
+static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
+ struct ethtool_rx_flow_spec *fsp,
+ struct i40e_rx_flow_userdef *userdef)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct ethtool_tcpip4_spec *tcp_ip4_spec;
+ struct ethtool_usrip4_spec *usr_ip4_spec;
+ u64 current_mask, new_mask;
+ bool new_flex_offset = false;
+ bool flex_l3 = false;
+ u16 *fdir_filter_count;
+ u16 index, src_offset = 0;
+ u8 pit_index = 0;
+ int err;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case SCTP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ fdir_filter_count = &pf->fd_sctp4_filter_cnt;
+ break;
+ case TCP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ fdir_filter_count = &pf->fd_tcp4_filter_cnt;
+ break;
+ case UDP_V4_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ fdir_filter_count = &pf->fd_udp4_filter_cnt;
+ break;
+ case IP_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ fdir_filter_count = &pf->fd_ip4_filter_cnt;
+ flex_l3 = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Read the current input set from register memory. */
+ current_mask = i40e_read_fd_input_set(pf, index);
+ new_mask = current_mask;
+
+ /* Determine, if any, the required changes to the input set in order
+ * to support the provided mask.
+ *
+ * Hardware only supports masking at word (2 byte) granularity and does
+ * not support full bitwise masking. This implementation simplifies
+ * even further and only supports fully enabled or fully disabled
+ * masks for each field, even though we could split the ip4src and
+ * ip4dst fields.
+ */
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case SCTP_V4_FLOW:
+ new_mask &= ~I40E_VERIFY_TAG_MASK;
+ /* Fall through */
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec;
+
+ /* IPv4 source address */
+ if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_SRC_MASK;
+ else if (!tcp_ip4_spec->ip4src)
+ new_mask &= ~I40E_L3_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* IPv4 destination address */
+ if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_DST_MASK;
+ else if (!tcp_ip4_spec->ip4dst)
+ new_mask &= ~I40E_L3_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 source port */
+ if (tcp_ip4_spec->psrc == htons(0xFFFF))
+ new_mask |= I40E_L4_SRC_MASK;
+ else if (!tcp_ip4_spec->psrc)
+ new_mask &= ~I40E_L4_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 destination port */
+ if (tcp_ip4_spec->pdst == htons(0xFFFF))
+ new_mask |= I40E_L4_DST_MASK;
+ else if (!tcp_ip4_spec->pdst)
+ new_mask &= ~I40E_L4_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Type of Service is not supported. */
+ if (tcp_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ break;
+ case IP_USER_FLOW:
+ usr_ip4_spec = &fsp->m_u.usr_ip4_spec;
+
+ /* IPv4 source address */
+ if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_SRC_MASK;
+ else if (!usr_ip4_spec->ip4src)
+ new_mask &= ~I40E_L3_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* IPv4 destination address */
+ if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L3_DST_MASK;
+ else if (!usr_ip4_spec->ip4dst)
+ new_mask &= ~I40E_L3_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* First 4 bytes of L4 header */
+ if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
+ else if (!usr_ip4_spec->l4_4_bytes)
+ new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Type of Service is not supported. */
+ if (usr_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ /* Filtering on IP version is not supported */
+ if (usr_ip4_spec->ip_ver)
+ return -EINVAL;
+
+ /* Filtering on L4 protocol is not supported */
+ if (usr_ip4_spec->proto)
+ return -EINVAL;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* First, clear all flexible filter entries */
+ new_mask &= ~I40E_FLEX_INPUT_MASK;
+
+ /* If we have a flexible filter, try to add this offset to the correct
+ * flexible filter PIT list. Once finished, we can update the mask.
+ * If the src_offset changed, we will get a new mask value which will
+ * trigger an input set change.
+ */
+ if (userdef->flex_filter) {
+ struct i40e_flex_pit *l3_flex_pit = NULL, *flex_pit = NULL;
+
+ /* Flexible offset must be even, since the flexible payload
+ * must be aligned on 2-byte boundary.
+ */
+ if (userdef->flex_offset & 0x1) {
+ dev_warn(&pf->pdev->dev,
+ "Flexible data offset must be 2-byte aligned\n");
+ return -EINVAL;
+ }
+
+ src_offset = userdef->flex_offset >> 1;
+
+ /* FLX_PIT source offset value is only so large */
+ if (src_offset > I40E_MAX_FLEX_SRC_OFFSET) {
+ dev_warn(&pf->pdev->dev,
+ "Flexible data must reside within first 64 bytes of the packet payload\n");
+ return -EINVAL;
+ }
+
+ /* See if this offset has already been programmed. If we get
+ * an ERR_PTR, then the filter is not safe to add. Otherwise,
+ * if we get a NULL pointer, this means we will need to add
+ * the offset.
+ */
+ flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list,
+ src_offset);
+ if (IS_ERR(flex_pit))
+ return PTR_ERR(flex_pit);
+
+ /* IP_USER_FLOW filters match both L4 (ICMP) and L3 (unknown)
+ * packet types, and thus we need to program both L3 and L4
+ * flexible values. These must have identical flexible index,
+ * as otherwise we can't correctly program the input set. So
+ * we'll find both an L3 and L4 index and make sure they are
+ * the same.
+ */
+ if (flex_l3) {
+ l3_flex_pit =
+ i40e_find_flex_offset(&pf->l3_flex_pit_list,
+ src_offset);
+ if (IS_ERR(l3_flex_pit))
+ return PTR_ERR(l3_flex_pit);
+
+ if (flex_pit) {
+ /* If we already had a matching L4 entry, we
+ * need to make sure that the L3 entry we
+ * obtained uses the same index.
+ */
+ if (l3_flex_pit) {
+ if (l3_flex_pit->pit_index !=
+ flex_pit->pit_index) {
+ return -EINVAL;
+ }
+ } else {
+ new_flex_offset = true;
+ }
+ } else {
+ flex_pit = l3_flex_pit;
+ }
+ }
+
+ /* If we didn't find an existing flex offset, we need to
+ * program a new one. However, we don't immediately program it
+ * here because we will wait to program until after we check
+ * that it is safe to change the input set.
+ */
+ if (!flex_pit) {
+ new_flex_offset = true;
+ pit_index = i40e_unused_pit_index(pf);
+ } else {
+ pit_index = flex_pit->pit_index;
+ }
+
+ /* Update the mask with the new offset */
+ new_mask |= i40e_pit_index_to_mask(pit_index);
+ }
+
+ /* If the mask and flexible filter offsets for this filter match the
+ * currently programmed values we don't need any input set change, so
+ * this filter is safe to install.
+ */
+ if (new_mask == current_mask && !new_flex_offset)
+ return 0;
+
+ netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n",
+ i40e_flow_str(fsp));
+ i40e_print_input_set(vsi, current_mask, new_mask);
+ if (new_flex_offset) {
+ netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d",
+ pit_index, src_offset);
+ }
+
+ /* Hardware input sets are global across multiple ports, so even the
+ * main port cannot change them when in MFP mode as this would impact
+ * any filters on the other ports.
+ */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* This filter requires us to update the input set. However, hardware
+ * only supports one input set per flow type, and does not support
+ * separate masks for each filter. This means that we can only support
+ * a single mask for all filters of a specific type.
+ *
+ * If we have preexisting filters, they obviously depend on the
+ * current programmed input set. Display a diagnostic message in this
+ * case explaining why the filter could not be accepted.
+ */
+ if (*fdir_filter_count) {
+ netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters are removed\n",
+ i40e_flow_str(fsp),
+ *fdir_filter_count);
+ return -EOPNOTSUPP;
+ }
+
+ i40e_write_fd_input_set(pf, index, new_mask);
+
+ /* Add the new offset and update table, if necessary */
+ if (new_flex_offset) {
+ err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
+ pit_index);
+ if (err)
+ return err;
+
+ if (flex_l3) {
+ err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
+ src_offset,
+ pit_index);
+ if (err)
+ return err;
+ }
+
+ i40e_reprogram_flex_pit(pf);
+ }
+
+ return 0;
+}
+
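
The per-field policy above -- a field's mask must be all-ones or all-zero, anything else is rejected -- condenses to a few lines. A stand-alone sketch for one 32-bit field (field widths as in ethtool's tcp_ip4_spec):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 to set the input-set bit, 0 to clear it, -1 to reject
     * the filter, mirroring the switch statement above. */
    static int classify_mask32(uint32_t mask)
    {
            if (mask == 0xFFFFFFFFu)
                    return 1;  /* fully enabled: match on this field */
            if (mask == 0)
                    return 0;  /* fully disabled: ignore this field  */
            return -1;         /* partial masks: -EOPNOTSUPP         */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   classify_mask32(0xFFFFFFFFu),  /* 1  */
                   classify_mask32(0),            /* 0  */
                   classify_mask32(0xFFFF0000u)); /* -1 */
            return 0;
    }
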
+/**
* i40e_add_fdir_ethtool - Add/Remove Flow Director filters
* @vsi: pointer to the targeted VSI
* @cmd: command to get or set RX flow classification rules
@@ -2699,11 +3628,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
struct ethtool_rxnfc *cmd)
{
+ struct i40e_rx_flow_userdef userdef;
struct ethtool_rx_flow_spec *fsp;
struct i40e_fdir_filter *input;
+ u16 dest_vsi = 0, q_index = 0;
struct i40e_pf *pf;
int ret = -EINVAL;
- u16 vf_id;
+ u8 dest_ctl;
if (!vsi)
return -EINVAL;
@@ -2712,26 +3643,61 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return -EOPNOTSUPP;
- if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+ if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)
return -ENOSPC;
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return -EBUSY;
fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ /* Parse the user-defined field */
+ if (i40e_parse_rx_flow_user_data(fsp, &userdef))
+ return -EINVAL;
+
+ /* Extended MAC field is not supported */
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ ret = i40e_check_fdir_input_set(vsi, fsp, &userdef);
+ if (ret)
+ return ret;
+
if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
pf->hw.func_caps.fd_filters_guaranteed)) {
return -EINVAL;
}
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= vsi->num_queue_pairs))
- return -EINVAL;
+ /* ring_cookie is either the drop index, or encodes both the queue
+ * index and the VF id we wish to target.
+ */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (!vf) {
+ if (ring >= vsi->num_queue_pairs)
+ return -EINVAL;
+ dest_vsi = vsi->id;
+ } else {
+ /* VFs are zero-indexed, so we subtract one here */
+ vf--;
+
+ if (vf >= pf->num_alloc_vfs)
+ return -EINVAL;
+ if (ring >= pf->vf[vf].num_queue_pairs)
+ return -EINVAL;
+ dest_vsi = pf->vf[vf].lan_vsi_id;
+ }
+ dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ q_index = ring;
+ }
input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -2739,20 +3705,14 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
return -ENOMEM;
input->fd_id = fsp->location;
-
- if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
- input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
- else
- input->dest_ctl =
- I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
-
- input->q_index = fsp->ring_cookie;
- input->flex_off = 0;
- input->pctype = 0;
- input->dest_vsi = vsi->id;
+ input->q_index = q_index;
+ input->dest_vsi = dest_vsi;
+ input->dest_ctl = dest_ctl;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
- input->flow_type = fsp->flow_type;
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->flow_type = fsp->flow_type & ~FLOW_EXT;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
/* Reverse the src and dest notion, since the HW expects them to be from
@@ -2760,33 +3720,29 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
*/
input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
- input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
- input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
-
- if (ntohl(fsp->m_ext.data[1])) {
- vf_id = ntohl(fsp->h_ext.data[1]);
- if (vf_id >= pf->num_alloc_vfs) {
- netif_info(pf, drv, vsi->netdev,
- "Invalid VF id %d\n", vf_id);
- goto free_input;
- }
- /* Find vsi id from vf id and override dest vsi */
- input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
- if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
- netif_info(pf, drv, vsi->netdev,
- "Invalid queue id %d for VF %d\n",
- input->q_index, vf_id);
- goto free_input;
- }
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ if (userdef.flex_filter) {
+ input->flex_filter = true;
+ input->flex_word = cpu_to_be16(userdef.flex_word);
+ input->flex_offset = userdef.flex_offset;
}
ret = i40e_add_del_fdir(vsi, input, true);
-free_input:
if (ret)
- kfree(input);
- else
- i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+ goto free_input;
+
+ /* Add the input filter to the fdir_input_list, possibly replacing
+ * a previous filter. Do not free the input structure after adding it
+ * to the list as this would cause a use-after-free bug.
+ */
+ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+ return 0;
+
+free_input:
+ kfree(input);
return ret;
}
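
The ring_cookie packing used above keeps the queue index in the low 32 bits and the one-based VF id in the byte above them (ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF is 32 in the ethtool UAPI). A stand-alone sketch of the encode/decode pair; the mask constants are mirrored here by hand, so treat them as assumptions rather than the authoritative UAPI values:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_MASK   0x00000000FFFFFFFFULL /* ~ETHTOOL_RX_FLOW_SPEC_RING */
    #define RING_VF_OFF 32                    /* ..._RING_VF_OFF            */

    int main(void)
    {
            uint32_t queue = 3;
            uint64_t vf = 2; /* one-based at this boundary; 0 means the PF */
            uint64_t cookie = (vf << RING_VF_OFF) | queue;

            printf("queue = %u, vf = %llu\n",
                   (uint32_t)(cookie & RING_MASK),
                   (unsigned long long)((cookie >> RING_VF_OFF) & 0xFF));
            return 0;
    }
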
@@ -3036,7 +3992,7 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
* @dev: network interface device structure
*
* The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the i40e_priv_flags_strings
+ * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags
* array.
*
* Returns a u32 bitmap of flags.
@@ -3046,19 +4002,27 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u32 ret_flags = 0;
+ u32 i, j, ret_flags = 0;
+
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40e_priv_flags *priv_flags;
+
+ priv_flags = &i40e_gstrings_priv_flags[i];
+
+ if (priv_flags->flag & pf->flags)
+ ret_flags |= BIT(i);
+ }
+
+ if (pf->hw.pf_id != 0)
+ return ret_flags;
+
+ for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
+ const struct i40e_priv_flags *priv_flags;
- ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
- I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
- ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
- I40E_PRIV_FLAGS_FD_ATR : 0;
- ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
- I40E_PRIV_FLAGS_VEB_STATS : 0;
- ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
- 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
- if (pf->hw.pf_id == 0) {
- ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
- I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+ priv_flags = &i40e_gl_gstrings_priv_flags[j];
+
+ if (priv_flags->flag & pf->flags)
+ ret_flags |= BIT(i + j);
}
return ret_flags;
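
The table-driven pattern that replaces the hand-rolled flag tests generalizes nicely: bit i of the ethtool result corresponds to entry i of the flag table. The struct fields below (flag_string, flag, read_only) match their usage in this diff, but the table contents are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct priv_flags {
            const char *flag_string;
            uint64_t flag;
            int read_only;
    };

    static const struct priv_flags gstrings_priv_flags[] = {
            { "link-down-on-close", 1ULL << 0, 0 }, /* invented entries */
            { "veb-stats",          1ULL << 1, 0 },
            { "mfp",                1ULL << 2, 1 }, /* read-only */
    };

    int main(void)
    {
            uint64_t dev_flags = (1ULL << 1) | (1ULL << 2); /* pretend state */
            uint32_t ret_flags = 0;
            unsigned int i;

            for (i = 0; i < sizeof(gstrings_priv_flags) /
                            sizeof(gstrings_priv_flags[0]); i++)
                    if (gstrings_priv_flags[i].flag & dev_flags)
                            ret_flags |= 1U << i;

            printf("priv flags bitmap: 0x%x\n", ret_flags); /* 0x6 */
            return 0;
    }
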
@@ -3074,54 +4038,66 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u16 sw_flags = 0, valid_flags = 0;
- bool reset_required = false;
- bool promisc_change = false;
- int ret;
+ u64 changed_flags;
+ u32 i, j;
- /* NOTE: MFP is not settable */
+ changed_flags = pf->flags;
- if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
- pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
- else
- pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40e_priv_flags *priv_flags;
- /* allow the user to control the state of the Flow
- * Director ATR (Application Targeted Routing) feature
- * of the driver
+ priv_flags = &i40e_gstrings_priv_flags[i];
+
+ if (priv_flags->read_only)
+ continue;
+
+ if (flags & BIT(i))
+ pf->flags |= priv_flags->flag;
+ else
+ pf->flags &= ~(priv_flags->flag);
+ }
+
+ if (pf->hw.pf_id != 0)
+ goto flags_complete;
+
+ for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
+ const struct i40e_priv_flags *priv_flags;
+
+ priv_flags = &i40e_gl_gstrings_priv_flags[j];
+
+ if (priv_flags->read_only)
+ continue;
+
+ if (flags & BIT(i + j))
+ pf->flags |= priv_flags->flag;
+ else
+ pf->flags &= ~(priv_flags->flag);
+ }
+
+flags_complete:
+ /* check for flags that changed */
+ changed_flags ^= pf->flags;
+
+ /* Process any additional changes needed as a result of flag changes.
+ * The changed_flags value reflects the list of bits that were
+ * changed in the code above.
*/
- if (flags & I40E_PRIV_FLAGS_FD_ATR) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- } else {
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
-
- /* flush current ATR settings */
- set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
- }
-
- if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
- !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
- reset_required = true;
- } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
- reset_required = true;
- }
-
- if (pf->hw.pf_id == 0) {
- if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
- !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
- pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
- promisc_change = true;
- } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
- (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
- pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
- promisc_change = true;
- }
+
+ /* Flush current ATR settings if ATR was disabled */
+ if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
- if (promisc_change) {
+
+ /* Only allow ATR evict on hardware that is capable of handling it */
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+
+ if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
+ u16 sw_flags = 0, valid_flags = 0;
+ int ret;
+
if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
@@ -3137,22 +4113,17 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
}
}
- if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
- (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
- pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
- else
- pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
-
- /* if needed, issue reset to cause things to take effect */
- if (reset_required)
- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+	/* Issue a reset to cause things to take effect. As additional bits
+	 * are added, we will need to build a mask of the bits requiring reset
+ */
+ if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
+ ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;
}
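The snapshot-and-XOR idiom above is the heart of the new priv-flags handling: pf->flags is copied before the loops, and XOR-ing the copy with the final value leaves exactly the bits that changed, so the follow-up work can key off changed_flags instead of re-comparing each flag by hand. A minimal standalone sketch of the same idiom, using illustrative flag values rather than the driver's real bit assignments:

#include <stdint.h>
#include <stdio.h>

#define FLAG_ATR	(1ULL << 0)	/* illustrative bits, not i40e's */
#define FLAG_VEB_STATS	(1ULL << 1)
#define FLAG_LEGACY_RX	(1ULL << 2)

int main(void)
{
	uint64_t flags = FLAG_ATR | FLAG_VEB_STATS;
	uint64_t changed = flags;		/* snapshot the old value */

	flags &= ~FLAG_VEB_STATS;		/* user cleared one flag */
	flags |= FLAG_LEGACY_RX;		/* ...and set another */

	changed ^= flags;			/* only changed bits remain */

	if (changed & FLAG_VEB_STATS)
		printf("veb-stats toggled: reset required\n");
	if (changed & FLAG_LEGACY_RX)
		printf("legacy-rx toggled: reset required\n");
	return 0;
}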
static const struct ethtool_ops i40e_ethtool_ops = {
- .get_settings = i40e_get_settings,
- .set_settings = i40e_set_settings,
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
.get_regs = i40e_get_regs,
@@ -3189,6 +4160,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_priv_flags = i40e_set_priv_flags,
.get_per_queue_coalesce = i40e_get_per_queue_coalesce,
.set_per_queue_coalesce = i40e_set_per_queue_coalesce,
+ .get_link_ksettings = i40e_get_link_ksettings,
+ .set_link_ksettings = i40e_set_link_ksettings,
};
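The ops table also completes the move from the removed get_settings/set_settings pair to the link_ksettings API, which describes link modes as bitmaps rather than a single u32 and therefore scales past 32 modes. A hedged sketch of the shape of such a callback; the mode and speed values below are placeholders, not what i40e actually reports:

static int example_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *ks)
{
	/* clear the mode bitmaps before advertising anything */
	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);

	ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full);

	ks->base.speed = SPEED_40000;		/* placeholder link state */
	ks->base.duplex = DUPLEX_FULL;
	ks->base.autoneg = AUTONEG_DISABLE;
	return 0;
}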
void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 82a95cc2c8ee..d5c9c9e06ff5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -32,6 +32,12 @@
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
+/* All i40e tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -39,9 +45,9 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -50,13 +56,16 @@ static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporatio
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
-static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static int i40e_reset(struct i40e_pf *pf);
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -286,8 +295,8 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
**/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
- if (!test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
queue_work(i40e_wq, &pf->service_task);
}
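Note the shape change in the test_bit() calls throughout this patch: &pf->state becomes pf->state, which implies the state word is now a DECLARE_BITMAP() array rather than a single unsigned long, so the driver is no longer limited to BITS_PER_LONG state bits. A small sketch of the difference, with invented field and bit names:

#include <linux/bitops.h>

enum example_state {
	__EX_DOWN,			/* illustrative states */
	__EX_RESET_PENDING,
	__EX_STATE_SIZE,		/* keep last: sizes the bitmap */
};

struct example_pf {
	DECLARE_BITMAP(state, __EX_STATE_SIZE);
};

static bool example_can_schedule(struct example_pf *pf)
{
	/* the bitmap decays to unsigned long *, so no '&' is needed */
	return !test_bit(__EX_DOWN, pf->state) &&
	       !test_bit(__EX_RESET_PENDING, pf->state);
}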
@@ -299,11 +308,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
* device is munged, not just the one netdev port, so go for the full
* reset.
**/
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev)
-#else
static void i40e_tx_timeout(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -372,13 +377,13 @@ static void i40e_tx_timeout(struct net_device *netdev)
switch (pf->tx_timeout_recovery_level) {
case 1:
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
case 2:
- set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
break;
case 3:
- set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
@@ -408,10 +413,7 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the service task.
**/
-#ifndef I40E_FCOE
-static
-#endif
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
+static void i40e_get_netdev_stats_struct(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -420,7 +422,7 @@ void i40e_get_netdev_stats_struct(struct net_device *netdev,
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
- if (test_bit(__I40E_DOWN, &vsi->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
if (!vsi->tx_rings)
@@ -723,55 +725,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
veb->stat_offsets_loaded = true;
}
-#ifdef I40E_FCOE
-/**
- * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
- * @vsi: the VSI that is capable of doing FCoE
- **/
-static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
-{
- struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_fcoe_stats *ofs;
- struct i40e_fcoe_stats *fs; /* device's eth stats */
- int idx;
-
- if (vsi->type != I40E_VSI_FCOE)
- return;
-
- idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
- fs = &vsi->fcoe_stats;
- ofs = &vsi->fcoe_stats_offsets;
-
- i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
- i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
- i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
- i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
- i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
- i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
- i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_last_error, &fs->fcoe_last_error);
- i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
-
- vsi->fcoe_stat_offsets_loaded = true;
-}
-
-#endif
/**
* i40e_update_vsi_stats - Update the vsi statistics counters.
* @vsi: the VSI to be updated
@@ -790,7 +743,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
- u64 tx_lost_interrupt;
struct i40e_ring *p;
u32 rx_page, rx_buf;
u64 bytes, packets;
@@ -801,8 +753,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
u64 tx_p, tx_b;
u16 q;
- if (test_bit(__I40E_DOWN, &vsi->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
ns = i40e_get_vsi_stats_struct(vsi);
@@ -816,7 +768,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
- tx_lost_interrupt = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
@@ -835,7 +786,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
@@ -854,7 +804,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
- vsi->tx_lost_interrupt = tx_lost_interrupt;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
@@ -1101,13 +1050,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_lpi_count, &nsd->rx_lpi_count);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
- !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
- !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
nsd->fd_atr_status = true;
else
nsd->fd_atr_status = false;
@@ -1129,9 +1078,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi);
-#ifdef I40E_FCOE
- i40e_update_fcoe_stats(vsi);
-#endif
}
/**
@@ -1400,7 +1346,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* to failed, so we don't bother to try sending the filter
* to the hardware.
*/
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
f->state = I40E_FILTER_FAILED;
else
f->state = I40E_FILTER_NEW;
@@ -1562,11 +1508,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
*
* Returns 0 on success, negative on failure
**/
-#ifdef I40E_FCOE
-int i40e_set_mac(struct net_device *netdev, void *p)
-#else
static int i40e_set_mac(struct net_device *netdev, void *p)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1583,8 +1525,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return 0;
}
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
return -EADDRNOTAVAIL;
if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1626,17 +1568,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
*
* Setup VSI queue mapping for enabled traffic classes.
**/
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
- struct i40e_vsi_context *ctxt,
- u8 enabled_tc,
- bool is_add)
-#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc,
bool is_add)
-#endif
{
struct i40e_pf *pf = vsi->back;
u16 sections = 0;
@@ -1686,11 +1621,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
qcount = min_t(int, pf->alloc_rss_size,
num_tc_qps);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- qcount = num_tc_qps;
- break;
-#endif
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
@@ -1800,11 +1730,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
* i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
-#ifdef I40E_FCOE
-void i40e_set_rx_mode(struct net_device *netdev)
-#else
static void i40e_set_rx_mode(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1883,19 +1809,12 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
- while (next) {
- next = hlist_entry(next->hlist.next,
- typeof(struct i40e_new_mac_filter),
- hlist);
-
- /* keep going if we found a broadcast filter */
- if (next && is_broadcast_ether_addr(next->f->macaddr))
- continue;
-
- break;
+ hlist_for_each_entry_continue(next, hlist) {
+ if (!is_broadcast_ether_addr(next->f->macaddr))
+ return next;
}
- return next;
+ return NULL;
}
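The rewrite above leans on hlist_for_each_entry_continue(), which resumes the walk at the node after the one passed in, so "return the first non-broadcast entry or NULL" falls out in three lines. The same skip-while-iterating shape, sketched with illustrative types:

#include <linux/etherdevice.h>
#include <linux/list.h>

struct example_filter {
	struct hlist_node hlist;
	u8 macaddr[ETH_ALEN];
};

/* return the first non-broadcast filter after @next, or NULL */
static struct example_filter *
example_next_filter(struct example_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->macaddr))
			return next;
	}

	return NULL;
}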
/**
@@ -2001,7 +1920,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
if (fcnt != num_add) {
*promisc_changed = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
i40e_aq_str(hw, aq_err),
@@ -2084,7 +2003,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
struct i40e_aqc_add_macvlan_element_data *add_list;
struct i40e_aqc_remove_macvlan_element_data *del_list;
- while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+ while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
usleep_range(1000, 2000);
pf = vsi->back;
@@ -2220,8 +2139,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_add = 0;
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state)) {
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
+ vsi->state)) {
new->state = I40E_FILTER_FAILED;
continue;
}
@@ -2308,20 +2227,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
* safely exit if we didn't just enter, we no longer have any failed
* filters, and we have reduced filters below the threshold value.
*/
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
!promisc_changed && !failed_filters &&
(vsi->active_filters < vsi->promisc_threshold)) {
dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n",
vsi_name);
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
promisc_changed = true;
vsi->promisc_threshold = 0;
}
/* if the VF is not trusted do not do promisc */
if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
goto out;
}
@@ -2346,12 +2265,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if ((changed_flags & IFF_PROMISC) ||
(promisc_changed &&
- test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
+ test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
- test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state));
+ test_bit(__I40E_VSI_OVERFLOW_PROMISC,
+ vsi->state));
if ((vsi->type == I40E_VSI_MAIN) &&
(pf->lan_veb != I40E_NO_VEB) &&
!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
@@ -2434,7 +2353,7 @@ out:
if (retval)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
return retval;
err_no_memory:
@@ -2446,7 +2365,7 @@ err_no_memory_locked:
spin_unlock_bh(&vsi->mac_filter_hash_lock);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
return -ENOMEM;
}
@@ -2487,13 +2406,15 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
- i40e_notify_client_of_l2_param_changes(vsi);
+ pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+ I40E_FLAG_CLIENT_L2_CHANGE);
return 0;
}
@@ -2707,13 +2628,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
*
* net_device_ops implementation for adding vlan ids
**/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-#else
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -2744,13 +2660,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
*
* net_device_ops implementation for removing vlan ids
**/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -2920,9 +2831,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
- i40e_fcoe_setup_ddp_resources(vsi);
-#endif
return err;
}
@@ -2942,9 +2850,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
- i40e_fcoe_free_ddp_resources(vsi);
-#endif
}
/**
@@ -3015,9 +2920,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.qlen = ring->count;
tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED));
-#ifdef I40E_FCOE
- tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* FDIR VSI tx ring can still use RS bit and writebacks */
if (vsi->type != I40E_VSI_FDIR)
@@ -3098,7 +3000,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->rx_buf_len = vsi->rx_buf_len;
- rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
@@ -3120,9 +3023,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.l2tsel = 1;
/* this controls whether VLAN is stripped from inner headers */
rx_ctx.showiv = 0;
-#ifdef I40E_FCOE
- rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
/* set the prefena field to 1 because the manual says to */
rx_ctx.prefena = 1;
@@ -3144,6 +3044,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
return -ENOMEM;
}
+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ clear_ring_build_skb_enabled(ring);
+ else
+ set_ring_build_skb_enabled(ring);
+
/* cache tail for quicker writes, and clear the reg before use */
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
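The dbuff change pairs with the removal of the ALIGN() rounding further down: once rx_buf_len is no longer forced to a multiple of the 128-byte granule, a plain right shift would truncate, while DIV_ROUND_UP() accounts for the partial last unit. A standalone illustration of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DBUFF_UNIT		128	/* 1 << I40E_RXQ_CTX_DBUFF_SHIFT */

int main(void)
{
	unsigned int len = 1536 - 2;	/* e.g. a 1536-byte buffer minus NET_IP_ALIGN */

	/* shift truncates: 1534 / 128 == 11; round-up covers the tail: 12 */
	printf("shift: %u units, DIV_ROUND_UP: %u units\n",
	       len / DBUFF_UNIT, DIV_ROUND_UP(len, DBUFF_UNIT));
	return 0;
}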
@@ -3181,27 +3087,21 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
- vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
- + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = I40E_RXBUFFER_2048;
-
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
-
-#ifdef I40E_FCOE
- /* setup rx buffer for FCoE */
- if ((vsi->type == I40E_VSI_FCOE) &&
- (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
- vsi->rx_buf_len = I40E_RXBUFFER_3072;
- vsi->max_frame = I40E_RXBUFFER_3072;
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+ vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+#if (PAGE_SIZE < 8192)
+ } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+ I40E_RXBUFFER_2048;
}
-#endif /* I40E_FCOE */
- /* round up for the chip's needs */
- vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
- BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
-
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_configure_rx_ring(vsi->rx_rings[i]);
@@ -3281,6 +3181,12 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
+ /* Reset FDir counters as we're replaying all existing filters */
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+
hlist_for_each_entry_safe(filter, node,
&pf->fdir_filter_list, fdir_node) {
i40e_add_del_fdir(vsi, filter, true);
@@ -3705,29 +3611,29 @@ static irqreturn_t i40e_intr(int irq, void *data)
* this is not a performance path and napi_schedule()
* can deal with rescheduling.
*/
- if (!test_bit(__I40E_DOWN, &pf->state))
+ if (!test_bit(__I40E_VSI_DOWN, pf->state))
napi_schedule_irqoff(&q_vector->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
- set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
}
if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
- set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
}
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
}
if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
- if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
@@ -3738,7 +3644,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->globr_count++;
} else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
- set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
+ set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
}
}
@@ -3771,7 +3677,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
@@ -3781,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- if (!test_bit(__I40E_DOWN, &pf->state)) {
+ if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf, false);
}
@@ -3993,11 +3899,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
* This is used by netconsole to send skbs without having to re-enable
* interrupts. It's not called while the normal interrupt routine is executing.
**/
-#ifdef I40E_FCOE
-void i40e_netpoll(struct net_device *netdev)
-#else
static void i40e_netpoll(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -4005,7 +3907,7 @@ static void i40e_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &vsi->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -4017,6 +3919,8 @@ static void i40e_netpoll(struct net_device *netdev)
}
#endif
+#define I40E_QTX_ENA_WAIT_COUNT 50
+
/**
* i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
* @pf: the PF being configured
@@ -4047,6 +3951,50 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_tx_q - Start or stop a particular Tx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 tx_reg;
+ int i;
+
+ /* warn the TX unit of coming changes */
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+ if (!enable)
+ usleep_range(10, 20);
+
+ for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ return;
+
+ /* turn on/off the queue */
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ } else {
+ tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
+
+ wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+}
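The factored-out helper is a textbook request/status register dance: poll until the REQ and STAT bits agree (no transition in flight), return early if the queue is already in the requested state, then flip REQ and let the caller wait for STAT to follow. A condensed userspace model of that control flow, with an invented register layout:

#include <stdbool.h>
#include <stdint.h>

#define QENA_REQ	(1u << 0)	/* invented bit positions */
#define QENA_STAT	(1u << 2)
#define WAIT_COUNT	50

static uint32_t fake_reg = QENA_REQ | QENA_STAT;	/* stands in for rd32()/wr32() */

void control_queue(bool enable)
{
	uint32_t reg = fake_reg;
	int i;

	/* wait until REQ and STAT agree, i.e. no transition in flight */
	for (i = 0; i < WAIT_COUNT; i++) {
		reg = fake_reg;
		if (!!(reg & QENA_REQ) == !!(reg & QENA_STAT))
			break;
		/* the real code sleeps 1-2 ms per iteration here */
	}

	/* already in the requested state: nothing to do */
	if (enable == !!(reg & QENA_STAT))
		return;

	if (enable)
		reg |= QENA_REQ;
	else
		reg &= ~QENA_REQ;
	fake_reg = reg;		/* the caller waits for STAT to follow */
}

int main(void)
{
	control_queue(false);
	return 0;
}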
+
+/**
* i40e_vsi_control_tx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4054,41 +4002,11 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int i, j, pf_q, ret = 0;
- u32 tx_reg;
+ int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-
- /* warn the TX unit of coming changes */
- i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
- if (!enable)
- usleep_range(10, 20);
-
- for (j = 0; j < 50; j++) {
- tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
- ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
- break;
- usleep_range(1000, 2000);
- }
- /* Skip if the queue is already in the requested state */
- if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- continue;
-
- /* turn on/off the queue */
- if (enable) {
- wr32(hw, I40E_QTX_HEAD(pf_q), 0);
- tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
- } else {
- tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- }
-
- wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
- /* No waiting for the Tx queue to disable */
- if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
- continue;
+ i40e_control_tx_q(pf, pf_q, enable);
/* wait for the change to finish */
ret = i40e_pf_txq_wait(pf, pf_q, enable);
@@ -4100,8 +4018,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
}
- if (hw->revision_id == 0)
- mdelay(50);
return ret;
}
@@ -4135,6 +4051,43 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_rx_q - Start or stop a particular Rx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 rx_reg;
+ int i;
+
+ for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ return;
+
+ /* turn on/off the queue */
+ if (enable)
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ else
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+
+ wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+}
+
+/**
* i40e_vsi_control_rx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4142,33 +4095,11 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int i, j, pf_q, ret = 0;
- u32 rx_reg;
+ int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- for (j = 0; j < 50; j++) {
- rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
- ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
- break;
- usleep_range(1000, 2000);
- }
-
- /* Skip if the queue is already in the requested state */
- if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
-
- /* turn on/off the queue */
- if (enable)
- rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
- else
- rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
- /* No waiting for the Tx queue to disable */
- if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
- continue;
+ i40e_control_rx_q(pf, pf_q, enable);
/* wait for the change to finish */
ret = i40e_pf_rxq_wait(pf, pf_q, enable);
@@ -4180,6 +4111,12 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
}
}
+ /* Due to HW errata, on Rx disable only, the register can indicate done
+ * before it really is. Needs 50ms to be sure
+ */
+ if (!enable)
+ mdelay(50);
+
return ret;
}
@@ -4206,6 +4143,10 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
+ /* When port TX is suspended, don't wait */
+ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
+ return i40e_vsi_stop_rings_no_wait(vsi);
+
/* do rx first for enable and last for disable
* Ignore return value, we need to shutdown whatever we can
*/
@@ -4214,6 +4155,29 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
+ * @vsi: the VSI being shutdown
+ *
+ * This function stops all the rings for a VSI but does not delay to verify
+ * that the rings have been disabled. It is expected that the caller is
+ * shutting down multiple VSIs at once and will perform a single delay for
+ * all of them after initiating the shutdown. This is particularly useful
+ * for shutting down lots of VFs together; otherwise, a large delay is
+ * incurred while each VSI is configured serially.
+ **/
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ i40e_control_tx_q(pf, pf_q, false);
+ i40e_control_rx_q(pf, pf_q, false);
+ }
+}
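The payoff of the no-wait variant is that the settling delay is paid once per batch instead of once per queue pair; with the 50 ms Rx-disable errata delay above, shutting down over a hundred VF VSIs serially would otherwise stall the task for seconds. A trivial back-of-the-envelope check:

#include <stdio.h>

int main(void)
{
	int vsis = 128, delay_ms = 50;	/* illustrative VF count */

	printf("serial: %d ms, batched: %d ms\n",
	       vsis * delay_ms, delay_ms);
	return 0;
}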
+
+/**
* i40e_vsi_free_irq - Free the irq association with the OS
* @vsi: the VSI being configured
**/
@@ -4471,17 +4435,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
**/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
- bool reset = false;
-
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+	struct i40e_pf *pf = vsi->back;
+
+ if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
i40e_down(vsi);
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0;
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- reset = true;
- i40e_notify_client_of_netdev_close(vsi, reset);
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ pf->flags |= I40E_FLAG_CLIENT_RESET;
}
/**
@@ -4490,18 +4453,10 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
**/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
- if (test_bit(__I40E_DOWN, &vsi->state))
- return;
-
- /* No need to disable FCoE VSI when Tx suspended */
- if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
- vsi->type == I40E_VSI_FCOE) {
- dev_dbg(&vsi->back->pdev->dev,
- "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
- }
- set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
else
@@ -4514,10 +4469,9 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
**/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
- if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+ if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
return;
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
@@ -4552,21 +4506,20 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
}
}
-#ifdef CONFIG_I40E_DCB
/**
* i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
* @vsi: the VSI being configured
*
- * This function waits for the given VSI's queues to be disabled.
+ * Wait until all queues on a given VSI have been disabled.
**/
-static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- /* Check and wait for the disable status of the queue */
+ /* Check and wait for the Tx queue */
ret = i40e_pf_txq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -4574,11 +4527,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
vsi->seid, pf_q);
return ret;
}
- }
-
- pf_q = vsi->base_queue;
- for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- /* Check and wait for the disable status of the queue */
+		/* Check and wait for the Rx queue */
ret = i40e_pf_rxq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -4591,6 +4540,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
return 0;
}
+#ifdef CONFIG_I40E_DCB
/**
* i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
* @pf: the PF
@@ -4603,8 +4553,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
int v, ret = 0;
for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
- /* No need to wait for FCoE VSI queues */
- if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+ if (pf->vsi[v]) {
ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret)
break;
@@ -4622,16 +4571,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
* @vsi: Pointer to VSI struct
*
* This function checks specified queue for given VSI. Detects hung condition.
- * Sets hung bit since it is two step process. Before next run of service task
- * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
- * hung condition remain unchanged and during subsequent run, this function
- * issues SW interrupt to recover from hung condition.
+ * We proactively detect hung TX queues by checking if interrupts are disabled
+ * but there are pending descriptors. If it appears hung, attempt to recover
+ * by triggering a SW interrupt.
**/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
struct i40e_ring *tx_ring = NULL;
struct i40e_pf *pf;
- u32 head, val, tx_pending_hw;
+ u32 val, tx_pending;
int i;
pf = vsi->back;
@@ -4657,47 +4605,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
else
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
- head = i40e_get_head(tx_ring);
-
- tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
-
- /* HW is done executing descriptors, updated HEAD write back,
- * but SW hasn't processed those descriptors. If interrupt is
- * not generated from this point ON, it could result into
- * dev_watchdog detecting timeout on those netdev_queue,
- * hence proactively trigger SW interrupt.
- */
- if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
- /* NAPI Poll didn't run and clear since it was set */
- if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
- &tx_ring->q_vector->hung_detected)) {
- netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
- vsi->seid, q_idx, tx_pending_hw,
- tx_ring->next_to_clean, head,
- tx_ring->next_to_use,
- readl(tx_ring->tail));
- netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
- vsi->seid, q_idx, val);
- i40e_force_wb(vsi, tx_ring->q_vector);
- } else {
- /* First Chance - detected possible hung */
- set_bit(I40E_Q_VECTOR_HUNG_DETECT,
- &tx_ring->q_vector->hung_detected);
- }
- }
+ tx_pending = i40e_get_tx_pending(tx_ring);
- /* This is the case where we have interrupts missing,
- * so the tx_pending in HW will most likely be 0, but we
- * will have tx_pending in SW since the WB happened but the
- * interrupt got lost.
+	/* If interrupts are disabled and TX pending is non-zero, trigger
+	 * the SW interrupt right away (don't wait). Worst case there will
+	 * be one extra interrupt, which may end up cleaning no queues
+	 * because they were already clean.
*/
- if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
- (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
- local_bh_disable();
- if (napi_reschedule(&tx_ring->q_vector->napi))
- tx_ring->tx_stats.tx_lost_interrupt++;
- local_bh_enable();
- }
+ if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+ i40e_force_wb(vsi, tx_ring->q_vector);
}
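The simplified detector boils down to one predicate: descriptors are still pending while the vector's interrupt-enable bit is clear, so no interrupt will arrive to clean them, and forcing a writeback is the recovery. As a boolean sketch, with an abbreviated stand-in mask name:

#include <stdbool.h>
#include <stdint.h>

#define INTENA_MASK	(1u << 0)	/* stand-in for the DYN_CTL enable bit */

/* hung iff work is pending but the interrupt that would clean it is off */
static bool queue_looks_hung(uint32_t dyn_ctl, uint32_t tx_pending)
{
	return tx_pending && !(dyn_ctl & INTENA_MASK);
}

int main(void)
{
	return queue_looks_hung(0, 4) ? 0 : 1;	/* pending + masked: hung */
}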
/**
@@ -4721,8 +4637,8 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf)
return;
/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
return;
/* Make sure type is MAIN VSI */
@@ -5228,20 +5144,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
continue;
/* - Enable all TCs for the LAN VSI
-#ifdef I40E_FCOE
- * - For FCoE VSI only enable the TC configured
- * as per the APP TLV
-#endif
* - For all others keep them at TC0 for now
*/
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
-#ifdef I40E_FCOE
- if (pf->vsi[v]->type == I40E_VSI_FCOE)
- tc_map = i40e_get_fcoe_tc_map(pf);
-#endif /* #ifdef I40E_FCOE */
ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
if (ret) {
@@ -5277,7 +5185,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
@@ -5308,10 +5216,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
(hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
dev_info(&pf->pdev->dev,
"DCBX offload is not supported or is disabled for this PF.\n");
-
- if (pf->flags & I40E_FLAG_MFP_ENABLED)
- goto out;
-
} else {
/* When status is not DISABLED then DCBX in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
@@ -5449,7 +5353,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if (err)
return err;
- clear_bit(__I40E_DOWN, &vsi->state);
+ clear_bit(__I40E_VSI_DOWN, vsi->state);
i40e_napi_enable_all(vsi);
i40e_vsi_enable_irq(vsi);
@@ -5472,13 +5376,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* replay FDIR SB filters */
if (vsi->type == I40E_VSI_FDIR) {
/* reset fd counters */
- pf->fd_add_err = pf->fd_atr_cnt = 0;
- if (pf->fd_tcp_rule > 0) {
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
- pf->fd_tcp_rule = 0;
- }
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
i40e_fdir_filter_restore(vsi);
}
@@ -5503,12 +5402,12 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
WARN_ON(in_interrupt());
- while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
usleep_range(1000, 2000);
i40e_down(vsi);
i40e_up(vsi);
- clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
}
/**
@@ -5535,7 +5434,7 @@ void i40e_down(struct i40e_vsi *vsi)
int i;
/* It is assumed that the caller of this function
- * sets the vsi->state __I40E_DOWN bit.
+ * sets the vsi->state __I40E_VSI_DOWN bit.
*/
if (vsi->netdev) {
netif_carrier_off(vsi->netdev);
@@ -5550,8 +5449,6 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
- i40e_notify_client_of_netdev_close(vsi, false);
-
}
/**
@@ -5612,17 +5509,15 @@ exit:
return ret;
}
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
-#else
static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
-#endif
{
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return i40e_setup_tc(netdev, tc->tc);
+
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return i40e_setup_tc(netdev, tc->mqprio->num_tc);
}
/**
@@ -5645,8 +5540,8 @@ int i40e_open(struct net_device *netdev)
int err;
/* disallow open during test or if eeprom is broken */
- if (test_bit(__I40E_TESTING, &pf->state) ||
- test_bit(__I40E_BAD_EEPROM, &pf->state))
+ if (test_bit(__I40E_TESTING, pf->state) ||
+ test_bit(__I40E_BAD_EEPROM, pf->state))
return -EBUSY;
netif_carrier_off(netdev);
@@ -5675,6 +5570,8 @@ int i40e_open(struct net_device *netdev)
* Finish initialization of the VSI.
*
* Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
**/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
@@ -5738,7 +5635,7 @@ err_setup_rx:
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi])
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
return err;
}
@@ -5753,6 +5650,7 @@ err_setup_tx:
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
struct i40e_fdir_filter *filter;
+ struct i40e_flex_pit *pit_entry, *tmp;
struct hlist_node *node2;
hlist_for_each_entry_safe(filter, node2,
@@ -5760,7 +5658,43 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
hlist_del(&filter->fdir_node);
kfree(filter);
}
+
+ list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
+ list_del(&pit_entry->list);
+ kfree(pit_entry);
+ }
+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+
+ list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
+ list_del(&pit_entry->list);
+ kfree(pit_entry);
+ }
+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
pf->fdir_pf_active_filters = 0;
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+
+ /* Reprogram the default input set for TCP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for UDP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for SCTP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for Other/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
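The new flex-PIT teardown uses the _safe list iterator because every entry is freed mid-walk; the extra cursor caches the next node before the current one disappears, and re-initializing the head leaves an empty list rather than dangling pointers. The same teardown shape in a minimal sketch:

#include <linux/list.h>
#include <linux/slab.h>

struct example_pit {
	struct list_head list;
	/* ... payload ... */
};

static void example_free_all(struct list_head *head)
{
	struct example_pit *entry, *tmp;

	/* _safe variant: @tmp holds the next node before @entry is freed */
	list_for_each_entry_safe(entry, tmp, head, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	INIT_LIST_HEAD(head);	/* leave the head clean and empty */
}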
/**
@@ -5787,12 +5721,14 @@ int i40e_close(struct net_device *netdev)
* i40e_do_reset - Start a PF or Core Reset sequence
* @pf: board private structure
* @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* The essential difference in resets is that the PF Reset
* doesn't clear the packet buffers, doesn't reset the PE
* firmware, and doesn't bother the other PFs on the chip.
**/
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
u32 val;
@@ -5838,7 +5774,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* for the Core Reset.
*/
dev_dbg(&pf->pdev->dev, "PFR requested\n");
- i40e_handle_reset_warning(pf);
+ i40e_handle_reset_warning(pf, lock_acquired);
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -5850,10 +5786,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
- test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+ test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ vsi->state))
i40e_vsi_reinit_locked(pf->vsi[v]);
- clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
- }
}
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v;
@@ -5864,10 +5799,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
- test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
- set_bit(__I40E_DOWN, &vsi->state);
+ test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ vsi->state)) {
+ set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi);
- clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
}
}
} else {
@@ -6007,7 +5942,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
- set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+ set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf);
@@ -6016,7 +5951,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
ret = i40e_resume_port_tx(pf);
- clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+ clear_bit(__I40E_PORT_SUSPENDED, pf->state);
/* In case of error no point in resuming VSIs */
if (ret)
goto exit;
@@ -6025,12 +5960,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
ret = i40e_pf_wait_queues_disabled(pf);
if (ret) {
/* Schedule PF reset to recover */
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
- /* Notify the client for the DCB changes */
- i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
+ pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+ I40E_FLAG_CLIENT_L2_CHANGE);
}
exit:
@@ -6047,7 +5982,7 @@ exit:
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
rtnl_lock();
- i40e_do_reset(pf, reset_flags);
+ i40e_do_reset(pf, reset_flags, true);
rtnl_unlock();
}
@@ -6140,34 +6075,33 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
u32 fcnt_prog, fcnt_avail;
struct hlist_node *node;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return;
- /* Check if, FD SB or ATR was auto disabled and if there is enough room
- * to re-enable
- */
+ /* Check if we have enough room to re-enable FDir SB capability. */
fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) ||
(i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
- /* Wait for some more space to be available to turn on ATR. We also
- * must check that no existing ntuple rules for TCP are in effect
+ /* We should wait for even more space before re-enabling ATR.
+ * Additionally, we cannot enable ATR as long as we still have TCP SB
+ * rules active.
*/
- if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->fd_tcp_rule == 0)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
+ (pf->fd_tcp4_filter_cnt == 0)) {
+ if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
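The re-enable logic is deliberately asymmetric: sideband comes back as soon as the table drops below the headroom mark, while ATR demands a larger headroom and zero remaining TCP sideband rules. The gap between the two thresholds acts as simple hysteresis so the features do not flap right at the boundary. A standalone model with invented numbers:

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE	512	/* invented capacity and headroom */
#define HEAD_ROOM	32
#define HEAD_ROOM_ATR	(HEAD_ROOM * 2)

int main(void)
{
	int prog = 470;			/* filters currently programmed */
	int tcp_sb_rules = 0;

	bool sb_ok  = prog < TABLE_SIZE - HEAD_ROOM;
	bool atr_ok = prog < TABLE_SIZE - HEAD_ROOM_ATR && !tcp_sb_rules;

	printf("re-enable SB: %d, re-enable ATR: %d\n", sb_ok, atr_ok);
	return 0;
}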
@@ -6218,7 +6152,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6237,9 +6171,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
- if (!disable_atr)
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
@@ -6269,10 +6203,10 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, pf->state))
return;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
i40e_fdir_flush_and_replay(pf);
i40e_fdir_check_and_reenable(pf);
@@ -6286,14 +6220,11 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
**/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
- if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
+ if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
return;
switch (vsi->type) {
case I40E_VSI_MAIN:
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
-#endif
if (!vsi->netdev || !vsi->netdev_registered)
break;
@@ -6382,11 +6313,11 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link &&
new_link_speed == old_link_speed &&
- (test_bit(__I40E_DOWN, &vsi->state) ||
+ (test_bit(__I40E_VSI_DOWN, vsi->state) ||
new_link == netif_carrier_ok(vsi->netdev)))
return;
- if (!test_bit(__I40E_DOWN, &vsi->state))
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
@@ -6413,8 +6344,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &pf->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
/* make sure we don't do these things too often */
@@ -6452,44 +6383,44 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
- rtnl_lock();
- if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_REINIT_REQUESTED);
- clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ clear_bit(__I40E_REINIT_REQUESTED, pf->state);
}
- if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
- clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
- clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
- clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
- reset_flags |= BIT(__I40E_DOWN_REQUESTED);
- clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
+ if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
+ reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
+ clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
}
/* If there's a recovery already waiting, it takes
* precedence before starting a new reset sequence.
*/
- if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
- i40e_handle_reset_warning(pf);
- goto unlock;
+ if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ i40e_prep_for_reset(pf, false);
+ i40e_reset(pf);
+ i40e_rebuild(pf, false, false);
}
/* If we're already down or resetting, just bail */
if (reset_flags &&
- !test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_CONFIG_BUSY, &pf->state))
- i40e_do_reset(pf, reset_flags);
-
-unlock:
- rtnl_unlock();
+ !test_bit(__I40E_VSI_DOWN, pf->state) &&
+ !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags, true);
+ rtnl_unlock();
+ }
}
/**
@@ -6534,7 +6465,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u32 val;
/* Do not run clean AQ when PF reset fails */
- if (test_bit(__I40E_RESET_FAILED, &pf->state))
+ if (test_bit(__I40E_RESET_FAILED, pf->state))
return;
/* check for error indications */
@@ -6635,9 +6566,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
opcode);
break;
}
- } while (pending && (i++ < pf->adminq_work_limit));
+ } while (i++ < pf->adminq_work_limit);
+
+ if (i < pf->adminq_work_limit)
+ clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
- clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
/* re-enable Admin queue interrupt cause */
val = rd32(hw, I40E_PFINT_ICR0_ENA);
val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -6662,13 +6595,13 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
if (err) {
dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
err);
- set_bit(__I40E_BAD_EEPROM, &pf->state);
+ set_bit(__I40E_BAD_EEPROM, pf->state);
}
}
- if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
- clear_bit(__I40E_BAD_EEPROM, &pf->state);
+ clear_bit(__I40E_BAD_EEPROM, pf->state);
}
}
@@ -6975,17 +6908,19 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* Close up the VFs and other things in prep for PF Reset.
**/
-static void i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret = 0;
u32 v;
- clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
- if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+ if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return;
if (i40e_check_asq_alive(&pf->hw))
i40e_vc_notify_reset(pf);
@@ -6993,7 +6928,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* quiesce the VSIs and their queues that are not already DOWN */
+ /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
+ if (!lock_acquired)
+ rtnl_lock();
i40e_pf_quiesce_all_vsi(pf);
+ if (!lock_acquired)
+ rtnl_unlock();
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
@@ -7028,31 +6968,41 @@ static void i40e_send_version(struct i40e_pf *pf)
}
/**
- * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * i40e_reset - wait for a core reset to finish; issue a PF reset if no CORER was seen
* @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
**/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+static int i40e_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- u8 set_fc_aq_fail = 0;
i40e_status ret;
- u32 val;
- u32 v;
- /* Now we wait for GRST to settle out.
- * We don't have to delete the VEBs or VSIs from the hw switch
- * because the reset will make them disappear.
- */
ret = i40e_pf_reset(hw);
if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
- set_bit(__I40E_RESET_FAILED, &pf->state);
- goto clear_recovery;
+ set_bit(__I40E_RESET_FAILED, pf->state);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ } else {
+ pf->pfr_count++;
}
- pf->pfr_count++;
+ return ret;
+}
+
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 set_fc_aq_fail = 0;
+ i40e_status ret;
+ u32 val;
+ int v;
- if (test_bit(__I40E_DOWN, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -7066,7 +7016,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
/* re-verify the eeprom if we just had an EMP reset */
- if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
i40e_verify_eeprom(pf);
i40e_clear_pxe_mode(hw);
@@ -7075,8 +7025,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
goto end_core_reset;
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp,
- pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ hw->func_caps.num_rx_qp, 0, 0);
if (ret) {
dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
goto end_core_reset;
@@ -7095,14 +7044,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
-#ifdef I40E_FCOE
- i40e_init_pf_fcoe(pf);
-
-#endif
/* do basic switch setup */
+ if (!lock_acquired)
+ rtnl_lock();
ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
- goto end_core_reset;
+ goto end_unlock;
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -7173,7 +7120,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of Main VSI failed: %d\n", ret);
- goto end_core_reset;
+ goto end_unlock;
}
}
@@ -7216,18 +7163,45 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
- if (pf->num_alloc_vfs) {
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_reset_vf(&pf->vf[v], true);
- }
+ /* Release the RTNL lock before we start resetting VFs */
+ if (!lock_acquired)
+ rtnl_unlock();
+
+ i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
i40e_send_version(pf);
+ /* We've already released the lock, so don't do it again */
+ goto end_core_reset;
+
+end_unlock:
+ if (!lock_acquired)
+ rtnl_unlock();
end_core_reset:
- clear_bit(__I40E_RESET_FAILED, &pf->state);
+ clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
- clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired)
+{
+ int ret;
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+ */
+ ret = i40e_reset(pf);
+ if (!ret)
+ i40e_rebuild(pf, reinit, lock_acquired);
}
/**
@@ -7236,11 +7210,13 @@ clear_recovery:
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* Close up the VFs and other things in prep for a Core Reset,
* then get ready to rebuild the world.
**/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
- i40e_prep_for_reset(pf);
- i40e_reset_and_rebuild(pf, false);
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, false, lock_acquired);
}
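
The `lock_acquired` flag threaded through i40e_prep_for_reset(), i40e_rebuild() and i40e_handle_reset_warning() lets one reset path serve callers that already hold rtnl_lock (ndo callbacks, PCI error handlers) and callers that do not (the service task). A sketch of the idiom, with a pthread mutex standing in for rtnl_lock (illustrative only):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

	static void quiesce_netdevs(void)
	{
		puts("quiescing netdevs (lock held)");
	}

	/* Take the lock only if the caller does not already hold it. */
	static void prep_for_reset(bool lock_acquired)
	{
		if (!lock_acquired)
			pthread_mutex_lock(&rtnl);
		quiesce_netdevs();
		if (!lock_acquired)
			pthread_mutex_unlock(&rtnl);
	}

	int main(void)
	{
		/* Caller without the lock: the helper manages it. */
		prep_for_reset(false);

		/* Caller already holding the lock passes true. */
		pthread_mutex_lock(&rtnl);
		prep_for_reset(true);
		pthread_mutex_unlock(&rtnl);
		return 0;
	}
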
/**
@@ -7258,7 +7234,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
+ if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
return;
/* find what triggered the MDD event */
@@ -7310,7 +7286,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
/* Queue belongs to the PF, initiate a reset */
if (pf_mdd_detected) {
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
}
@@ -7339,12 +7315,12 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
"Too many MDD events on VF %d, disabled\n", i);
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
- set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
}
}
/* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
@@ -7352,6 +7328,23 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
/**
+ * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
+ * @pf: board private structure
+ **/
+static void i40e_sync_udp_filters(struct i40e_pf *pf)
+{
+ int i;
+
+ /* loop through and set pending bit for all active UDP filters */
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->udp_ports[i].port)
+ pf->pending_udp_bitmap |= BIT_ULL(i);
+ }
+
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+}
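
i40e_sync_udp_filters() only marks every active tunnel port dirty and raises I40E_FLAG_UDP_FILTER_SYNC; the existing subtask then replays the ports to firmware, which is how the offloads survive a PF reset (see the i40e_setup_pf_switch() hunk below). A small user-space model of that mark-then-sync bitmap scheme, with invented names:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_PORTS 16

	static uint16_t ports[MAX_PORTS] = { 4789, 0, 6081 }; /* 0 = free */
	static uint64_t pending_bitmap;

	static void mark_all_pending(void) /* e.g. after a reset */
	{
		for (int i = 0; i < MAX_PORTS; i++)
			if (ports[i])
				pending_bitmap |= 1ULL << i;
	}

	static void sync_pending(void) /* the subtask's replay */
	{
		for (int i = 0; i < MAX_PORTS; i++) {
			if (!(pending_bitmap & (1ULL << i)))
				continue;
			pending_bitmap &= ~(1ULL << i);
			printf("reprogramming port %u at index %d\n",
			       ports[i], i);
		}
	}

	int main(void)
	{
		mark_all_pending();
		sync_pending();
		return 0;
	}
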
+
+/**
* i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
* @pf: board private structure
**/
@@ -7359,7 +7352,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- __be16 port;
+ u16 port;
int i;
if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
@@ -7370,7 +7363,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) {
pf->pending_udp_bitmap &= ~BIT_ULL(i);
- port = pf->udp_ports[i].index;
+ port = pf->udp_ports[i].port;
if (port)
ret = i40e_aq_add_udp_tunnel(hw, port,
pf->udp_ports[i].type,
@@ -7383,11 +7376,11 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
"%s %s port %d, index %d failed, err %s aq_err %s\n",
pf->udp_ports[i].type ? "vxlan" : "geneve",
port ? "add" : "delete",
- ntohs(port), i,
+ port, i,
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
- pf->udp_ports[i].index = 0;
+ pf->udp_ports[i].port = 0;
}
}
}
@@ -7405,11 +7398,10 @@ static void i40e_service_task(struct work_struct *work)
unsigned long start_time = jiffies;
/* don't bother with service tasks if a reset is in progress */
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return;
- }
- if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
i40e_detect_recover_hung(pf);
@@ -7419,23 +7411,34 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
- i40e_client_subtask(pf);
+ if (pf->flags & I40E_FLAG_CLIENT_RESET) {
+ /* Client subtask will reopen next time through. */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+ pf->flags &= ~I40E_FLAG_CLIENT_RESET;
+ } else {
+ i40e_client_subtask(pf);
+ if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
+ i40e_notify_client_of_l2_param_changes(
+ pf->vsi[pf->lan_vsi]);
+ pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
+ }
+ }
i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
/* If the tasks have taken longer than one timer cycle or there
* is more work to be done, reschedule the service task now
* rather than wait for the timer to tick again.
*/
if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
- test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
- test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
- test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
+ test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
i40e_service_event_schedule(pf);
}
@@ -7492,15 +7495,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
I40E_REQ_DESCRIPTOR_MULTIPLE);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- vsi->alloc_queue_pairs = pf->num_fcoe_qps;
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
- I40E_REQ_DESCRIPTOR_MULTIPLE);
- vsi->num_q_vectors = pf->num_fcoe_msix;
- break;
-
-#endif /* I40E_FCOE */
default:
WARN_ON(1);
return -ENODATA;
@@ -7593,7 +7587,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
}
vsi->type = type;
vsi->back = pf;
- set_bit(__I40E_DOWN, &vsi->state);
+ set_bit(__I40E_VSI_DOWN, vsi->state);
vsi->flags = 0;
vsi->idx = vsi_idx;
vsi->int_rate_limit = 0;
@@ -7817,6 +7811,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
static int i40e_init_msix(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ int cpus, extra_vectors;
int vectors_left;
int v_budget, i;
int v_actual;
@@ -7835,9 +7830,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
* - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* - The CPU count within the NUMA node if iWARP is enabled
-#ifdef I40E_FCOE
- * - The number of FCOE qps.
-#endif
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
@@ -7852,10 +7844,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left--;
}
- /* reserve vectors for the main PF traffic queues */
- pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+ /* reserve some vectors for the main PF traffic queues. Initially we
+ * only reserve at most 50% of the available vectors, in the case that
+ * the number of online CPUs is large. This ensures that we can enable
+ * extra features as well. Once we've enabled the other features, we
+ * will use any remaining vectors to reach as close as we can to the
+ * number of online CPUs.
+ */
+ cpus = num_online_cpus();
+ pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
vectors_left -= pf->num_lan_msix;
- v_budget += pf->num_lan_msix;
/* reserve one vector for sideband flow director */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -7868,20 +7866,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
-#ifdef I40E_FCOE
- /* can we reserve enough for FCoE? */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- if (!vectors_left)
- pf->num_fcoe_msix = 0;
- else if (vectors_left >= pf->num_fcoe_qps)
- pf->num_fcoe_msix = pf->num_fcoe_qps;
- else
- pf->num_fcoe_msix = 1;
- v_budget += pf->num_fcoe_msix;
- vectors_left -= pf->num_fcoe_msix;
- }
-
-#endif
/* can we reserve enough for iWARP? */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
iwarp_requested = pf->num_iwarp_msix;
@@ -7918,6 +7902,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ /* On systems with a large number of SMP cores, we previously limited
+ * the number of vectors for num_lan_msix to be at most 50% of the
+ * available vectors, to allow for other features. Now, we add back
+ * the remaining vectors. However, we ensure that the total
+ * num_lan_msix will not exceed num_online_cpus(). To do this, we
+ * calculate the number of vectors we can add without going over the
+ * cap of CPUs. For systems with a small number of CPUs this will be
+ * zero.
+ */
+ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+ pf->num_lan_msix += extra_vectors;
+ vectors_left -= extra_vectors;
+
+ WARN(vectors_left < 0,
+ "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+ v_budget += pf->num_lan_msix;
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
if (!pf->msix_entries)
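
Taken together, the two comments above describe a two-phase vector budget: first cap the LAN queues at half the available vectors so the secondary features can claim theirs, then hand any leftovers back to the LAN queues without ever exceeding the CPU count. The arithmetic in isolation, as a stand-alone sketch with made-up numbers:

	#include <stdio.h>

	static int min_int(int a, int b) { return a < b ? a : b; }

	int main(void)
	{
		int vectors_left = 64;  /* vectors granted by the platform */
		int cpus = 48;          /* num_online_cpus() stand-in */
		int other_features = 9; /* e.g. FDir SB, iWARP, VMDq */

		/* Phase 1: at most 50%, leaving room for other features. */
		int num_lan = min_int(cpus, vectors_left / 2); /* 32 */
		vectors_left -= num_lan;

		vectors_left -= other_features; /* other features claim 9 */

		/* Phase 2: add leftovers back, capped at the CPU count. */
		int extra = min_int(cpus - num_lan, vectors_left); /* 16 */
		num_lan += extra;
		vectors_left -= extra;

		printf("lan=%d leftover=%d\n", num_lan, vectors_left);
		return 0;
	}
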
@@ -7958,10 +7959,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
pf->num_vmdq_qps = 1;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
- pf->num_fcoe_msix = 0;
-#endif
/* partition out the remaining vectors */
switch (vec) {
@@ -7975,13 +7972,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else {
pf->num_lan_msix = 2;
}
-#ifdef I40E_FCOE
- /* give one vector to FCoE */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_lan_msix = 1;
- pf->num_fcoe_msix = 1;
- }
-#endif
break;
default:
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
@@ -8001,13 +7991,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
pf->num_lan_msix);
pf->num_lan_qps = pf->num_lan_msix;
-#ifdef I40E_FCOE
- /* give one vector to FCoE */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_fcoe_msix = 1;
- vec--;
- }
-#endif
break;
}
}
@@ -8028,13 +8011,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
}
-#ifdef I40E_FCOE
-
- if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
- dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
- }
-#endif
i40e_debug(&pf->hw, I40E_DEBUG_INIT,
"MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
pf->num_lan_msix,
@@ -8133,9 +8109,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_DCB_ENABLED |
@@ -8196,7 +8169,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
/* Only request the irq if this is the first time through, and
* not when we're rebuilding after a Reset
*/
- if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
err = request_irq(pf->msix_entries[0].vector,
i40e_intr, 0, pf->int_name, pf);
if (err) {
@@ -8368,13 +8341,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
if (vsi->type == I40E_VSI_MAIN) {
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
- seed_dw[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
} else if (vsi->type == I40E_VSI_SRIOV) {
for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HKEY1(i, vf_id),
- seed_dw[i]);
+ wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
}
@@ -8392,9 +8362,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
return -EINVAL;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HLUT1(i, vf_id),
- lut_dw[i]);
+ wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
}
@@ -8522,9 +8490,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
/* Determine the RSS size of the VSI */
- if (!vsi->rss_size)
- vsi->rss_size = min_t(int, pf->alloc_rss_size,
- vsi->num_queue_pairs);
+ if (!vsi->rss_size) {
+ u16 qcount;
+
+ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+ }
if (!vsi->rss_size)
return -EINVAL;
@@ -8558,6 +8529,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
*
* returns 0 if rss is not enabled, if enabled returns the final rss queue
* count which may be different from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
@@ -8570,12 +8542,14 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
new_rss_size = min_t(int, queue_count, pf->rss_size_max);
if (queue_count != vsi->num_queue_pairs) {
+ u16 qcount;
+
vsi->req_queue_pairs = queue_count;
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
pf->alloc_rss_size = new_rss_size;
- i40e_reset_and_rebuild(pf, true);
+ i40e_reset_and_rebuild(pf, true, true);
/* Discard the user configured hash keys and lut, if less
* queues are enabled.
@@ -8587,8 +8561,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/* Reset vsi->rss_size, as number of enabled queues changed */
- vsi->rss_size = min_t(int, pf->alloc_rss_size,
- vsi->num_queue_pairs);
+ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
i40e_pf_config_rss(pf);
}
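
Both hunks above replace the raw num_queue_pairs bound with a per-TC quotient, since RSS can only spread packets across the queues of a single traffic class. The calculation on its own, with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		int num_queue_pairs = 16; /* queue pairs on the VSI */
		int numtc = 4;            /* enabled traffic classes */
		int alloc_rss_size = 8;   /* what the PF allocated for RSS */

		/* Queues available to RSS within one traffic class. */
		int qcount = num_queue_pairs / numtc;
		int rss_size = alloc_rss_size < qcount ? alloc_rss_size
						       : qcount;

		printf("rss_size=%d (qcount=%d)\n", rss_size, qcount);
		return 0;
	}
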
@@ -8821,10 +8795,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
-#ifdef I40E_FCOE
- i40e_init_pf_fcoe(pf);
-
-#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -8851,9 +8821,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.aq.api_min_ver > 4))) {
/* Supported in FW API version higher than 1.4 */
pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
} else {
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
}
pf->eeprom_version = 0xDEAD;
@@ -8913,16 +8883,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
need_reset = true;
i40e_fdir_filter_exit(pf);
}
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_SB_AUTO_DISABLED);
/* reset fd counters */
- pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
- pf->fdir_pf_active_filters = 0;
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
}
@@ -8955,6 +8925,7 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
**/
static int i40e_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -8978,7 +8949,7 @@ static int i40e_set_features(struct net_device *netdev,
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
return 0;
}
@@ -8990,12 +8961,12 @@ static int i40e_set_features(struct net_device *netdev,
*
* Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
**/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
u8 i;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->udp_ports[i].index == port)
+ if (pf->udp_ports[i].port == port)
return i;
}
@@ -9013,7 +8984,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- __be16 port = ti->port;
+ u16 port = ntohs(ti->port);
u8 next_idx;
u8 idx;
@@ -9021,8 +8992,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "port %d already offloaded\n",
- ntohs(port));
+ netdev_info(netdev, "port %d already offloaded\n", port);
return;
}
@@ -9031,7 +9001,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
- ntohs(port));
+ port);
return;
}
@@ -9049,7 +9019,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
}
/* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].index = port;
+ pf->udp_ports[next_idx].port = port;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
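
The port bookkeeping switches from __be16 to host-order u16 here: the one ntohs() at the ndo boundary means every later comparison, log message and table entry works in host order, and the scattered ntohs() calls disappear. A tiny demonstration of converting at the edge (names are hypothetical):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	struct udp_port_entry {
		uint16_t port; /* stored in host order, 0 = free slot */
	};

	static struct udp_port_entry table[4];

	/* The only byte-order conversion happens at the entry point. */
	static void tunnel_add(uint16_t wire_port_be)
	{
		uint16_t port = ntohs(wire_port_be);

		table[0].port = port;
		printf("offloaded port %u\n", port); /* no ntohs() here */
	}

	int main(void)
	{
		tunnel_add(htons(4789)); /* VXLAN port as seen on the wire */
		return 0;
	}
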
@@ -9065,7 +9035,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- __be16 port = ti->port;
+ u16 port = ntohs(ti->port);
u8 idx;
idx = i40e_get_udp_port_idx(pf, port);
@@ -9090,14 +9060,14 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
/* if port exists, set it to 0 (mark for deletion)
* and make it pending
*/
- pf->udp_ports[idx].index = 0;
+ pf->udp_ports[idx].port = 0;
pf->pending_udp_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
return;
not_found:
netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
- ntohs(port));
+ port);
}
static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -9174,6 +9144,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* is to change the mode then that requires a PF reset to
* allow rebuild of the components with required hardware
* bridge mode enabled.
+ *
+ * Note: expects to be called while under rtnl_lock()
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh,
@@ -9229,7 +9201,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
else
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
+ true);
break;
}
}
@@ -9352,10 +9325,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = __i40e_setup_tc,
-#ifdef I40E_FCOE
- .ndo_fcoe_enable = i40e_fcoe_enable,
- .ndo_fcoe_disable = i40e_fcoe_disable,
-#endif
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
@@ -9388,6 +9357,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
u8 broadcast[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
int etherdev_size;
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
etherdev_size = sizeof(struct i40e_netdev_priv);
netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
@@ -9398,52 +9369,57 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HIGHDMA |
- NETIF_F_SOFT_FEATURES |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM |
- NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_IPXIP6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL |
- NETIF_F_SCTP_CRC |
- NETIF_F_RXHASH |
- NETIF_F_RXCSUM |
- 0;
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= hw_enc_features;
+
/* record features VLANs can make use of */
- netdev->vlan_features |= netdev->hw_enc_features |
- NETIF_F_TSO_MANGLEID;
+ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
netdev->hw_features |= NETIF_F_NTUPLE;
+ hw_features = hw_enc_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
- netdev->hw_features |= netdev->hw_enc_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= hw_features;
- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
- /* The following steps are necessary to prevent reception
- * of tagged packets - some older NVM configurations load a
- * default a MAC-VLAN filter that accepts any tagged packet
- * which must be replaced by a normal filter.
+ /* The following steps are necessary for two reasons. First,
+ * some older NVM configurations load a default MAC-VLAN
+ * filter that will accept any tagged packet, and we want to
+ * replace this with a normal filter. Additionally, it is
+ * possible our MAC address was provided by the platform using
+ * Open Firmware or similar.
+ *
+ * Thus, we need to remove the default filter and install one
+ * specific to the MAC address.
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -9489,9 +9465,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->netdev_ops = &i40e_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
i40e_set_ethtool_ops(netdev);
-#ifdef I40E_FCOE
- i40e_fcoe_config_netdev(netdev, vsi);
-#endif
/* MTU range: 68 - 9706 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -9715,16 +9688,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- ret = i40e_fcoe_vsi_init(vsi, &ctxt);
- if (ret) {
- dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
- return ret;
- }
- break;
-
-#endif /* I40E_FCOE */
case I40E_VSI_IWARP:
/* send down message to iWARP */
break;
@@ -9751,7 +9714,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
}
vsi->active_filters = 0;
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
@@ -9804,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
return -ENODEV;
}
if (vsi == pf->vsi[pf->lan_vsi] &&
- !test_bit(__I40E_DOWN, &pf->state)) {
+ !test_bit(__I40E_VSI_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
@@ -10141,7 +10104,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
}
case I40E_VSI_VMDQ2:
- case I40E_VSI_FCOE:
ret = i40e_config_netdev(vsi);
if (ret)
goto err_netdev;
@@ -10789,6 +10751,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf);
+ /* repopulate tunnel port filters */
+ i40e_sync_udp_filters(pf);
+
return ret;
}
@@ -10801,9 +10766,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
int queues_left;
pf->num_lan_qps = 0;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
-#endif
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
@@ -10820,9 +10782,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
@@ -10839,9 +10798,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED |
@@ -10862,22 +10818,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
}
-#ifdef I40E_FCOE
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- if (I40E_DEFAULT_FCOE <= queues_left) {
- pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
- } else if (I40E_MINIMUM_FCOE <= queues_left) {
- pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
- } else {
- pf->num_fcoe_qps = 0;
- pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
- dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
- }
-
- queues_left -= pf->num_fcoe_qps;
- }
-
-#endif
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
if (queues_left > 1) {
queues_left -= 1; /* save 1 queue for FD */
@@ -10909,9 +10849,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
queues_left);
-#ifdef I40E_FCOE
- dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
-#endif
}
/**
@@ -10978,10 +10915,6 @@ static void i40e_print_features(struct i40e_pf *pf)
i += snprintf(&buf[i], REMAIN(i), " Geneve");
if (pf->flags & I40E_FLAG_PTP)
i += snprintf(&buf[i], REMAIN(i), " PTP");
-#ifdef I40E_FCOE
- if (pf->flags & I40E_FLAG_FCOE_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " FCOE");
-#endif
if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " VEB");
else
@@ -10994,20 +10927,18 @@ static void i40e_print_features(struct i40e_pf *pf)
/**
* i40e_get_platform_mac_addr - get platform-specific MAC address
- *
* @pdev: PCI device information struct
* @pf: board private structure
*
- * Look up the MAC address in Open Firmware on systems that support it,
- * and use IDPROM on SPARC if no OF address is found. On return, the
- * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value
- * has been selected.
+ * Look up the MAC address for the device. First we'll try
+ * eth_platform_get_mac_address, which will check Open Firmware or an
+ * arch-specific fallback. Otherwise, we'll default to the value stored
+ * in firmware.
**/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
- pf->flags &= ~I40E_FLAG_PF_MAC;
- if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
- pf->flags |= I40E_FLAG_PF_MAC;
+ if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
+ i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
/**
@@ -11072,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pf->next_vsi = 0;
pf->pdev = pdev;
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
hw = &pf->hw;
hw->back = pf;
@@ -11098,6 +11029,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.bus_id = pdev->bus->number;
pf->instance = pfs_found;
+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
*/
@@ -11196,8 +11130,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp,
- pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ hw->func_caps.num_rx_qp, 0, 0);
if (err) {
dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
goto err_init_lan_hmc;
@@ -11219,9 +11152,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_aq_stop_lldp(hw, true, NULL);
}
- i40e_get_mac_addr(hw, hw->mac.addr);
/* allow a platform config to override the HW addr */
i40e_get_platform_mac_addr(pdev, pf);
+
if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
@@ -11232,18 +11165,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
if (is_valid_ether_addr(hw->mac.port_addr))
pf->flags |= I40E_FLAG_PORT_ID_VALID;
-#ifdef I40E_FCOE
- err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
- if (err)
- dev_info(&pdev->dev,
- "(non-fatal) SAN MAC retrieval failed: %d\n", err);
- if (!is_valid_ether_addr(hw->mac.san_addr)) {
- dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
- hw->mac.san_addr);
- ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
- }
- dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
-#endif /* I40E_FCOE */
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
@@ -11261,8 +11182,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->service_timer_period = HZ;
INIT_WORK(&pf->service_task, i40e_service_task);
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
- pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
@@ -11300,7 +11220,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ !test_bit(__I40E_BAD_EEPROM, pf->state)) {
if (pci_num_vf(pdev))
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
}
@@ -11373,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
*/
- clear_bit(__I40E_DOWN, &pf->state);
+ clear_bit(__I40E_VSI_DOWN, pf->state);
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
@@ -11393,7 +11313,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ !test_bit(__I40E_BAD_EEPROM, pf->state)) {
/* disable link interrupts for VFs */
val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
@@ -11434,16 +11354,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
round_jiffies(jiffies + pf->service_timer_period));
/* add this PF to client device list and launch a client service task */
- err = i40e_lan_add_device(pf);
- if (err)
- dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
- err);
-
-#ifdef I40E_FCOE
- /* create FCoE interface */
- i40e_fcoe_vsi_setup(pf);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ err = i40e_lan_add_device(pf);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+ err);
+ }
-#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
/* Devices on the IOSF bus do not have this information
@@ -11531,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Unwind what we've done if something failed in the setup */
err_vsis:
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
err_switch_setup:
@@ -11582,13 +11499,18 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
/* no more scheduling of any task */
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
if (pf->service_timer.data)
del_timer_sync(&pf->service_timer);
if (pf->service_task.func)
cancel_work_sync(&pf->service_task);
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -11615,10 +11537,11 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
/* remove attached clients */
- ret_code = i40e_lan_del_device(pf);
- if (ret_code) {
- dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
- ret_code);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ ret_code = i40e_lan_del_device(pf);
+ if (ret_code)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ ret_code);
}
/* shutdown and destroy the HMC */
@@ -11685,9 +11608,9 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
}
/* shutdown all operations */
- if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ if (!test_bit(__I40E_SUSPENDED, pf->state)) {
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
}
@@ -11752,11 +11675,11 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (test_bit(__I40E_SUSPENDED, &pf->state))
+ if (test_bit(__I40E_SUSPENDED, pf->state))
return;
rtnl_lock();
- i40e_handle_reset_warning(pf);
+ i40e_handle_reset_warning(pf, true);
rtnl_unlock();
}
@@ -11816,10 +11739,10 @@ static void i40e_shutdown(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11829,11 +11752,16 @@ static void i40e_shutdown(struct pci_dev *pdev)
cancel_work_sync(&pf->service_task);
i40e_fdir_teardown(pf);
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
wr32(hw, I40E_PFPM_APM,
@@ -11860,14 +11788,14 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
struct i40e_hw *hw = &pf->hw;
int retval = 0;
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_VSI_DOWN, pf->state);
if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11912,10 +11840,10 @@ static int i40e_resume(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
/* handling the reset will rebuild the device state */
- if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
- clear_bit(__I40E_DOWN, &pf->state);
+ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
+ clear_bit(__I40E_VSI_DOWN, pf->state);
rtnl_lock();
- i40e_reset_and_rebuild(pf, false);
+ i40e_reset_and_rebuild(pf, false, true);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 38ee18f11124..800bd55d0159 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
return ret_code;
}
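
The i40e_read_nvm_word() rework hoists the acquire/release pair around both access methods, so the NVM semaphore is held whichever path the AQ_SRCTL_ACCESS_ENABLE flag selects. The acquire-once/release-once shape, as a generic sketch rather than the driver's functions:

	#include <stdbool.h>
	#include <stdio.h>

	static int acquire_resource(void) { puts("acquire"); return 0; }
	static void release_resource(void) { puts("release"); }
	static int read_via_aq(void) { puts("read via AQ"); return 0; }
	static int read_via_srctl(void) { puts("read via SRCTL"); return 0; }

	static int read_word(bool aq_capable)
	{
		int ret = acquire_resource();

		if (ret)
			return ret;

		/* Both methods now run inside one ownership window. */
		if (aq_capable)
			ret = read_via_aq();
		else
			ret = read_via_srctl();

		release_resource();
		return ret;
	}

	int main(void)
	{
		read_word(true);
		read_word(false);
		return 0;
	}
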
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index fea81ed065db..80e66da6b145 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -78,7 +78,4 @@ do { \
} while (0)
typedef enum i40e_status_code i40e_status;
-#ifdef CONFIG_I40E_FCOE
-#define I40E_FCOE
-#endif
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2551fc827444..c56d976cf85a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -304,9 +304,6 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
-#ifdef I40E_FCOE
-i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-#endif
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
@@ -380,4 +377,21 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 2caee35528fa..18c1cc08da97 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -358,7 +358,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
- clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
}
/**
@@ -768,7 +768,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
- clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
+ clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
}
if (pf->ptp_clock) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
new file mode 100644
index 000000000000..d3e55f54a05e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -0,0 +1,229 @@
+/*******************************************************************************
+ *
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for i40e will be "i40e".
+ *
+ * This file is named i40e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i40e
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I40E_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/**
+ * i40e_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_i40e{,vf}_example(args...)
+ *
+ * ... as:
+ *
+ * i40e_trace(example, args...)
+ *
+ * ... to resolve to the PF or VF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Trace point should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, i40e_trace_enabled(trace_name) wraps references to
+ * trace_i40e{,vf}_<trace_name>_enabled() functions.
+ */
+#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40e ## _ ## trace_name)
+#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
+
+#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
+
+#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
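
With these two levels of pasting, a call site written once in shared code resolves to the PF tracepoint in i40e and the VF tracepoint in i40evf. A compilable miniature of the trick, with printf() standing in for a real tracepoint function:

	#include <stdio.h>

	/* In the kernel, trace_i40e_*() are generated static inline
	 * functions, which is why the parenthesized expansion works.
	 */
	static void trace_i40e_example(int x)
	{
		printf("PF tracepoint: %d\n", x);
	}

	#define _I40E_TRACE_NAME(name) (trace_ ## i40e ## _ ## name)
	#define I40E_TRACE_NAME(name) _I40E_TRACE_NAME(name)
	#define i40e_trace(name, args...) I40E_TRACE_NAME(name)(args)

	int main(void)
	{
		/* Expands to trace_i40e_example(42) at preprocess time. */
		i40e_trace(example, 42);
		return 0;
	}
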
+
+/* Events common to PF and VF. Corresponding versions will be defined
+ * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
+ * macro above will select the right trace point name for the driver
+ * being built from shared code.
+ */
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(
+ i40e_tx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf),
+
+ /* The convention here is to make the first fields in the
+ * TP_STRUCT match the TP_PROTO exactly. This enables the use
+ * of the args struct generated by the tplist tool (from the
+ * bcc-tools package) to be used for those fields. To access
+ * fields other than the tracepoint args will require the
+ * tplist output to be adjusted.
+ */
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, buf)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->buf = buf;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p buf %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->buf)
+);
+
+DEFINE_EVENT(
+ i40e_tx_template, i40e_clean_tx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DEFINE_EVENT(
+ i40e_tx_template, i40e_clean_tx_irq_unmap,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DECLARE_EVENT_CLASS(
+ i40e_rx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb),
+
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, skb)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->skb = skb;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p skb %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(
+ i40e_rx_template, i40e_clean_rx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DEFINE_EVENT(
+ i40e_rx_template, i40e_clean_rx_irq_rx,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DECLARE_EVENT_CLASS(
+ i40e_xmit_template,
+
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring),
+
+ TP_STRUCT__entry(
+ __field(void*, skb)
+ __field(void*, ring)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->ring = ring;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s skb: %p ring: %p",
+ __get_str(devname), __entry->skb,
+ __entry->ring)
+);
+
+DEFINE_EVENT(
+ i40e_xmit_template, i40e_xmit_frame_ring,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+DEFINE_EVENT(
+ i40e_xmit_template, i40e_xmit_frame_ring_drop,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+/* Events unique to the PF. */
+
+#endif /* _I40E_TRACE_H_ */
+/* This must be outside ifdef _I40E_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE i40e_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 97d46058d71d..29321a6167a6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -27,6 +27,7 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
+#include "i40e_trace.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
@@ -71,6 +72,9 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
(fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+ flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+ (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
/* Use LAN VSI Id if not programmed by user */
flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
@@ -203,7 +207,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
struct udphdr *udp;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
@@ -219,18 +222,28 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
+ sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip[0];
+ ip->daddr = fd_data->dst_ip;
udp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip[0];
+ ip->saddr = fd_data->src_ip;
udp->source = fd_data->src_port;
+ if (fd_data->flex_filter) {
+ u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* Free the packet buffer since it wasn't added to the ring */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
@@ -241,10 +254,13 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}
- if (err)
- kfree(raw_packet);
- return err ? -EOPNOTSUPP : 0;
+ if (add)
+ pf->fd_udp4_filter_cnt++;
+ else
+ pf->fd_udp4_filter_cnt--;
+
+ return 0;
}
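
Each protocol helper stuffs the user's two-byte pattern into the dummy packet just past the fixed headers; the hardware then matches that word at the programmed payload offset. A user-space model of building such a probe packet (offsets and names are illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define DUMMY_PACKET_LEN 42 /* end of Ethernet/IP/UDP headers */
	#define MAX_RAW_PACKET 512

	int main(void)
	{
		uint8_t raw_packet[MAX_RAW_PACKET];
		uint16_t flex_word = 0xBEEF; /* pattern to match */
		uint16_t flex_offset = 6;    /* bytes into the payload */

		memset(raw_packet, 0, sizeof(raw_packet));
		/* ... fixed headers would be written here ... */

		/* Write the match pattern into the payload, as the
		 * driver's flex_filter branch does.
		 */
		uint8_t *payload = raw_packet + DUMMY_PACKET_LEN;
		memcpy(payload + flex_offset, &flex_word, sizeof(flex_word));

		printf("flex word at absolute offset %d\n",
		       DUMMY_PACKET_LEN + flex_offset);
		return 0;
	}
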
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
@@ -263,7 +279,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
struct tcphdr *tcp;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
/* Dummy packet */
@@ -281,39 +296,110 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
+ sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip[0];
+ ip->daddr = fd_data->dst_ip;
tcp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip[0];
+ ip->saddr = fd_data->src_ip;
tcp->source = fd_data->src_port;
+ if (fd_data->flex_filter) {
+ u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+ fd_data->pctype, fd_data->fd_id, ret);
+ /* Free the packet buffer since it wasn't added to the ring */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
+ if (add)
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
+ fd_data->pctype, fd_data->fd_id);
+ else
+ dev_info(&pf->pdev->dev,
+ "Filter deleted for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
+ }
+
if (add) {
- pf->fd_tcp_rule++;
+ pf->fd_tcp4_filter_cnt++;
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
} else {
- pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
- (pf->fd_tcp_rule - 1) : 0;
- if (pf->fd_tcp_rule == 0) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
+ pf->fd_tcp4_filter_cnt--;
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ return 0;
+}
+
+#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct sctphdr *sctp;
+ struct iphdr *ip;
+ u8 *raw_packet;
+ int ret;
+ /* Dummy packet */
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+ memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
+
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+ sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+ ip->daddr = fd_data->dst_ip;
+ sctp->dest = fd_data->dst_port;
+ ip->saddr = fd_data->src_ip;
+ sctp->source = fd_data->src_port;
+
+ if (fd_data->flex_filter) {
+ u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* Free the packet buffer since it wasn't added to the ring */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
else
dev_info(&pf->pdev->dev,
@@ -321,10 +407,12 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, fd_data->fd_id);
}
- if (err)
- kfree(raw_packet);
+ if (add)
+ pf->fd_sctp4_filter_cnt++;
+ else
+ pf->fd_sctp4_filter_cnt--;
- return err ? -EOPNOTSUPP : 0;
+ return 0;
}
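/* Editor's note: an illustrative decoding of the dummy packet above, assuming
 * standard Ethernet/IPv4 framing (offsets are into raw_packet; not part of
 * the driver source):
 *
 *   bytes  0-11  zeroed destination/source MAC addresses
 *   bytes 12-13  0x08 0x00  EtherType IPv4
 *   byte  14     0x45       IP version 4, 5-word (20 byte) header
 *   bytes 16-17  0x00 0x20  total length 32 (20 B IPv4 + 12 B SCTP common hdr)
 *   byte  20     0x40       don't-fragment flag
 *   byte  22     0x40       TTL 64
 *   byte  23     0x84       IP protocol 132 (IPPROTO_SCTP)
 *
 * The routine then patches the addresses and ports in place before handing
 * the frame to i40e_program_fdir_filter(). Such a filter would be reachable
 * from userspace via ethtool's ntuple interface, e.g. (device name and
 * values hypothetical):
 *
 *   ethtool -N eth0 flow-type sctp4 dst-ip 192.168.1.1 dst-port 5000 action 4
 */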
#define I40E_IP_DUMMY_PACKET_LEN 34
@@ -343,7 +431,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
{
struct i40e_pf *pf = vsi->back;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
int i;
@@ -359,18 +446,29 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- ip->saddr = fd_data->src_ip[0];
- ip->daddr = fd_data->dst_ip[0];
+ ip->saddr = fd_data->src_ip;
+ ip->daddr = fd_data->dst_ip;
ip->protocol = 0;
+ if (fd_data->flex_filter) {
+ u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
+ __be16 pattern = fd_data->flex_word;
+ u16 off = fd_data->flex_offset;
+
+ *((__force __be16 *)(payload + off)) = pattern;
+ }
+
fd_data->pctype = i;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* The packet buffer wasn't added to the ring so we
+ * need to free it now.
+ */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
@@ -383,10 +481,12 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
}
}
- if (err)
- kfree(raw_packet);
+ if (add)
+ pf->fd_ip4_filter_cnt++;
+ else
+ pf->fd_ip4_filter_cnt--;
- return err ? -EOPNOTSUPP : 0;
+ return 0;
}
/**
@@ -409,6 +509,9 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
case UDP_V4_FLOW:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+ break;
case IP_USER_FLOW:
switch (input->ip4_proto) {
case IPPROTO_TCP:
@@ -417,19 +520,23 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
case IPPROTO_UDP:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+ break;
case IPPROTO_IP:
ret = i40e_add_del_fdir_ipv4(vsi, input, add);
break;
default:
/* We cannot support masking based on protocol */
- goto unsupported_flow;
+ dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
+ input->ip4_proto);
+ return -EINVAL;
}
break;
default:
-unsupported_flow:
- dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
+ dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
input->flow_type);
- ret = -EINVAL;
+ return -EINVAL;
}
	/* The buffer allocated here will normally be freed by
@@ -476,7 +583,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
* progress do nothing, once flush is complete the state will
* be cleared.
*/
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return;
pf->fd_add_err++;
@@ -484,9 +591,9 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
- set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
/* filter programming failed most likely due to table full */
@@ -498,12 +605,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- !(pf->auto_disable_flags &
- I40E_FLAG_FD_SB_ENABLED)) {
+ !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
+ pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
- pf->auto_disable_flags |=
- I40E_FLAG_FD_SB_ENABLED;
}
}
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -599,19 +704,15 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
- * @in_sw: is tx_pending being checked in SW or HW
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
u32 head, tail;
- if (!in_sw)
- head = i40e_get_head(ring);
- else
- head = ring->next_to_clean;
+ head = i40e_get_head(ring);
tail = readl(ring->tail);
if (head != tail)
@@ -657,6 +758,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
break;
@@ -683,6 +785,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
+ i40e_trace(clean_tx_irq_unmap,
+ tx_ring, tx_desc, tx_buf);
tx_buf++;
tx_desc++;
@@ -734,11 +838,11 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable interrupts.
*/
- unsigned int j = i40e_get_tx_pending(tx_ring, false);
+ unsigned int j = i40e_get_tx_pending(tx_ring);
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &vsi->state) &&
+ !test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -756,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &vsi->state)) {
+ !test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -930,9 +1034,29 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
}
/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qw: qword representing status_error_len in CPU ordering
+ *
+ * The value in the descriptor length field indicates whether this is a
+ * programming status descriptor for flow director or FCoE, signalled by
+ * the length value I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise it is a
+ * packet descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qw)
+{
+ /* The Rx filter programming status and SPH bit occupy the same
+ * spot in the descriptor. Since we don't support packet split we
+ * can just reuse the bit as an indication that this is a
+ * programming status descriptor.
+ */
+ return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
+}
+
+/**
* i40e_clean_programming_status - clean the programming status descriptor
* @rx_ring: the rx ring that has this descriptor
* @rx_desc: the rx descriptor written back by HW
+ * @qw: qword representing status_error_len in CPU ordering
*
* Flow director should handle FD_FILTER_STATUS to check whether its filter
* programming was successful and take actions accordingly. FCoE should
@@ -940,22 +1064,23 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
*
**/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+ union i40e_rx_desc *rx_desc,
+ u64 qw)
{
- u64 qw;
+ u32 ntc = rx_ring->next_to_clean + 1;
u8 id;
- qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, rx_desc, id);
-#ifdef I40E_FCOE
- else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
- (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
- i40e_fcoe_handle_status(rx_ring, rx_desc, id);
-#endif
}
/**
@@ -1010,7 +1135,6 @@ err:
**/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
u16 i;
@@ -1030,8 +1154,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_bi->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
rx_bi->page = NULL;
rx_bi->page_offset = 0;
@@ -1132,6 +1270,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the data buffer for this ring.
+ */
+static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
* i40e_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
* @bi: rx_buffer struct to modify
@@ -1152,27 +1301,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, i40e_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = i40e_rx_offset(rx_ring);
+
+ /* initialize pagecnt_bias to 1 representing we fully own page */
+ bi->pagecnt_bias = 1;
return true;
}
@@ -1219,6 +1374,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -1259,8 +1420,6 @@ no_buffers:
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
@@ -1422,12 +1581,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
@@ -1472,7 +1631,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
/**
@@ -1493,8 +1655,6 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
- * @page: page address from rx_buffer
- * @truesize: actual size of the buffer in this page
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@@ -1517,13 +1677,10 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
- struct page *page,
- const unsigned int truesize)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
-#endif
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page)))
@@ -1531,21 +1688,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > last_offset)
+#define I40E_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET)
return false;
#endif
- /* Inc ref count on page before passing it up to the stack */
- get_page(page);
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
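/* Editor's note: an illustrative trace of the pagecnt_bias accounting used
 * above (not driver source). Instead of calling get_page()/put_page() per
 * frame, the driver pre-charges references and tracks its own share in
 * pagecnt_bias; "diff" below means page_count(page) - pagecnt_bias:
 *
 *   i40e_alloc_mapped_page():  page_count = 1, pagecnt_bias = 1     (diff 0)
 *   i40e_get_rx_buffer():      pagecnt_bias--                       (diff 1)
 *   i40e_can_reuse_rx_page():  reusable while diff <= 1, i.e. at most the
 *                              fragment just handed to the stack is still
 *                              outstanding
 *   bias drained to 0:         page_ref_add(page, USHRT_MAX) and
 *                              pagecnt_bias = USHRT_MAX recharge the pool
 *                              with one atomic bump instead of one per frame
 */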
@@ -1554,145 +1713,201 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
**/
-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = I40E_RXBUFFER_2048;
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif
- unsigned int pull_len;
-
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
-
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if (size <= I40E_RX_HDR_SIZE) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is reusable, we can reuse buffer as-is */
- if (likely(i40e_page_is_reusable(page)))
- return true;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
+ /* page is being used so we must update the page offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
- /* we need the header to contain the greater of either
- * ETH_HLEN or 60 bytes if the skb->len is less than
- * 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+ const unsigned int size)
+{
+ struct i40e_rx_buffer *rx_buffer;
- /* align pull length to size of long to optimize
- * memcpy performance
- */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
- /* update all of the pointers */
- va += pull_len;
- size -= pull_len;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buffer->pagecnt_bias--;
- return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
+ return rx_buffer;
}
/**
- * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @rx_buffer: rx buffer to pull data from
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
*/
-static inline
-struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb)
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
{
- u64 local_status_error_len =
- le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size =
- (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- struct i40e_rx_buffer *rx_buffer;
- struct page *page;
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > I40E_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset + headlen,
+ size, truesize);
- if (likely(!skb)) {
- void *page_addr = page_address(page) + rx_buffer->page_offset;
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ /* buffer is unused, reset bias back to rx_buffer */
+ rx_buffer->pagecnt_bias++;
+ }
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ return skb;
+}
+
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(va - I40E_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- return NULL;
- }
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, I40E_SKB_PAD);
+ __skb_put(skb, size);
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
+ return skb;
+}
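/* Editor's note: an illustrative layout of a build_skb Rx buffer, assuming
 * PAGE_SIZE < 8192 so truesize is half a page (not driver source):
 *
 *   |<- I40E_SKB_PAD ->|<- frame data (size) ->| tailroom | skb_shared_info |
 *   ^                  ^
 *   va - I40E_SKB_PAD  va (where the NIC DMAs the frame)
 *
 * build_skb() turns this region into an skb without copying: the headroom
 * becomes skb headroom via skb_reserve(), and struct skb_shared_info is
 * carved out of the end of the truesize region. This is why buffers must be
 * allocated with i40e_rx_offset() headroom in the first place.
 */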
- /* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ if (i40e_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
}
/* clear contents of buffer_info */
rx_buffer->page = NULL;
-
- return skb;
}
/**
@@ -1718,11 +1933,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
prefetch(I40E_RX_DESC(rx_ring, ntc));
-#define staterrlen rx_desc->wb.qword1.status_error_len
- if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- return true;
- }
/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
@@ -1753,7 +1963,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false;
while (likely(total_rx_packets < budget)) {
+ struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1770,22 +1982,44 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used. If the
- * hardware wrote DD then it will be non-zero
+ * hardware wrote DD then the length will be non-zero
*/
- if (!i40e_test_staterr(rx_desc,
- BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
*/
dma_rmb();
- skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
- if (!skb)
+ if (unlikely(i40e_rx_is_programming_status(qword))) {
+ i40e_clean_programming_status(rx_ring, rx_desc, qword);
+ continue;
+ }
+ size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ if (!size)
break;
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+ /* retrieve a buffer from the ring */
+ if (skb)
+ i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = i40e_build_skb(rx_ring, rx_buffer, size);
+ else
+ skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
@@ -1798,6 +2032,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
+ skb = NULL;
continue;
}
@@ -1816,18 +2051,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* populate checksum, VLAN, and protocol */
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-#ifdef I40E_FCOE
- if (unlikely(
- i40e_rx_is_fcoe(rx_ptype) &&
- !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
- dev_kfree_skb_any(skb);
- continue;
- }
-#endif
-
vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
i40e_receive_skb(rx_ring, skb, vlan_tag);
skb = NULL;
@@ -1944,7 +2171,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
}
enable_int:
- if (!test_bit(__I40E_DOWN, &vsi->state))
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
@@ -1973,13 +2200,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring;
int work_done = 0;
- if (test_bit(__I40E_DOWN, &vsi->state)) {
+ if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi);
return 0;
}
- /* Clear hung_detected bit */
- clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
@@ -2079,7 +2304,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
- if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
return;
/* if sampling is disabled do nothing */
@@ -2113,10 +2338,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
- if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
return;
- if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
- (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
@@ -2178,8 +2402,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
- (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -2200,15 +2423,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
* Returns an error code to indicate the frame should be dropped upon error,
* and otherwise returns 0 to indicate the flags have been set properly.
**/
-#ifdef I40E_FCOE
-inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
-#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
-#endif
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
@@ -2409,7 +2626,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
return 0;
if (pf->ptp_tx &&
- !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
+ !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
pf->ptp_tx_skb = skb_get(skb);
} else {
@@ -2716,15 +2933,9 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-#ifdef I40E_FCOE
-inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#endif
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
@@ -2925,6 +3136,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* prefetch the data, we'll need it later */
prefetch(skb->data);
+ i40e_trace(xmit_frame_ring, skb, tx_ring);
+
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb)) {
@@ -3003,6 +3216,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
+ i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f80979025c01..f5de51124cae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -117,10 +117,9 @@ enum i40e_dyn_idx_t {
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
+#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048
-#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096 4096
-#define I40E_RXBUFFER_8192 8192
+#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -133,6 +132,61 @@ enum i40e_dyn_idx_t {
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
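/* Editor's note (illustrative): DMA_ATTR_SKIP_CPU_SYNC lets the driver map a
 * page once and then issue targeted dma_sync_single_range_for_cpu()/
 * dma_sync_single_range_for_device() calls per received buffer, as seen in
 * i40e_clean_rx_ring() and i40e_alloc_rx_buffers() above, instead of having
 * the DMA core sync the whole page at map/unmap time. DMA_ATTR_WEAK_ORDERING
 * permits relaxed-ordering PCIe transactions on platforms that honor it.
 */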
+
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (I40E_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = I40E_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
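/* Editor's worked example (illustrative; exact values are config dependent,
 * not driver source): on x86-64 with 4 KiB pages and 64-byte cache lines,
 * NET_SKB_PAD is 64, NET_IP_ALIGN is 0, and SKB_DATA_ALIGN(sizeof(struct
 * skb_shared_info)) comes to roughly 320 bytes, so:
 *
 *   I40E_2K_TOO_SMALL_WITH_PADDING:
 *     (64 + 1536) > (2048 - 320)  ->  1600 > 1728  ->  false
 *
 *   i40e_skb_pad(), with rx_buf_len = 1536 - 0 = 1536:
 *     page_size = ALIGN(1536, 2048) = 2048
 *     pad_size  = SKB_WITH_OVERHEAD(2048) - 1536 = 1728 - 1536 = 192
 *
 * leaving about 192 bytes of headroom in front of each received frame.
 */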
+
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
@@ -255,7 +309,12 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct i40e_queue_stats {
@@ -269,7 +328,6 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
- u64 tx_lost_interrupt;
};
struct i40e_rx_queue_stats {
@@ -335,7 +393,8 @@ struct i40e_ring {
u8 packet_stride;
u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
/* stats structs */
struct i40e_queue_stats stats;
@@ -363,6 +422,21 @@ struct i40e_ring {
*/
} ____cacheline_internodealigned_in_smp;
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
@@ -384,6 +458,17 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
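/* Editor's sketch (hypothetical setup helper, not driver source): how ring
 * configuration might tie these pieces together. With build_skb enabled the
 * buffer needs I40E_SKB_PAD headroom, so if a padded standard frame no
 * longer fits in half a 4 KiB page the buffer length grows to 3 KiB, which
 * in turn makes i40e_rx_pg_order() return 1 (an 8 KiB page split in two).
 */
static void example_configure_rx_buf(struct i40e_ring *ring)
{
	set_ring_build_skb_enabled(ring);

	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		ring->rx_buf_len = I40E_RXBUFFER_3072;	/* order-1 pages */
	else
		ring->rx_buf_len = I40E_RXBUFFER_1536;	/* half a 4K page */
}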
+
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -393,15 +478,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
-#ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset);
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring, u32 *flags);
-#endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
@@ -483,16 +561,6 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
}
/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
-/**
* txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 939f9fdc8f85..3a18ed13edc4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -78,6 +78,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_IWARP = 0x00F00000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -1213,25 +1214,6 @@ struct i40e_veb_tc_stats {
u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
};
-#ifdef I40E_FCOE
-/* Statistics collected per function for FCoE */
-struct i40e_fcoe_stats {
- u64 rx_fcoe_packets; /* fcoeprc */
- u64 rx_fcoe_dwords; /* focedwrc */
- u64 rx_fcoe_dropped; /* fcoerpdc */
- u64 tx_fcoe_packets; /* fcoeptc */
- u64 tx_fcoe_dwords; /* focedwtc */
- u64 fcoe_bad_fccrc; /* fcoecrc */
- u64 fcoe_last_error; /* fcoelast */
- u64 fcoe_ddp_count; /* fcoeddpc */
-};
-
-/* offset to per function FCoE statistics block */
-#define I40E_FCOE_VF_STAT_OFFSET 0
-#define I40E_FCOE_PF_STAT_OFFSET 128
-#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
-
-#endif
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
@@ -1319,125 +1301,6 @@ struct i40e_hw_port_stats {
#define I40E_SRRD_SRCTL_ATTEMPTS 100000
-#ifdef I40E_FCOE
-/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
-
-enum i40E_fcoe_tx_ctx_desc_cmd_bits {
- I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
- I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10,
- I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
- I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40,
- I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
-};
-
-/* FCoE DDP Context descriptor */
-struct i40e_fcoe_ddp_context_desc {
- __le64 rsvd;
- __le64 type_cmd_foff_lsize;
-};
-
-#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
-#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
- I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
-
-#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
-#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
- I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
-
-enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
- I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
- I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
- I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
- I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
- I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
- I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
-};
-
-#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
-#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
- I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
-
-#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
-#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
- I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
-
-/* FCoE DDP/DWO Queue Context descriptor */
-struct i40e_fcoe_queue_context_desc {
- __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
- __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
-};
-
-#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
-#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
- I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
-#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
- I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
-#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
- I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
-#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
- I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
-
-enum i40e_fcoe_queue_ctx_desc_tph_bits {
- I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
- I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
-};
-
-#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
-#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
- I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
-
-/* FCoE DDP/DWO Filter Context descriptor */
-struct i40e_fcoe_filter_context_desc {
- __le32 param;
- __le16 seqn;
-
- /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
- __le16 rsvd_dmaindx;
-
- /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
- __le64 flags_rsvd_lanq;
-};
-
-#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
-#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
- I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
-
-enum i40e_fcoe_filter_ctx_desc_flags_bits {
- I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
- I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
- I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
- I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
- I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
- I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
-};
-
-#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
-#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
- I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
-
-#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
-#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
- I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
-
-#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
-#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
- I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
-
-#endif /* I40E_FCOE */
enum i40e_switch_element_types {
I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
I40E_SWITCH_ELEMENT_TYPE_PF = 2,
@@ -1600,4 +1463,83 @@ struct i40e_lldp_variables {
#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+
+/* Version format for PPP */
+struct i40e_ppp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_PPP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ppp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ppp_version version;
+ u32 size;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ u32 track_id;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ char name[I40E_PPP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ppp_version version;
+ u8 op;
+#define I40E_PPP_ADD_TRACKID 0x01
+#define I40E_PPP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_PPP_NAME_SIZE];
+};
#endif /* _I40E_TYPE_H_ */
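/* Editor's sketch (illustrative, not driver source): how a consumer of the
 * package structures above might walk the segments of a package image held
 * in one contiguous buffer. The variable-length segment_offset[] array
 * indexes each segment from the start of the image:
 */
static struct i40e_generic_seg_header *
example_find_segment(u8 *pkg_buf, u32 seg_type)
{
	struct i40e_package_header *pkg =
		(struct i40e_package_header *)pkg_buf;
	u32 i;

	for (i = 0; i < pkg->segment_count; i++) {
		struct i40e_generic_seg_header *seg =
			(struct i40e_generic_seg_header *)
				(pkg_buf + pkg->segment_offset[i]);

		if (seg->type == seg_type)	/* e.g. SEGMENT_TYPE_I40E */
			return seg;
	}
	return NULL;
}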
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 974ba2baf6ea..8552192a5bde 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -163,7 +163,8 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78460c52b7c4..95c23fbaa211 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -50,8 +50,8 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
/* Not all vfs are enabled so skip the ones that are not */
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
- !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
continue;
/* Ignore return value on purpose - a given VF may fail, but
@@ -137,8 +137,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
return;
/* verify if the VF is in either init or active before proceeding */
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
- !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
return;
abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
@@ -702,10 +702,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
- (u32)hena);
- i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
- (u32)(hena >> 32));
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
}
/* program mac filter */
@@ -811,6 +809,11 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
u32 reg_idx, reg;
int i, msix_vf;
+ /* Start by disabling VF's configuration API to prevent the OS from
+ * accessing the VF's VSI after it's freed / invalidated.
+ */
+ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+
/* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_idx) {
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
@@ -850,7 +853,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
/* reset some of the state variables keeping track of the resources */
vf->num_queue_pairs = 0;
vf->vf_states = 0;
- clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
}
/**
@@ -882,7 +884,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
vf->num_queue_pairs = total_queue_pairs;
/* VF is now completely initialized */
- set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+ set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
error_alloc:
if (ret)
@@ -921,25 +923,30 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
}
/**
- * i40e_reset_vf
+ * i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * reset the VF
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
**/
-void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
- bool rsd = false;
- int i;
-
- if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
- return;
/* warn the VF */
- clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
+ * It's normally disabled in i40e_free_vf_res(), but it's safer
+ * to do it earlier, to give any VF config functions still running
+ * at this point some time to finish.
+ */
+ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -960,6 +967,79 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (i40e_quiesce_vf_pci(vf))
dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
vf->vf_id);
+}
+
+/**
+ * i40e_cleanup_reset_vf
+ * @vf: pointer to the VF structure
+ *
+ * Cleanup a VF after the hardware reset is finished. Expects the caller to
+ * have verified that the reset finished properly, and ensured that the
+ * minimum amount of wait time has passed.
+ **/
+static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ /* free VF resources to begin resetting the VSI state */
+ i40e_free_vf_res(vf);
+
+ /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
+ * By doing this we allow HW to access VF memory at any point. If we
+ * did it any sooner, HW could access memory while it was being freed
+ * in i40e_free_vf_res(), causing an IOMMU fault.
+ *
+ * On the other hand, this needs to be done ASAP, because the VF driver
+ * is waiting for this to happen and may report a timeout. It's
+ * harmless, but it gets logged into the guest OS kernel log, so it
+ * is best avoided.
+ */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* reallocate VF resources to finish resetting the VSI state */
+ if (!i40e_alloc_vf_res(vf)) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ /* Do not notify the client during VF init */
+ if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
+ &vf->vf_states))
+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+ vf->num_vlan = 0;
+ }
+
+ /* Tell the VF driver the reset is done. This needs to be done only
+ * after VF has been fully initialized, because the VF driver may
+ * request resources immediately after setting this flag.
+ */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the VF structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the VF
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool rsd = false;
+ u32 reg;
+ int i;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ return;
+
+ i40e_trigger_vf_reset(vf, flr);
/* poll VPGEN_VFRSTAT reg to make sure
* that reset is complete
@@ -984,35 +1064,116 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (!rsd)
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id);
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
- /* clear the reset bit in the VPGEN_VFRTRIG reg */
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ usleep_range(10000, 20000);
- /* On initial reset, we won't have any queues */
- if (vf->lan_vsi_idx == 0)
- goto complete_reset;
+ /* On initial reset, we don't have any queues to disable */
+ if (vf->lan_vsi_idx != 0)
+ i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
- i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
-complete_reset:
- /* reallocate VF resources to reset the VSI state */
- i40e_free_vf_res(vf);
- if (!i40e_alloc_vf_res(vf)) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- i40e_enable_vf_mappings(vf);
- set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
- clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
- /* Do not notify the client during VF init */
- if (vf->pf->num_alloc_vfs)
- i40e_notify_client_of_vf_reset(pf, abs_vf_id);
- vf->num_vlan = 0;
+ i40e_cleanup_reset_vf(vf);
+
+ i40e_flush(hw);
+ clear_bit(__I40E_VF_DISABLE, pf->state);
+}
+
+/**
+ * i40e_reset_all_vfs
+ * @pf: pointer to the PF structure
+ * @flr: VFLR was issued or not
+ *
+ * Reset all allocated VFs in one go. First, tell the hardware to reset each
+ * VF, then do all the waiting in one chunk, and finally finish restoring each
+ * VF after the wait. This is useful during PF routines which need to reset
+ * all VFs, as otherwise it must perform these resets in a serialized fashion.
+ **/
+void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int i, v;
+ u32 reg;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!pf->num_alloc_vfs)
+ return;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ return;
+
+ /* Begin reset on all VFs at once */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+ * sequence to make sure that it has completed. We'll keep track of
+ * the VFs using a simple iterator that increments once that VF has
+ * finished resetting.
+ */
+ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ usleep_range(10000, 20000);
+
+ /* Check each VF in sequence, beginning with the VF that failed
+ * the previous check.
+ */
+ while (v < pf->num_alloc_vfs) {
+ vf = &pf->vf[v];
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+ */
+ v++;
+ }
}
- /* tell the VF the reset is done */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
+ if (flr)
+ usleep_range(10000, 20000);
+
+ /* Log an error if at least one VF didn't manage to reset in
+ * time, but continue on with the operation.
+ */
+ if (v < pf->num_alloc_vfs)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ pf->vf[v].vf_id);
+ usleep_range(10000, 20000);
+
+ /* Begin disabling all the rings associated with VFs, but do not wait
+ * between each VF.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* On initial reset, we don't have any queues to disable */
+ if (pf->vf[v].lan_vsi_idx == 0)
+ continue;
+
+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ }
+
+ /* Now that we've notified HW to disable all of the VF rings, wait
+ * until they finish.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* On initial reset, we don't have any queues to disable */
+ if (pf->vf[v].lan_vsi_idx == 0)
+ continue;
+
+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ }
+
+ /* HW may need up to 50ms to finish disabling the RX queues. We
+ * minimize the wait by delaying only once for all VFs.
+ */
+ mdelay(50);
+
+ /* Finish the reset on each VF */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_cleanup_reset_vf(&pf->vf[v]);
i40e_flush(hw);
- clear_bit(__I40E_VF_DISABLE, &pf->state);
+ clear_bit(__I40E_VF_DISABLE, pf->state);
}
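/* Editor's note (illustrative arithmetic, not driver source): the batched
 * flow above pays each fixed delay once rather than once per VF. In the
 * serialized i40e_reset_vf() path every VF waits at least ~10-20 ms for
 * VPGEN_VFRSTAT to settle plus up to ~50 ms for its Rx queues to disable,
 * so 32 VFs cost on the order of 32 * (20 + 50) ms, roughly 2 s, in delays
 * alone. The batched version issues all triggers first, polls the VFs in a
 * single loop, and takes the 50 ms Rx-disable hit exactly once via
 * mdelay(50). Figures are rough and depend on hardware and sleep jitter.
 */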
/**
@@ -1029,13 +1190,25 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf)
return;
- while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
+ while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
usleep_range(1000, 2000);
i40e_notify_client_of_vf_enable(pf, 0);
- for (i = 0; i < pf->num_alloc_vfs; i++)
- if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
- i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+
+ /* Amortize wait time by stopping all VFs at the same time */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (!test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+ continue;
+
+ i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ }
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (!test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+ continue;
+
+ i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ }
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
@@ -1046,13 +1219,11 @@ void i40e_free_vfs(struct i40e_pf *pf)
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
- msleep(20); /* let any messages in transit get finished up */
-
/* free up VF resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
- if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */
i40e_disable_vf_mappings(&pf->vf[i]);
@@ -1075,7 +1246,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
}
- clear_bit(__I40E_VF_DISABLE, &pf->state);
+ clear_bit(__I40E_VF_DISABLE, pf->state);
}
#ifdef CONFIG_PCI_IOV
@@ -1120,12 +1291,15 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
- /* VF resources get allocated during reset */
- i40e_reset_vf(&vfs[i], false);
+
+ set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
}
pf->num_alloc_vfs = num_alloc_vfs;
+ /* VF resources get allocated during reset */
+ i40e_reset_all_vfs(pf, false);
+
i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
err_alloc:
@@ -1152,7 +1326,7 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
- if (test_bit(__I40E_TESTING, &pf->state)) {
+ if (test_bit(__I40E_TESTING, pf->state)) {
dev_warn(&pdev->dev,
"Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
err = -EPERM;
@@ -1258,7 +1432,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
"Number of invalid messages exceeded for VF %d\n",
vf->vf_id);
dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
- set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
}
} else {
vf->num_valid_msgs++;
@@ -1333,7 +1507,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
int len = 0;
int ret;
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -1359,10 +1533,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
- if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
+ if (i40e_vf_client_capable(pf, vf->vf_id) &&
(vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
- set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
+ set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
}
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -1383,6 +1557,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+ (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
@@ -1416,7 +1597,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
}
- set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
err:
/* send the response back to the VF */
@@ -1439,7 +1620,7 @@ err:
**/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
- if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
i40e_reset_vf(vf, false);
}
@@ -1487,7 +1668,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
int bkt;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
!vsi) {
aq_ret = I40E_ERR_PARAM;
@@ -1548,9 +1729,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
"VF %d successfully set multicast promiscuous mode\n",
vf->vf_id);
if (allmulti)
- set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
else
- clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
}
if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
@@ -1599,9 +1780,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
"VF %d successfully set unicast promiscuous mode\n",
vf->vf_id);
if (alluni)
- set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
else
- clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
error_param:
@@ -1630,7 +1811,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1687,7 +1868,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
unsigned long tempmap;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1747,7 +1928,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1786,7 +1967,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1828,7 +2009,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
memset(&stats, 0, sizeof(struct i40e_eth_stats));
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1853,7 +2034,7 @@ error_param:
}
/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
#define I40E_VC_MAX_VLAN_PER_VF 8
/**
@@ -1915,7 +2096,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1984,7 +2165,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2050,7 +2231,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
goto error_param;
}
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -2077,12 +2258,12 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!ret)
vf->num_vlan++;
- if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
true,
vfl->vlan_id[i],
NULL);
- if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
true,
vfl->vlan_id[i],
@@ -2117,7 +2298,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -2140,12 +2321,12 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
vf->num_vlan--;
- if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
false,
vfl->vlan_id[i],
NULL);
- if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
false,
vfl->vlan_id[i],
@@ -2171,8 +2352,8 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2202,8 +2383,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
(struct i40e_virtchnl_iwarp_qvlist_info *)msg;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2240,7 +2421,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vrk->vsi_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
(vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2272,7 +2453,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
u16 vsi_id = vrl->vsi_id;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
(vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2302,7 +2483,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int len = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2339,7 +2520,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2369,7 +2550,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
int valid_len = 0;
/* Check if VF is disabled. */
- if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
return I40E_ERR_PARAM;
/* Validate message length. */
@@ -2637,7 +2818,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
struct i40e_vf *vf;
int vf_id;
- if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
return 0;
/* Re-enable the VFLR interrupt cause here, before looking for which
@@ -2650,7 +2831,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw);
- clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
@@ -2693,7 +2874,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
@@ -2782,7 +2963,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
@@ -2914,7 +3095,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
@@ -2995,7 +3176,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
vf = &(pf->vf[vf_id]);
/* first vsi is always the LAN vsi */
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
@@ -3114,7 +3295,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
}
vf = &(pf->vf[vf_id]);
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
vf_id);
ret = -EAGAIN;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 4012d069939a..20d7c8160e9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -56,13 +56,14 @@ enum i40e_queue_ctrl {
/* VF states */
enum i40e_vf_states {
- I40E_VF_STAT_INIT = 0,
- I40E_VF_STAT_ACTIVE,
- I40E_VF_STAT_IWARPENA,
- I40E_VF_STAT_FCOEENA,
- I40E_VF_STAT_DISABLED,
- I40E_VF_STAT_MC_PROMISC,
- I40E_VF_STAT_UC_PROMISC,
+ I40E_VF_STATE_INIT = 0,
+ I40E_VF_STATE_ACTIVE,
+ I40E_VF_STATE_IWARPENA,
+ I40E_VF_STATE_FCOEENA,
+ I40E_VF_STATE_DISABLED,
+ I40E_VF_STATE_MC_PROMISC,
+ I40E_VF_STATE_UC_PROMISC,
+ I40E_VF_STATE_PRE_ENABLE,
};
/* VF capabilities */
@@ -87,7 +88,6 @@ struct i40e_vf {
u16 stag;
struct i40e_virtchnl_ether_addr default_lan_addr;
- struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
bool trusted;
@@ -125,6 +125,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* VF configuration related iplink handlers */
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index 3a423836a565..a393f4a07f06 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -29,8 +29,11 @@
#
#
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
obj-$(CONFIG_I40EVF) += i40evf.o
i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
- i40e_txrx.o i40e_common.o i40e_adminq.o
+ i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 96385156b824..8b0d4b255dea 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -797,8 +797,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
*/
if (i40evf_asq_done(hw))
break;
- usleep_range(1000, 2000);
- total_delay++;
+ udelay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 1f9b3b5d946d..e0bfaa3d4a21 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -151,7 +151,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
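
Taken together, the two admin-queue changes keep the total budget at 250 ms while tightening the poll granularity from 1-2 ms sleeps to 50 us busy-waits: 250000 us / 50 us allows roughly 5000 polls per command. A sketch of the resulting loop shape, with asq_done() standing in for the real completion check:

	/* Sketch: 50 us steps against the 250 ms (250000 us) budget */
	u32 total_delay = 0;

	do {
		if (asq_done(hw))	/* hypothetical completion check */
			break;
		udelay(50);
		total_delay += 50;
	} while (total_delay < I40E_ASQ_CMD_TIMEOUT);	/* ~5000 passes max */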
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index eeb9864bc5b1..91d8786d386d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,10 +185,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ /* Pipeline Personalization Profile */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -558,6 +571,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -640,6 +703,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -691,6 +756,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -1364,6 +1430,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+/* Pipeline Personalization Profile */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ppp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_PPP_GET_CONF 0x1
+#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1839,11 +1935,12 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
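
For orientation, here is a hypothetical composition of the new set_wol_filter fields (values invented for illustration): the filter slot sits in the low bits of filter_index, command bits go in cmd_flags, and each action must be mirrored in valid_flags before firmware will act on it.

	/* Hypothetical use of the new WoL filter command layout */
	struct i40e_aqc_set_wol_filter cmd = {};

	cmd.filter_index = cpu_to_le16((3 << I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) &
				       I40E_AQC_SET_WOL_FILTER_INDEX_MASK);
	cmd.cmd_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER |
				    I40E_AQC_SET_WOL_FILTER_ACTION_SET);
	cmd.valid_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER_ACTION_VALID);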
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 89dfdbca13db..43f10761f4ba 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -958,7 +958,9 @@ u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
@@ -1019,7 +1021,9 @@ void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
@@ -1127,3 +1131,215 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
+
+/**
+ * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code
+i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ppp_resp *resp;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->profile_track_id = cpu_to_le32(track_id);
+
+ status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @flags: get-applied-profiles flags (I40E_AQC_GET_PPP_*)
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code
+i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40evf_find_segment_in_package
+ * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40evf_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40evf_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum i40e_status_code
+i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ u32 dev_cnt;
+ u32 vendor_dev_id;
+ u32 *nvm;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 i;
+
+ if (!track_id) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
+ if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ nvm = (u32 *)&profile->device_table[dev_cnt];
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec = (struct i40e_profile_section_header *)((u8 *)profile +
+ sec_tbl->section_offset[i]);
+
+ /* Skip 'AQ', 'note' and 'name' sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write profile */
+ status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: offset %d, info %d",
+ offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40evf_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum i40e_status_code
+i40evf_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_PPP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+
+ status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index ba6c6bda0e22..741223d5d809 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -122,4 +122,21 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40evf_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+enum i40e_status_code
+i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40evf_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */
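
The four new prototypes form a small download pipeline: locate the profile segment, write it to the device, then register it on the applied-profiles list. A hedged sketch of a caller, assuming pkg points at a validated package image and SEGMENT_TYPE_I40E names the profile segment type (neither is shown in this patch):

	/* Sketch: find, download, then register a profile segment */
	struct i40e_profile_segment *seg;
	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
		     sizeof(struct i40e_profile_info)];

	seg = (struct i40e_profile_segment *)
		i40evf_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (seg && !i40evf_write_profile(hw, seg, track_id))
		i40evf_add_pinfo_to_list(hw, seg, pinfo_sec, track_id);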
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
new file mode 100644
index 000000000000..9a5100b2b7c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
@@ -0,0 +1,229 @@
+/*******************************************************************************
+ *
+ * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver
+ * Copyright(c) 2013 - 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for i40evf will be "i40evf".
+ *
+ * This file is named i40e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i40evf
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I40E_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/**
+ * i40e_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_i40e{,vf}_example(args...)
+ *
+ * ... as:
+ *
+ * i40e_trace(example, args...)
+ *
+ * ... to resolve to the PF or VF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Trace points should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, i40e_trace_enabled(trace_name) wraps references to
+ * trace_i40e{,vf}_<trace_name>_enabled() functions.
+ */
+#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name)
+#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
+
+#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
+
+#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
+
+/* Events common to PF and VF. Corresponding versions will be defined
+ * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
+ * macro above will select the right trace point name for the driver
+ * being built from shared code.
+ */
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(
+ i40evf_tx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf),
+
+ /* The convention here is to make the first fields in the
+ * TP_STRUCT match the TP_PROTO exactly. This enables the use
+ * of the args struct generated by the tplist tool (from the
+ * bcc-tools package) to be used for those fields. Accessing
+ * fields other than the tracepoint args will require the
+ * tplist output to be adjusted.
+ */
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, buf)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->buf = buf;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p buf %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->buf)
+);
+
+DEFINE_EVENT(
+ i40evf_tx_template, i40evf_clean_tx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DEFINE_EVENT(
+ i40evf_tx_template, i40evf_clean_tx_irq_unmap,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DECLARE_EVENT_CLASS(
+ i40evf_rx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb),
+
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, skb)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->skb = skb;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p skb %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(
+ i40evf_rx_template, i40evf_clean_rx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DEFINE_EVENT(
+ i40evf_rx_template, i40evf_clean_rx_irq_rx,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DECLARE_EVENT_CLASS(
+ i40evf_xmit_template,
+
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring),
+
+ TP_STRUCT__entry(
+ __field(void*, skb)
+ __field(void*, ring)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->ring = ring;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s skb: %p ring: %p",
+ __get_str(devname), __entry->skb,
+ __entry->ring)
+);
+
+DEFINE_EVENT(
+ i40evf_xmit_template, i40evf_xmit_frame_ring,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+DEFINE_EVENT(
+ i40evf_xmit_template, i40evf_xmit_frame_ring_drop,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+/* Events unique to the VF. */
+
+#endif /* _I40E_TRACE_H_ */
+/* This must be outside ifdef _I40E_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE i40e_trace
+#include <trace/define_trace.h>
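
The indirection keeps call sites identical in code shared between the PF and VF drivers; the -I$(src) added to the Makefile above is what lets define_trace.h find this header via TRACE_INCLUDE_PATH. For example, the Rx path later in this patch calls:

	i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
	/* which expands here to
	 *	trace_i40evf_clean_rx_irq(rx_ring, rx_desc, skb);
	 * while the same line built into the i40e PF driver resolves
	 * to trace_i40e_clean_rx_irq(rx_ring, rx_desc, skb).
	 */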
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index c91fcf43ccbc..dfe241a12ad0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -28,6 +28,7 @@
#include <net/busy_poll.h>
#include "i40evf.h"
+#include "i40e_trace.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
@@ -137,10 +138,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
u32 head, tail;
- if (!in_sw)
- head = i40e_get_head(ring);
- else
- head = ring->next_to_clean;
+ head = ring->next_to_clean;
tail = readl(ring->tail);
if (head != tail)
@@ -165,7 +163,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
- struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = vsi->work_limit;
@@ -174,8 +171,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_desc = I40E_TX_DESC(tx_ring, i);
i -= tx_ring->count;
- tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
-
do {
struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
@@ -186,8 +181,10 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
- /* we have caught up to head, no work left to do */
- if (tx_head == tx_desc)
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
break;
/* clear next_to_watch to prevent false hangs */
@@ -212,6 +209,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
+ i40e_trace(clean_tx_irq_unmap,
+ tx_ring, tx_desc, tx_buf);
tx_buf++;
tx_desc++;
@@ -267,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &vsi->state) &&
+ !test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -285,7 +284,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &vsi->state)) {
+ !test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -464,10 +463,6 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
- /* add u32 for head writeback, align after this takes care of
- * guaranteeing this is at least one cache line in size
- */
- tx_ring->size += sizeof(u32);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -493,7 +488,6 @@ err:
**/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
u16 i;
@@ -513,8 +507,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_bi->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
rx_bi->page = NULL;
rx_bi->page_offset = 0;
@@ -615,6 +623,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
* i40e_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
* @bi: rx_buffer struct to modify
@@ -635,27 +654,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, i40e_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = i40e_rx_offset(rx_ring);
+
+ /* initialize pagecnt_bias to 1 representing we fully own page */
+ bi->pagecnt_bias = 1;
return true;
}
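
Rx pages are now mapped once with DMA attributes and handed back and forth with ranged syncs, rather than mapped and unmapped per packet. A compressed sketch of the lifecycle, where off and buf_len (assumed names) describe the active region of the page:

	/* Sketch: map once, sync per hand-off, unmap only on retirement */
	dma = dma_map_page_attrs(dev, page, 0, pg_size,
				 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);

	/* hand the buffer to the device before posting its descriptor */
	dma_sync_single_range_for_device(dev, dma, off, buf_len,
					 DMA_FROM_DEVICE);

	/* reclaim it for the CPU before reading the received frame */
	dma_sync_single_range_for_cpu(dev, dma, off, buf_len,
				      DMA_FROM_DEVICE);

	/* only once the page can no longer be recycled */
	dma_unmap_page_attrs(dev, dma, pg_size,
			     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);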
@@ -702,6 +727,12 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -742,8 +773,6 @@ no_buffers:
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
@@ -806,13 +835,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* If there is an outer header present that might contain a checksum
- * we need to bump the checksum level by 1 to reflect the fact that
- * we are indicating we validated the inner checksum.
- */
- if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
- skb->csum_level = 1;
-
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP:
@@ -895,12 +917,12 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
{
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
@@ -945,7 +967,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
/**
@@ -966,8 +991,6 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
- * @page: page address from rx_buffer
- * @truesize: actual size of the buffer in this page
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@@ -990,13 +1013,10 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
- struct page *page,
- const unsigned int truesize)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
-#endif
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page)))
@@ -1004,21 +1024,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > last_offset)
+#define I40E_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET)
return false;
#endif
- /* Inc ref count on page before passing it up to the stack */
- get_page(page);
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
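
The bias replaces per-packet get_page()/put_page() traffic with plain arithmetic: the driver pre-charges the page refcount and spends from pagecnt_bias, so page_count(page) - pagecnt_bias counts the references actually held outside the driver. A minimal sketch of the two halves of that accounting, using a simplified buffer struct:

	/* Sketch of the pagecnt_bias accounting (simplified types) */
	struct buf { struct page *page; unsigned short pagecnt_bias; };

	static bool sole_owner(struct buf *b)
	{
		/* reusable only while at most one reference lives elsewhere */
		return (page_count(b->page) - b->pagecnt_bias) <= 1;
	}

	static void replenish(struct buf *b)
	{
		/* re-charge the refcount so hand-offs just decrement the bias */
		page_ref_add(b->page, USHRT_MAX);
		b->pagecnt_bias = USHRT_MAX;
	}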
@@ -1027,145 +1049,201 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @size: packet length from rx_desc
* @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
**/
-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- unsigned int size,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = I40E_RXBUFFER_2048;
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif
- unsigned int pull_len;
-
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if (size <= I40E_RX_HDR_SIZE) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is reusable, we can reuse buffer as-is */
- if (likely(i40e_page_is_reusable(page)))
- return true;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
+ /* page is being used so we must update the page offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
- /* we need the header to contain the greater of either
- * ETH_HLEN or 60 bytes if the skb->len is less than
- * 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+/**
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @size: size of buffer to add to skb
+ *
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
+ */
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+ const unsigned int size)
+{
+ struct i40e_rx_buffer *rx_buffer;
- /* align pull length to size of long to optimize
- * memcpy performance
- */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
- /* update all of the pointers */
- va += pull_len;
- size -= pull_len;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buffer->pagecnt_bias--;
- return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
+ return rx_buffer;
}
/**
- * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @rx_buffer: rx buffer to pull data from
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
*/
-static inline
-struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb)
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
{
- u64 local_status_error_len =
- le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size =
- (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- struct i40e_rx_buffer *rx_buffer;
- struct page *page;
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > I40E_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset + headlen,
+ size, truesize);
+
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ /* buffer is unused, reset bias back to rx_buffer */
+ rx_buffer->pagecnt_bias++;
+ }
+
+ return skb;
+}
- if (likely(!skb)) {
- void *page_addr = page_address(page) + rx_buffer->page_offset;
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(va - I40E_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- return NULL;
- }
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, I40E_SKB_PAD);
+ __skb_put(skb, size);
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
+ return skb;
+}
- /* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ if (i40e_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
}
/* clear contents of buffer_info */
rx_buffer->page = NULL;
-
- return skb;
}
/**
@@ -1221,7 +1299,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false;
while (likely(total_rx_packets < budget)) {
+ struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1238,22 +1318,40 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the
- * hardware wrote DD then it will be non-zero
+ * hardware wrote DD then the length will be non-zero
*/
- if (!i40e_test_staterr(rx_desc,
- BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
*/
dma_rmb();
- skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
- if (!skb)
+ size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ if (!size)
break;
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+ /* retrieve a buffer from the ring */
+ if (skb)
+ i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = i40e_build_skb(rx_ring, rx_buffer, size);
+ else
+ skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
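
The descriptor test in this hunk now reads the whole qword once and keys off the packet-buffer length instead of the DD bit; a masked shift recovers the size. A standalone sketch of the extraction -- the mask and shift mirror the i40e descriptor layout and are quoted here for illustration:

#include <stdint.h>
#include <stdio.h>

#define LENGTH_PBUF_SHIFT 38
#define LENGTH_PBUF_MASK  (0x3FFFULL << LENGTH_PBUF_SHIFT)

int main(void)
{
	/* a write-back descriptor carrying a 1514-byte packet */
	uint64_t qword = (uint64_t)1514 << LENGTH_PBUF_SHIFT;
	unsigned int size = (qword & LENGTH_PBUF_MASK) >> LENGTH_PBUF_SHIFT;

	printf("size = %u (zero would mean: not written back yet)\n", size);
	return 0;
}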
@@ -1266,6 +1364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
+ skb = NULL;
continue;
}
@@ -1288,6 +1387,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
i40e_receive_skb(rx_ring, skb, vlan_tag);
skb = NULL;
@@ -1408,7 +1508,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
}
enable_int:
- if (!test_bit(__I40E_DOWN, &vsi->state))
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
@@ -1437,7 +1537,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring;
int work_done = 0;
- if (test_bit(__I40E_DOWN, &vsi->state)) {
+ if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi);
return 0;
}
@@ -1980,7 +2080,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2016,7 +2115,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
- desc_count++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
@@ -2038,7 +2136,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
- desc_count++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
@@ -2064,46 +2161,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
- /* write last descriptor with EOP bit */
- td_cmd |= I40E_TX_DESC_CMD_EOP;
-
- /* We can OR these values together as they both are checked against
- * 4 below and at this point desc_count will be used as a boolean value
- * after this if/else block.
- */
- desc_count |= ++tx_ring->packet_stride;
-
- /* Algorithm to optimize tail and RS bit setting:
- * if queue is stopped
- * mark RS bit
- * reset packet counter
- * else if xmit_more is supported and is true
- * advance packet counter to 4
- * reset desc_count to 0
- *
- * if desc_count >= 4
- * mark RS bit
- * reset packet counter
- * if desc_count > 0
- * update tail
- *
- * Note: If there are less than 4 descriptors
- * pending and interrupts were disabled the service task will
- * trigger a force WB.
- */
- if (netif_xmit_stopped(txring_txq(tx_ring))) {
- goto do_rs;
- } else if (skb->xmit_more) {
- /* set stride to arm on next packet and reset desc_count */
- tx_ring->packet_stride = WB_STRIDE;
- desc_count = 0;
- } else if (desc_count >= WB_STRIDE) {
-do_rs:
- /* write last descriptor with RS bit set */
- td_cmd |= I40E_TX_DESC_CMD_RS;
- tx_ring->packet_stride = 0;
- }
-
+ /* write last descriptor with RS and EOP bits */
+ td_cmd |= I40E_TXD_CMD;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
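
With the stride logic removed, every final descriptor now carries RS|EOP (the I40E_TXD_CMD value) and only the tail bump is deferred on xmit_more. A sketch of the 64-bit packing that build_ctob() performs -- field positions follow the i40e descriptor layout and are illustrative:

#include <stdint.h>
#include <stdio.h>

#define QW1_CMD_SHIFT    4
#define QW1_OFFSET_SHIFT 16
#define QW1_BUF_SZ_SHIFT 34
#define QW1_L2TAG1_SHIFT 48

static uint64_t build_ctob(uint32_t cmd, uint32_t offset, uint32_t size,
			   uint32_t tag)
{
	return ((uint64_t)cmd << QW1_CMD_SHIFT) |
	       ((uint64_t)offset << QW1_OFFSET_SHIFT) |
	       ((uint64_t)size << QW1_BUF_SZ_SHIFT) |
	       ((uint64_t)tag << QW1_L2TAG1_SHIFT);
}

int main(void)
{
	/* cmd 0x3 = RS (bit 1) | EOP (bit 0), a 1514-byte buffer */
	printf("qword1 = 0x%016llx\n",
	       (unsigned long long)build_ctob(0x3, 0, 1514, 0));
	return 0;
}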
@@ -2119,7 +2178,7 @@ do_rs:
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (desc_count) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
@@ -2170,6 +2229,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* prefetch the data, we'll need it later */
prefetch(skb->data);
+ i40e_trace(xmit_frame_ring, skb, tx_ring);
+
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb)) {
@@ -2237,6 +2298,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
+ i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8274ba68bd32..901282c87cf6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -104,10 +104,9 @@ enum i40e_dyn_idx_t {
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
+#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048
-#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096 4096
-#define I40E_RXBUFFER_8192 8192
+#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -120,6 +119,61 @@ enum i40e_dyn_idx_t {
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (I40E_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = I40E_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
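
To make the headroom math concrete, here is a userspace rendering of i40e_compute_pad() under assumed x86-64 numbers: 4K pages, 64-byte cache lines, and a 320-byte skb_shared_info (that last value varies by kernel version):

#include <stdio.h>

#define PAGE_SZ		4096
#define CACHE_BYTES	64
#define SHINFO_SIZE	320	/* assumed sizeof(struct skb_shared_info) */

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_WITH_OVERHEAD(x)	((x) - ALIGN_UP(SHINFO_SIZE, CACHE_BYTES))

static int compute_pad(int rx_buf_len)
{
	int page_size = ALIGN_UP(rx_buf_len, PAGE_SZ / 2);

	return SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
}

int main(void)
{
	/* 1536 less NET_IP_ALIGN (2), the value i40e_skb_pad() feeds in */
	printf("headroom for a 1534-byte buffer: %d bytes\n",
	       compute_pad(1534));	/* 194 under these assumptions */
	return 0;
}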
+
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
@@ -241,7 +295,12 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct i40e_queue_stats {
@@ -321,7 +380,8 @@ struct i40e_ring {
u8 packet_stride;
u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
/* stats structs */
struct i40e_queue_stats stats;
@@ -349,6 +409,21 @@ struct i40e_ring {
*/
} ____cacheline_internodealigned_in_smp;
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
@@ -370,6 +445,17 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
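
The helpers above capture when a half-page buffer stops fitting: once rx_buf_len exceeds half a page, an order-1 page is allocated so each page still serves two buffers. A minimal sketch for an assumed 4K-page system:

#include <stdio.h>

#define PAGE_SZ 4096

static unsigned int rx_pg_order(unsigned int rx_buf_len)
{
	return (rx_buf_len > PAGE_SZ / 2) ? 1 : 0;
}

int main(void)
{
	unsigned int len;

	for (len = 2048; len <= 3072; len += 1024)
		printf("buf %u -> page size %u\n",
		       len, PAGE_SZ << rx_pg_order(len));
	return 0;
}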
+
bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -385,20 +471,6 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: Tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
-/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
@@ -460,19 +532,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
-
-/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
/**
- * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of
**/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 16bb88084bb9..bde7f24af1c6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -78,6 +78,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -1396,4 +1397,83 @@ enum i40e_reset_type {
#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
I40E_FD_INSET_FLEX_WORD57_SHIFT)
+
+/* Version format for PPP */
+struct i40e_ppp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_PPP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ppp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ppp_version version;
+ u32 size;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ u32 track_id;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ char name[I40E_PPP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ppp_version version;
+ u8 op;
+#define I40E_PPP_ADD_TRACKID 0x01
+#define I40E_PPP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_PPP_NAME_SIZE];
+};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index d38a2b2aea2b..c5ad0388c3d5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,7 +81,9 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
@@ -161,7 +163,8 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -393,6 +396,37 @@ struct i40e_virtchnl_pf_event {
int severity;
};
+/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request the PF to map IWARP vectors to IWARP
+ * queues. The request for this originates from the VF IWARP driver
+ * through a client interface between VF LAN and VF IWARP driver.
+ * A vector could have both an AEQ and a CEQ attached to it. However,
+ * since there is a single AEQ per VF IWARP instance, most vectors
+ * will have an INVALID_IDX for the aeq and a valid idx for the ceq.
+ * There will never be multiple CEQs attached to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct i40e_virtchnl_iwarp_qv_info qv_info[1];
+};
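
The qv_info[1] trailing array above is the usual pre-flexible-array idiom: sizeof() already includes one element, so a message carrying N entries is sized as the struct plus (N - 1) extra elements -- the same math i40evf_client_setup_qvlist() uses later in this patch. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

struct qv_info {
	uint32_t v_idx;
	uint16_t ceq_idx;
	uint16_t aeq_idx;
	uint8_t itr_idx;
};

struct qvlist_info {
	uint32_t num_vectors;
	struct qv_info qv_info[1];
};

int main(void)
{
	uint32_t num_vectors = 4;
	size_t msg_size = sizeof(struct qvlist_info) +
			  sizeof(struct qv_info) * (num_vectors - 1);

	printf("message size for %u vectors: %zu bytes\n",
	       num_vectors, msg_size);
	return 0;
}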
+
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 00c42d803276..b8ada6d8d890 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -49,6 +49,13 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "i40evf: "
+/* VSI state flags shared with common code */
+enum i40evf_vsi_state_t {
+ __I40E_VSI_DOWN,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_VSI_STATE_SIZE__,
+};
+
/* dummy struct to make common code less painful */
struct i40e_vsi {
struct i40evf_adapter *back;
@@ -56,10 +63,11 @@ struct i40e_vsi {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
int base_vector;
u16 work_limit;
u16 qs_handle;
+ void *priv; /* client driver data reference. */
};
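
The state change above swaps a bare "unsigned long" for a bitmap sized by the final enum entry, so test_bit()/set_bit() take the array directly rather than a pointer to a single word. A userspace approximation of the mechanics (the kernel's bitops are the real, atomic versions):

#include <limits.h>
#include <stdio.h>

enum { VSI_DOWN, VSI_STATE_SIZE };

#define BITS_PER_LONG	 (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

static void set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	DECLARE_BITMAP(state, VSI_STATE_SIZE) = { 0 };

	set_bit(VSI_DOWN, state);
	printf("VSI down: %d\n", test_bit(VSI_DOWN, state));
	return 0;
}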
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -71,10 +79,6 @@ struct i40e_vsi {
#define I40EVF_MAX_RXD 4096
#define I40EVF_MIN_RXD 64
#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
-
-/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_2048 2048
-#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
#define I40EVF_AQ_LEN 32
#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
@@ -169,15 +173,15 @@ enum i40evf_state_t {
enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+ __I40EVF_IN_CLIENT_TASK,
};
-/* make common code happy */
-#define __I40E_DOWN __I40EVF_DOWN
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
struct work_struct reset_task;
struct work_struct adminq_task;
+ struct delayed_work client_task;
struct delayed_work init_task;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
@@ -195,15 +199,16 @@ struct i40evf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
+ int num_iwarp_msix;
+ int iwarp_base_vector;
u32 client_pending;
+ struct i40e_client_instance *cinst;
struct msix_entry *msix_entries;
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_IN_NETPOLL BIT(4)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
#define I40EVF_FLAG_RESET_PENDING BIT(9)
#define I40EVF_FLAG_RESET_NEEDED BIT(10)
@@ -211,15 +216,18 @@ struct i40evf_adapter {
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
-#define I40EVF_FLAG_PROMISC_ON BIT(15)
-#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17)
+#define I40EVF_FLAG_PROMISC_ON BIT(18)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(19)
+#define I40EVF_FLAG_LEGACY_RX BIT(20)
/* duplicates for common code */
-#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
-#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
+#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX
/* flags for admin queue service task */
u32 aq_required;
#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
@@ -246,7 +254,6 @@ struct i40evf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
struct i40e_hw hw; /* defined in i40e_type.h */
@@ -258,10 +265,11 @@ struct i40evf_adapter {
bool link_up;
enum i40e_aq_link_speed link_speed;
enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
0)
+#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -292,6 +300,12 @@ struct i40evf_adapter {
/* Ethtool Private Flags */
+/* lan device */
+struct i40e_device {
+ struct list_head list;
+ struct i40evf_adapter *vf;
+};
+
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
extern const char i40evf_driver_version[];
@@ -337,4 +351,11 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
int i40evf_config_rss(struct i40evf_adapter *adapter);
+int i40evf_lan_add_device(struct i40evf_adapter *adapter);
+int i40evf_lan_del_device(struct i40evf_adapter *adapter);
+void i40evf_client_subtask(struct i40evf_adapter *adapter);
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
+void i40evf_notify_client_open(struct i40e_vsi *vsi);
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
new file mode 100644
index 000000000000..ee737680a0e9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -0,0 +1,564 @@
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+static
+const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
+static struct i40e_client *vf_registered_client;
+static LIST_HEAD(i40evf_devices);
+static DEFINE_MUTEX(i40evf_device_mutex);
+
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
+
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info);
+
+static struct i40e_ops i40evf_lan_ops = {
+ .virtchnl_send = i40evf_client_virtchnl_send,
+ .setup_qvlist = i40evf_client_setup_qvlist,
+};
+
+/**
+ * i40evf_notify_client_message - call the client message receive callback
+ * @vsi: the VSI associated with this client
+ * @msg: message buffer
+ * @len: length of message
+ *
+ * If a client is attached to this VSI, pass the message to it
+ **/
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
+{
+ struct i40e_client_instance *cinst;
+
+ if (!vsi)
+ return;
+
+ cinst = vsi->back->cinst;
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->virtchnl_receive) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance virtchnl_receive function\n");
+ return;
+ }
+ cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client,
+ msg, len);
+}
+
+/**
+ * i40evf_notify_client_l2_params - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If a client is attached to this VSI, notify it of the l2 param change
+ **/
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
+{
+ struct i40e_client_instance *cinst;
+ struct i40e_params params;
+
+ if (!vsi)
+ return;
+
+ cinst = vsi->back->cinst;
+ memset(&params, 0, sizeof(params));
+ params.mtu = vsi->netdev->mtu;
+ params.link_up = vsi->back->link_up;
+ params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change function\n");
+ return;
+ }
+ cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
+ &params);
+}
+
+/**
+ * i40evf_notify_client_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If a client is attached to this netdev, call its open callback
+ **/
+void i40evf_notify_client_open(struct i40e_vsi *vsi)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_client_instance *cinst = adapter->cinst;
+ int ret;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->open) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance open function\n");
+ return;
+ }
+ if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+ ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ }
+}
+
+/**
+ * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * @ldev: pointer to L2 context.
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_release_qvlist(struct i40e_info *ldev)
+{
+ struct i40evf_adapter *adapter = ldev->vf;
+ i40e_status err;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ err = i40e_aq_send_msg_to_pf(&adapter->hw,
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ I40E_SUCCESS, NULL, 0, NULL);
+
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40evf_notify_client_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to reset pending
+ *
+ * If a client is attached to this netdev, call its close callback
+ **/
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_client_instance *cinst = adapter->cinst;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close function\n");
+ return;
+ }
+ cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
+ i40evf_client_release_qvlist(&cinst->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+}
+
+/**
+ * i40evf_client_add_instance - add a client instance to the instance list
+ * @adapter: pointer to the board struct
+ *
+ * Returns cinst ptr on success, NULL on failure
+ **/
+static struct i40e_client_instance *
+i40evf_client_add_instance(struct i40evf_adapter *adapter)
+{
+ struct i40e_client_instance *cinst = NULL;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ int i;
+
+ if (!vf_registered_client)
+ goto out;
+
+ if (adapter->cinst) {
+ cinst = adapter->cinst;
+ goto out;
+ }
+
+ cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
+ if (!cinst)
+ goto out;
+
+ cinst->lan_info.vf = (void *)adapter;
+ cinst->lan_info.netdev = vsi->netdev;
+ cinst->lan_info.pcidev = adapter->pdev;
+ cinst->lan_info.fid = 0;
+ cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+ cinst->lan_info.hw_addr = adapter->hw.hw_addr;
+ cinst->lan_info.ops = &i40evf_lan_ops;
+ cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
+ cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
+ cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+
+ cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+ cinst->lan_info.msix_entries =
+ &adapter->msix_entries[adapter->iwarp_base_vector];
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ cinst->lan_info.params.qos.prio_qos[i].tc = 0;
+ cinst->lan_info.params.qos.prio_qos[i].qs_handle =
+ vsi->qs_handle;
+ }
+
+ mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (mac)
+ ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
+ else
+ dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
+
+ cinst->client = vf_registered_client;
+ adapter->cinst = cinst;
+out:
+ return cinst;
+}
+
+/**
+ * i40evf_client_del_instance - removes a client instance from the list
+ * @adapter: pointer to the board struct
+ *
+ **/
+static
+void i40evf_client_del_instance(struct i40evf_adapter *adapter)
+{
+ kfree(adapter->cinst);
+ adapter->cinst = NULL;
+}
+
+/**
+ * i40evf_client_subtask - client maintenance work
+ * @adapter: board private structure
+ **/
+void i40evf_client_subtask(struct i40evf_adapter *adapter)
+{
+ struct i40e_client *client = vf_registered_client;
+ struct i40e_client_instance *cinst;
+ int ret = 0;
+
+ if (adapter->state < __I40EVF_DOWN)
+ return;
+
+ /* first check client is registered */
+ if (!client)
+ return;
+
+ /* Add the client instance to the instance list */
+ cinst = i40evf_client_add_instance(adapter);
+ if (!cinst)
+ return;
+
+ dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
+ client->name);
+
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ /* Send an Open request to the client */
+
+ if (client->ops && client->ops->open)
+ ret = client->ops->open(&cinst->lan_info, client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cinst->state);
+ else
+ /* remove client instance */
+ i40evf_client_del_instance(adapter);
+ }
+}
+
+/**
+ * i40evf_lan_add_device - add a lan device struct to the list of lan devices
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_lan_add_device(struct i40evf_adapter *adapter)
+{
+ struct i40e_device *ldev;
+ int ret = 0;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ if (ldev->vf == adapter) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ldev->vf = adapter;
+ INIT_LIST_HEAD(&ldev->list);
+ list_add(&ldev->list, &i40evf_devices);
+ dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+ adapter->hw.bus.bus_id, adapter->hw.bus.device,
+ adapter->hw.bus.func);
+
+ /* Since in some cases registration may have happened before a device
+ * gets added, we schedule a subtask to initiate the clients.
+ */
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+
+out:
+ mutex_unlock(&i40evf_device_mutex);
+ return ret;
+}
+
+/**
+ * i40evf_lan_del_device - removes a lan device from the device list
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_lan_del_device(struct i40evf_adapter *adapter)
+{
+ struct i40e_device *ldev, *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
+ if (ldev->vf == adapter) {
+ dev_info(&adapter->pdev->dev,
+ "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+ adapter->hw.bus.bus_id, adapter->hw.bus.device,
+ adapter->hw.bus.func);
+ list_del(&ldev->list);
+ kfree(ldev);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&i40evf_device_mutex);
+ return ret;
+}
+
+/**
+ * i40evf_client_release - release client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_release(struct i40e_client *client)
+{
+ struct i40e_client_instance *cinst;
+ struct i40e_device *ldev;
+ struct i40evf_adapter *adapter;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ adapter = ldev->vf;
+ cinst = adapter->cinst;
+ if (!cinst)
+ continue;
+ if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ if (client->ops && client->ops->close)
+ client->ops->close(&cinst->lan_info, client,
+ false);
+ i40evf_client_release_qvlist(&cinst->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+
+ dev_warn(&adapter->pdev->dev,
+ "Client %s instance closed\n", client->name);
+ }
+ /* delete the client instance */
+ i40evf_client_del_instance(adapter);
+ dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
+ client->name);
+ }
+ mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_prepare - prepare client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_prepare(struct i40e_client *client)
+{
+ struct i40e_device *ldev;
+ struct i40evf_adapter *adapter;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ adapter = ldev->vf;
+ /* Signal the watchdog to service the client */
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ }
+ mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_virtchnl_send - send a message to the PF instance
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len)
+{
+ struct i40evf_adapter *adapter = ldev->vf;
+ i40e_status err;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP,
+ I40E_SUCCESS, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info)
+{
+ struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct i40evf_adapter *adapter = ldev->vf;
+ struct i40e_qv_info *qv_info;
+ i40e_status err;
+ u32 v_idx, i;
+ u32 msg_size;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ /* A quick check on whether the vectors belong to the client */
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+ if ((v_idx >=
+ (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
+ (v_idx < adapter->iwarp_base_vector))
+ return -EINVAL;
+ }
+
+ v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info;
+ msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ (v_qvlist_info->num_vectors - 1));
+
+ adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ err = i40e_aq_send_msg_to_pf(&adapter->hw,
+ I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ err = -EBUSY;
+ for (i = 0; i < 5; i++) {
+ msleep(100);
+ if (!(adapter->client_pending &
+ BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ err = 0;
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+/**
+ * i40evf_register_client - Register an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_register_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+ if (!client) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (strlen(client->name) == 0) {
+ pr_info("i40evf: Failed to register client with no name\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (vf_registered_client) {
+ pr_info("i40evf: Client %s has already been registered!\n",
+ client->name);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
+ (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
+ pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
+ client->name);
+ pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+ client->version.major, client->version.minor,
+ client->version.build,
+ i40evf_client_interface_version_str);
+ ret = -EIO;
+ goto out;
+ }
+
+ vf_registered_client = client;
+
+ i40evf_client_prepare(client);
+
+ pr_info("i40evf: Registered client %s with return code %d\n",
+ client->name, ret);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40evf_register_client);
+
+/**
+ * i40evf_unregister_client - Unregister an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_unregister_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+ /* When an unregister request comes through, we have to send
+ * a close for each of the client instances that were opened.
+ * The client_release function is called to handle this.
+ */
+ i40evf_client_release(client);
+
+ if (vf_registered_client != client) {
+ pr_info("i40evf: Client %s has not been registered\n",
+ client->name);
+ ret = -ENODEV;
+ goto out;
+ }
+ vf_registered_client = NULL;
+ pr_info("i40evf: Unregistered client %s\n", client->name);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40evf_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
new file mode 100644
index 000000000000..7d283c7506a5
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -0,0 +1,166 @@
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40EVF_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40EVF_CLIENT_VERSION_MAJOR 0
+#define I40EVF_CLIENT_VERSION_MINOR 01
+#define I40EVF_CLIENT_VERSION_BUILD 00
+#define I40EVF_CLIENT_VERSION_STR \
+ __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
+ __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
+ __stringify(I40EVF_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+ u8 major;
+ u8 minor;
+ u8 build;
+ u8 rsvd;
+};
+
+enum i40e_client_state {
+ __I40E_CLIENT_NULL,
+ __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+ __I40E_CLIENT_INSTANCE_NONE,
+ __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+ u32 num_vectors;
+ struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+ u16 qs_handle; /* qs handle for prio */
+ u8 tc; /* TC mapped to prio */
+ u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY 8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+ struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+ struct i40e_qos_params qos;
+ u16 mtu;
+ u16 link_up; /* boolean */
+};
+
+/* Structure to hold LAN device info for a client device */
+struct i40e_info {
+ struct i40e_client_version version;
+ u8 lanmac[6];
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ u8 __iomem *hw_addr;
+ u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+ u8 ftype; /* function type, PF or VF */
+ void *vf; /* cast to i40evf_adapter */
+
+ /* All L2 params that could change during the life span of the device
+ * and needs to be communicated to the client when they change
+ */
+ struct i40e_params params;
+ struct i40e_ops *ops;
+
+ u16 msix_count; /* number of msix vectors */
+ /* Array down below will be dynamically allocated based on msix_count */
+ struct msix_entry *msix_entries;
+ u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+};
+
+struct i40e_ops {
+ /* setup_q_vector_list enables queues with a particular vector */
+ int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+ struct i40e_qvlist_info *qv_info);
+
+ u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u8 *msg, u16 len);
+
+ /* If the PE Engine is unresponsive, the RDMA driver can request a reset. */
+ void (*request_reset)(struct i40e_info *ldev,
+ struct i40e_client *client);
+};
+
+struct i40e_client_ops {
+ /* Should be called from register_client() or whenever the driver is
+ * ready to create a specific client instance.
+ */
+ int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+ /* Should be called when the netdev is unavailable or when an unregister
+ * call comes in. If the close happens due to a reset, set the reset
+ * bit to true.
+ */
+ void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ bool reset);
+
+ /* called when an L2 managed parameter changes, e.g. mss */
+ void (*l2_param_change)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params);
+
+ /* called when a message is received from the PF */
+ int (*virtchnl_receive)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
+};
+
+/* Client device */
+struct i40e_client_instance {
+ struct list_head list;
+ struct i40e_info lan_info;
+ struct i40e_client *client;
+ unsigned long state;
+};
+
+struct i40e_client {
+ struct list_head list; /* list of registered clients */
+ char name[I40EVF_CLIENT_STR_LENGTH];
+ struct i40e_client_version version;
+ unsigned long state; /* client state */
+ atomic_t ref_cnt; /* Count of all the client devices of this kind */
+ u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
+ u8 type;
+#define I40E_CLIENT_IWARP 0
+ struct i40e_client_ops *ops; /* client ops provided by the client */
+};
+
+/* used by clients */
+int i40evf_register_client(struct i40e_client *client);
+int i40evf_unregister_client(struct i40e_client *client);
+#endif /* _I40E_CLIENT_H_ */
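
A hypothetical skeleton of a VF RDMA client written against the interface above -- the "demo"/"my_" names and callback bodies are illustrative, not part of this patch:

#include "i40evf_client.h"

static int my_open(struct i40e_info *ldev, struct i40e_client *client)
{
	/* bring up PE resources using ldev->msix_entries, ldev->ops, etc. */
	return 0;
}

static void my_close(struct i40e_info *ldev, struct i40e_client *client,
		     bool reset)
{
	/* tear down; reset == true means the LAN driver is resetting */
}

static struct i40e_client_ops my_ops = {
	.open	= my_open,
	.close	= my_close,
};

static struct i40e_client my_client = {
	.name		= "demo-iwarp",
	.version	= {
		.major	= I40EVF_CLIENT_VERSION_MAJOR,
		.minor	= I40EVF_CLIENT_VERSION_MINOR,
		.build	= I40EVF_CLIENT_VERSION_BUILD,
	},
	.type		= I40E_CLIENT_IWARP,
	.ops		= &my_ops,
};

/* A module init/exit pair would then call these; note that
 * i40evf_register_client() rejects mismatched MAJOR/MINOR versions. */
static int demo_init(void) { return i40evf_register_client(&my_client); }
static void demo_exit(void) { i40evf_unregister_client(&my_client); }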
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 272d600c1ed0..9bb2cc7dd4e4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -63,52 +63,74 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
+/* For now we have one and only one private flag, and it is only defined
+ * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
+ * of leaving all this code sitting around empty, we will strip it unless
+ * our one private flag is actually available.
+ */
+struct i40evf_priv_flags {
+ char flag_string[ETH_GSTRING_LEN];
+ u32 flag;
+ bool read_only;
+};
+
+#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
+ .flag_string = _name, \
+ .flag = _flag, \
+ .read_only = _read_only, \
+}
+
+static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
+ I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
+};
+
+#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
+
/**
- * i40evf_get_settings - Get Link Speed and Duplex settings
+ * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
- * @ecmd: ethtool command
+ * @cmd: ethtool command
*
* Reports speed/duplex settings. Because this is a VF, we don't know what
* kind of link we really have, so we fake it.
**/
-static int i40evf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40evf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- ecmd->supported = 0;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = PORT_NONE;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
/* Set speed and duplex */
switch (adapter->link_speed) {
case I40E_LINK_SPEED_40GB:
- ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ cmd->base.speed = SPEED_40000;
break;
case I40E_LINK_SPEED_25GB:
#ifdef SPEED_25000
- ethtool_cmd_speed_set(ecmd, SPEED_25000);
+ cmd->base.speed = SPEED_25000;
#else
netdev_info(netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
case I40E_LINK_SPEED_20GB:
- ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ cmd->base.speed = SPEED_20000;
break;
case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
return 0;
}
@@ -125,6 +147,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev);
+ else if (sset == ETH_SS_PRIV_FLAGS)
+ return I40EVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -190,7 +214,83 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
+ } else if (sset == ETH_SS_PRIV_FLAGS) {
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40evf_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+/**
+ * i40evf_get_priv_flags - report device private flags
+ * @netdev: network interface device structure
+ *
+ * The string set reported by get_sset_count and the strings returned here
+ * must be kept in sync for each flag. Add new strings for each flag to the
+ * i40evf_gstrings_priv_flags array.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40evf_get_priv_flags(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u32 i, ret_flags = 0;
+
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40evf_priv_flags *priv_flags;
+
+ priv_flags = &i40evf_gstrings_priv_flags[i];
+
+ if (priv_flags->flag & adapter->flags)
+ ret_flags |= BIT(i);
+ }
+
+ return ret_flags;
+}
+
+/**
+ * i40evf_set_priv_flags - set private flags
+ * @netdev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u64 changed_flags;
+ u32 i;
+
+ changed_flags = adapter->flags;
+
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40evf_priv_flags *priv_flags;
+
+ priv_flags = &i40evf_gstrings_priv_flags[i];
+
+ if (priv_flags->read_only)
+ continue;
+
+ if (flags & BIT(i))
+ adapter->flags |= priv_flags->flag;
+ else
+ adapter->flags &= ~(priv_flags->flag);
+ }
+
+ /* check for flags that changed */
+ changed_flags ^= adapter->flags;
+
+ /* Process any additional changes needed as a result of flag changes. */
+
+ /* issue a reset to force legacy-rx change to take effect */
+ if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
+ if (netif_running(netdev)) {
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
}
+
+ return 0;
}
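
The XOR just above is the standard changed-bits trick: after the loop rewrites adapter->flags, old ^ new leaves set exactly the bits that toggled, so the reset is scheduled only when legacy-rx actually changed (from userspace the toggle would come from something like "ethtool --set-priv-flags <ifname> legacy-rx on"). A standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define FLAG_LEGACY_RX (1u << 20)	/* mirrors I40EVF_FLAG_LEGACY_RX */

int main(void)
{
	uint32_t old_flags = 0;
	uint32_t new_flags = FLAG_LEGACY_RX;	/* user enabled legacy-rx */
	uint32_t changed = old_flags ^ new_flags;

	if (changed & FLAG_LEGACY_RX)
		printf("legacy-rx toggled: schedule a reset\n");
	return 0;
}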
/**
@@ -239,6 +339,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -643,7 +744,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops i40evf_ethtool_ops = {
- .get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = i40evf_get_ringparam,
@@ -651,6 +751,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count,
+ .get_priv_flags = i40evf_get_priv_flags,
+ .set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
@@ -663,6 +765,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
.get_rxfh_key_size = i40evf_get_rxfh_key_size,
+ .get_link_ksettings = i40evf_get_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f35dcaac5bb7..ea110a730e16 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -26,6 +26,14 @@
#include "i40evf.h"
#include "i40e_prototype.h"
+#include "i40evf_client.h"
+/* All i40evf tracepoints are defined by the include below, which must
+ * be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
+
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
@@ -36,9 +44,9 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -489,7 +497,7 @@ static void i40evf_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &adapter->vsi.state))
+ if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
return;
for (i = 0; i < q_vectors; i++)
@@ -685,12 +693,39 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
**/
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
+ unsigned int rx_buf_len = I40E_RXBUFFER_2048;
struct i40e_hw *hw = &adapter->hw;
int i;
+ /* Legacy Rx will always default to a 2048 buffer size. */
+#if (PAGE_SIZE < 8192)
+ if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
+ struct net_device *netdev = adapter->netdev;
+
+ /* For jumbo frames on systems with 4K pages we have to use
+ * an order 1 page, so we might as well increase the size
+ * of our Rx buffer to make better use of the available space
+ */
+ rx_buf_len = I40E_RXBUFFER_3072;
+
+ /* We use a 1536 buffer size for configurations with
+ * standard Ethernet mtu. On x86 this gives us enough room
+ * for shared info and 192 bytes of padding.
+ */
+ if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+ (netdev->mtu <= ETH_DATA_LEN))
+ rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ }
+#endif
+
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
+ adapter->rx_rings[i].rx_buf_len = rx_buf_len;
+
+ if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
+ clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
+ else
+ set_ring_build_skb_enabled(&adapter->rx_rings[i]);
}
}
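
The buffer-size policy above, summarized as a pure function (constants as in this patch; a sketch, not driver code):

#include <stdio.h>

#define ETH_DATA_LEN	1500
#define NET_IP_ALIGN	2
#define RXBUF_1536	1536
#define RXBUF_2048	2048
#define RXBUF_3072	3072

static unsigned int pick_rx_buf_len(int legacy_rx, int two_k_too_small, int mtu)
{
	if (legacy_rx)
		return RXBUF_2048;
	if (!two_k_too_small && mtu <= ETH_DATA_LEN)
		return RXBUF_1536 - NET_IP_ALIGN;
	return RXBUF_3072;
}

int main(void)
{
	printf("legacy rx:    %u\n", pick_rx_buf_len(1, 0, 1500));
	printf("standard mtu: %u\n", pick_rx_buf_len(0, 0, 1500));
	printf("jumbo mtu:    %u\n", pick_rx_buf_len(0, 0, 9000));
	return 0;
}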
@@ -1053,11 +1088,13 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
adapter->state = __I40EVF_RUNNING;
- clear_bit(__I40E_DOWN, &adapter->vsi.state);
+ clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_napi_enable_all(adapter);
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1235,13 +1272,13 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
}
pairs = adapter->num_active_queues;
- /* It's easy to be greedy for MSI-X vectors, but it really
- * doesn't do us much good if we have a lot more vectors
- * than CPU's. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
+ * us much good if we have more vectors than CPUs. However, we already
+ * limit the total number of queues by the number of CPUs so we do not
+ * need any further limiting here.
*/
- v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
- v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
+ v_budget = min_t(int, pairs + NONQ_VECS,
+ (int)adapter->vf_res->max_vectors);
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
@@ -1472,6 +1509,13 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
int err;
+ err = i40evf_alloc_queues(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
rtnl_lock();
err = i40evf_set_interrupt_capability(adapter);
rtnl_unlock();
@@ -1488,23 +1532,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = i40evf_alloc_queues(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
(adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
adapter->num_active_queues);
return 0;
-err_alloc_queues:
- i40evf_free_q_vectors(adapter);
err_alloc_q_vectors:
i40evf_reset_interrupt_capability(adapter);
err_set_interrupt:
+ i40evf_free_queues(adapter);
+err_alloc_queues:
return err;
}
@@ -1685,6 +1722,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_set_promiscuous(adapter, 0);
goto watchdog_done;
}
+ schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
@@ -1716,7 +1754,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
if (netif_running(adapter->netdev)) {
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
adapter->link_up = false;
@@ -1773,10 +1811,17 @@ static void i40evf_reset_task(struct work_struct *work)
u32 reg_val;
int i = 0, err;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
-
+ if (CLIENT_ENABLED(adapter)) {
+ adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
+ I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
+ I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
+ I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
+ cancel_delayed_work_sync(&adapter->client_task);
+ i40evf_notify_client_close(&adapter->vsi, true);
+ }
i40evf_misc_irq_disable(adapter);
if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
@@ -1819,6 +1864,7 @@ static void i40evf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
i40evf_disable_vf(adapter);
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -1861,9 +1907,8 @@ continue_reset:
}
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- /* Open RDMA Client again */
- adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1980,6 +2025,48 @@ out:
}
/**
+ * i40evf_client_task - worker thread to perform client work
+ * @work: pointer to work_struct containing our data
+ *
+ * This task handles client interactions. Because client calls can be
+ * reentrant, we can't handle them in the watchdog.
+ **/
+static void i40evf_client_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, client_task.work);
+
+ /* If we can't get the client bit, just give up. We'll be rescheduled
+ * later.
+ */
+
+ if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
+ return;
+
+ if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
+ i40evf_client_subtask(adapter);
+ adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
+ i40evf_notify_client_close(&adapter->vsi, false);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
+ i40evf_notify_client_open(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ }
+out:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+}
+
+/**
* i40evf_free_all_tx_resources - Free Tx Resources for All Queues
* @adapter: board private structure
*
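The client task above exists because client callbacks may re-enter the driver, so they cannot run inside the watchdog itself; the watchdog only sets flags and queues the delayed work. A hedged sketch of that deferral pattern (structure and function names are illustrative):

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct my_adapter {
		struct delayed_work client_task;
	};

	static void my_client_task(struct work_struct *work)
	{
		struct my_adapter *a =
			container_of(work, struct my_adapter, client_task.work);
		/* reentrant client interactions run here, not in the watchdog */
		(void)a;
	}

	static void my_init(struct my_adapter *a)
	{
		INIT_DELAYED_WORK(&a->client_task, my_client_task);
	}

	static void my_watchdog(struct my_adapter *a)
	{
		/* cheap from the watchdog: just queue the work */
		schedule_delayed_work(&a->client_task, msecs_to_jiffies(5));
	}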
@@ -2147,7 +2234,9 @@ static int i40evf_close(struct net_device *netdev)
return 0;
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
i40evf_down(adapter);
adapter->state = __I40EVF_DOWN_PENDING;
@@ -2162,21 +2251,6 @@ static int i40evf_close(struct net_device *netdev)
}
/**
- * i40evf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
* i40evf_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2188,6 +2262,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
struct i40evf_adapter *adapter = netdev_priv(netdev);
netdev->mtu = new_mtu;
+ if (CLIENT_ENABLED(adapter)) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ }
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
@@ -2278,7 +2356,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_open = i40evf_open,
.ndo_stop = i40evf_close,
.ndo_start_xmit = i40evf_xmit_frame,
- .ndo_get_stats = i40evf_get_stats,
.ndo_set_rx_mode = i40evf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40evf_set_mac,
@@ -2328,6 +2405,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi;
int i;
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
/* got VF config message back from PF, now we can parse it */
for (i = 0; i < vfres->num_vsis; i++) {
@@ -2339,46 +2418,52 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
return -ENODEV;
}
- netdev->hw_enc_features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HIGHDMA |
- NETIF_F_SOFT_FEATURES |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+ /* advertise to stack only if offloads for encapsulated packets are
+ * supported
+ */
+ if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) {
+ hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
- NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL |
- NETIF_F_SCTP_CRC |
- NETIF_F_RXHASH |
- NETIF_F_RXCSUM |
0;
- if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
- netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ if (!(vfres->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ netdev->gso_partial_features |=
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->hw_enc_features |= hw_enc_features;
+ }
/* record features VLANs can make use of */
- netdev->vlan_features |= netdev->hw_enc_features |
- NETIF_F_TSO_MANGLEID;
+ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
/* Write features and hw_features separately to avoid polluting
- * with, or dropping, features that are set when we registgered.
+ * with, or dropping, features that are set when we registered.
*/
- netdev->hw_features |= netdev->hw_enc_features;
+ hw_features = hw_enc_features;
- netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
- netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->hw_features |= hw_features;
- /* disable VLAN features if not supported */
- if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
- netdev->features ^= I40EVF_VLAN_FEATURES;
+ netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
adapter->vsi.id = adapter->vsi_res->vsi_id;
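The rework above accumulates capabilities in local netdev_features_t masks and commits them once, so tunnel offloads are only advertised when the PF reports I40E_VIRTCHNL_VF_OFFLOAD_ENCAP. A reduced sketch of that pattern (flag selection trimmed for illustration):

	#include <linux/netdevice.h>

	static void set_features(struct net_device *netdev, bool encap_ok)
	{
		netdev_features_t feat = NETIF_F_SG | NETIF_F_IP_CSUM |
					 NETIF_F_RXCSUM;

		if (encap_ok)		/* gate tunnel offloads on capability */
			feat |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->hw_features |= feat;	/* user-toggleable features */
		netdev->features |= feat;	/* currently enabled features */
	}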
@@ -2519,9 +2604,6 @@ static void i40evf_init_task(struct work_struct *work)
goto err_alloc;
}
- if (hw->mac.type == I40E_MAC_X722_VF)
- adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE;
-
if (i40evf_process_config(adapter))
goto err_alloc;
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
@@ -2581,13 +2663,19 @@ static void i40evf_init_task(struct work_struct *work)
adapter->netdev_registered = true;
netif_tx_stop_all_queues(netdev);
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_add_device(adapter);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
+ err);
+ }
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
adapter->state = __I40EVF_DOWN;
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
@@ -2745,6 +2833,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, i40evf_reset_task);
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
schedule_delayed_work(&adapter->init_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
@@ -2857,14 +2946,21 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
+ int err;
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
-
+ cancel_delayed_work_sync(&adapter->client_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
adapter->netdev_registered = false;
}
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_del_device(adapter);
+ if (err)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ err);
+ }
/* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index bee58af390e1..deb2cb8dac6b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -26,6 +26,7 @@
#include "i40evf.h"
#include "i40e_prototype.h"
+#include "i40evf_client.h"
/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
@@ -158,7 +159,9 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_ENCAP |
+ I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
@@ -233,7 +236,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues;
- int i, len;
+ int i, len, max_frame = I40E_MAX_RXBUFFER;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -248,6 +251,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
if (!vqci)
return;
+ /* Limit maximum frame size when jumbo frames are not enabled */
+ if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
+ (adapter->netdev->mtu <= ETH_DATA_LEN))
+ max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -259,17 +267,14 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = adapter->tx_rings[i].count;
vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
- vqpi->txq.headwb_enabled = 1;
- vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
- (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
-
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
- vqpi->rxq.max_pkt_size = adapter->netdev->mtu
- + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
- vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ vqpi->rxq.max_pkt_size = max_frame;
+ vqpi->rxq.databuffer_size =
+ ALIGN(adapter->rx_rings[i].rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
vqpi++;
}
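The databuffer_size rounding above exists because the device takes the Rx buffer length in units of 1 << I40E_RXQ_CTX_DBUFF_SHIFT. A small sketch of the arithmetic (the shift value of 7, i.e. 128-byte units, is an assumption for illustration):

	#include <linux/kernel.h>	/* ALIGN() */
	#include <linux/bitops.h>	/* BIT_ULL() */

	#define DBUFF_SHIFT 7		/* assumed hardware granularity */

	static u32 aligned_bufsz(u32 buf_len)
	{
		/* round up to the next multiple of 1 << DBUFF_SHIFT */
		return ALIGN(buf_len, BIT_ULL(DBUFF_SHIFT));
	}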
@@ -955,17 +960,17 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case I40E_VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;
- adapter->net_stats.rx_packets = stats->rx_unicast +
- stats->rx_multicast +
- stats->rx_broadcast;
- adapter->net_stats.tx_packets = stats->tx_unicast +
- stats->tx_multicast +
- stats->tx_broadcast;
- adapter->net_stats.rx_bytes = stats->rx_bytes;
- adapter->net_stats.tx_bytes = stats->tx_bytes;
- adapter->net_stats.tx_errors = stats->tx_errors;
- adapter->net_stats.rx_dropped = stats->rx_discards;
- adapter->net_stats.tx_dropped = stats->tx_discards;
+ netdev->stats.rx_packets = stats->rx_unicast +
+ stats->rx_multicast +
+ stats->rx_broadcast;
+ netdev->stats.tx_packets = stats->tx_unicast +
+ stats->tx_multicast +
+ stats->tx_broadcast;
+ netdev->stats.rx_bytes = stats->rx_bytes;
+ netdev->stats.tx_bytes = stats->tx_bytes;
+ netdev->stats.tx_errors = stats->tx_errors;
+ netdev->stats.rx_dropped = stats->rx_discards;
+ netdev->stats.tx_dropped = stats->tx_discards;
adapter->current_stats = *stats;
}
break;
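Writing the counters straight into netdev->stats is what allows the .ndo_get_stats callback to be deleted earlier in this patch: when a driver provides neither ndo_get_stats64 nor ndo_get_stats, dev_get_stats() falls back to the counters embedded in struct net_device. Minimal illustration:

	#include <linux/netdevice.h>

	static void update_counters(struct net_device *netdev,
				    unsigned long rx_pkts,
				    unsigned long tx_pkts)
	{
		/* the stack reads these via the dev_get_stats() fallback */
		netdev->stats.rx_packets = rx_pkts;
		netdev->stats.tx_packets = tx_pkts;
	}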
@@ -999,6 +1004,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_IWARP:
+ /* Gobble zero-length replies from the PF. They indicate that
+ * a previous message was received OK, and the client doesn't
+ * care about that.
+ */
+ if (msglen && CLIENT_ENABLED(adapter))
+ i40evf_notify_client_message(&adapter->vsi,
+ msg, msglen);
+ break;
+
case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
adapter->client_pending &=
~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
@@ -1014,7 +1029,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
default:
- if (v_opcode != adapter->current_op)
+ if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
adapter->current_op, v_opcode);
break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 8aee314332a8..d8517779439b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -39,6 +39,27 @@
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+/* Wake Up Status */
+#define E1000_WUS_EX 0x00000004 /* Directed Exact */
+#define E1000_WUS_ARPD 0x00000020 /* Directed ARP Request */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 */
+#define E1000_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */
+
+/* Packet types that are enabled for wake packet delivery */
+#define WAKE_PKT_WUS ( \
+ E1000_WUS_EX | \
+ E1000_WUS_ARPD | \
+ E1000_WUS_IPV4 | \
+ E1000_WUS_IPV6 | \
+ E1000_WUS_NSD)
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_MASK 0x00000FFF
+
+/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
+#define E1000_WUPM_BYTES 128
+
/* Extended Device Control */
#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
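The new WAKE_PKT_WUS mask groups the directed wake-packet status bits so a single test decides whether the stored wake packet should be retrieved. A hedged example of that test, using the defines added above (u32/bool from <linux/types.h>; the register-read step is left abstract):

	static bool wake_was_directed_pkt(u32 wus)
	{
		/* any of EX/ARPD/IPV4/IPV6/NSD set => packet is in WUPM */
		return !!(wus & WAKE_PKT_WUS);
	}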
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index d20af6b2f581..3e7fed73df15 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -55,6 +55,10 @@
#define E1000_VF_RESET 0x01 /* VF requests reset */
#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+/* VF requests to clear all unicast MAC filters */
+#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT)
+/* VF requests to add unicast MAC filter */
+#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
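With the two new sub-commands the VF encodes the filter operation in the MSGINFO field on top of the E1000_VF_SET_MAC_ADDR opcode. An illustrative composition of such a mailbox request, assuming the buffer layout that igb_set_vf_mac_addr() parses later in this patch (MAC bytes packed from the second 32-bit word):

	#include <linux/etherdevice.h>
	#include <linux/string.h>

	static void build_mac_filter_add(u32 *msgbuf, const u8 *mac)
	{
		/* opcode in the low bits, sub-command in the MSGINFO bits */
		msgbuf[0] = E1000_VF_SET_MAC_ADDR | E1000_VF_MAC_FILTER_ADD;
		memcpy(&msgbuf[1], mac, ETH_ALEN);
	}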
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index acbc3abe2ddd..bf9bf9056d0c 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -111,6 +111,16 @@ struct vf_data_storage {
bool spoofchk_enabled;
};
+/* Number of unicast MAC filters reserved for the PF in the RAR registers */
+#define IGB_PF_MAC_FILTERS_RESERVED 3
+
+struct vf_mac_filter {
+ struct list_head l;
+ int vf;
+ bool free;
+ u8 vf_mac[ETH_ALEN];
+};
+
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
@@ -142,12 +152,24 @@ struct vf_data_storage {
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_2048 2048
+#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
-#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
+#define IGB_TS_HDR_LEN 16
+
+#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IGB_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#else
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#endif
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IGB_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
#define AUTO_ALL_MODES 0
#define IGB_EEPROM_APME 0x0400
@@ -301,12 +323,51 @@ struct igb_q_vector {
};
enum e1000_ring_flags_t {
+ IGB_RING_FLAG_RX_3K_BUFFER,
+ IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_large_buffer(ring) \
+ test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define set_ring_uses_large_buffer(ring) \
+ set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define clear_ring_uses_large_buffer(ring) \
+ clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+#define ring_uses_build_skb(ring) \
+ test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
+static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return IGB_RXBUFFER_3072;
+
+ if (ring_uses_build_skb(ring))
+ return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+#endif
+ return IGB_RXBUFFER_2048;
+}
+
+static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return 1;
+#endif
+ return 0;
+}
+
+#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
+
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
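The helpers above centralize the buffer-size decision: on 4K-page systems a 3K buffer needs an order-1 page, everything else fits an order-0 page. An illustrative allocation site using them (dev_alloc_pages() is the stock page allocator for Rx buffers):

	static struct page *rx_alloc_page(struct igb_ring *ring)
	{
		return dev_alloc_pages(igb_rx_pg_order(ring));
	}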
#define IGB_RX_DESC(R, i) \
@@ -398,6 +459,15 @@ struct igb_nfc_filter {
u16 action;
};
+struct igb_mac_addr {
+ u8 addr[ETH_ALEN];
+ u8 queue;
+ u8 state; /* bitmask */
+};
+
+#define IGB_MAC_STATE_DEFAULT 0x1
+#define IGB_MAC_STATE_IN_USE 0x2
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -524,6 +594,10 @@ struct igb_adapter {
/* lock for RX network flow classification filter */
spinlock_t nfc_lock;
bool etype_bitmap[MAX_ETYPE_FILTER];
+
+ struct igb_mac_addr *mac_table;
+ struct vf_mac_filter vf_macs;
+ struct vf_mac_filter *vf_mac_list;
};
/* flags controlling PTP/1588 function */
@@ -545,6 +619,7 @@ struct igb_adapter {
#define IGB_FLAG_HAS_MSIX BIT(13)
#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
+#define IGB_FLAG_RX_LEGACY BIT(16)
/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
@@ -558,7 +633,6 @@ struct igb_adapter {
#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
#define IGB_82576_TSYNC_SHIFT 19
-#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
@@ -591,7 +665,6 @@ void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -604,7 +677,7 @@ void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 737b664d004c..0efb62db6efd 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -144,7 +144,15 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
-static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings)
+
+static int igb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -152,76 +160,73 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
u32 speed;
+ u32 supported, advertising;
status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP |
- SUPPORTED_Pause);
- ecmd->advertising = ADVERTISED_TP;
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_Pause);
+ advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
+ advertising |= hw->phy.autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
} else {
- ecmd->supported = (SUPPORTED_FIBRE |
- SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause);
- ecmd->advertising = (ADVERTISED_FIBRE |
- ADVERTISED_1000baseKX_Full);
+ supported = (SUPPORTED_FIBRE |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
+ advertising = (ADVERTISED_FIBRE |
+ ADVERTISED_1000baseKX_Full);
if (hw->mac.type == e1000_i354) {
if ((hw->device_id ==
E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
!(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->supported |= SUPPORTED_2500baseX_Full;
- ecmd->supported &=
- ~SUPPORTED_1000baseKX_Full;
- ecmd->advertising |= ADVERTISED_2500baseX_Full;
- ecmd->advertising &=
- ~ADVERTISED_1000baseKX_Full;
+ supported |= SUPPORTED_2500baseX_Full;
+ supported &= ~SUPPORTED_1000baseKX_Full;
+ advertising |= ADVERTISED_2500baseX_Full;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
if (eth_flags->e100_base_fx) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
}
if (hw->mac.autoneg == 1)
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
if (hw->mac.autoneg != 1)
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
switch (hw->fc.requested_mode) {
case e1000_fc_full:
- ecmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
break;
case e1000_fc_rx_pause:
- ecmd->advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
break;
case e1000_fc_tx_pause:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
}
if (status & E1000_STATUS_LU) {
if ((status & E1000_STATUS_2P5_SKU) &&
@@ -236,39 +241,46 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
speed = SPEED_UNKNOWN;
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_cmd_speed_set(ecmd, speed);
+ cmd->base.speed = speed;
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if (hw->phy.media_type == e1000_media_type_copper)
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
ETH_TP_MDI;
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->phy.mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int igb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 advertising;
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
@@ -283,12 +295,12 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->phy.media_type != e1000_media_type_copper)
return -EOPNOTSUPP;
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
return -EINVAL;
}
@@ -297,10 +309,13 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber) {
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
switch (adapter->link_speed) {
@@ -320,31 +335,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
break;
}
} else {
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
}
- ecmd->advertising = hw->phy.autoneg_advertised;
+ advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
/* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->phy.mdix = AUTO_ALL_MODES;
else
- hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -852,6 +867,8 @@ static void igb_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
}
static void igb_get_ringparam(struct net_device *netdev,
@@ -1811,14 +1828,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
tx_ntc = tx_ring->next_to_clean;
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
+ while (rx_desc->wb.upper.length) {
/* check Rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* sync Rx buffer for CPU read */
dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer_info->dma,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* verify contents of skb */
@@ -1828,12 +1845,21 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
rx_buffer_info->dma,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer_info->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer_info, dma),
+ dma_unmap_len(tx_buffer_info, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer_info, len, 0);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -2271,6 +2297,8 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
return IGB_STATS_LEN;
case ETH_SS_TEST:
return IGB_TEST_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IGB_PRIV_FLAGS_STR_LEN;
default:
return -ENOTSUPP;
}
@@ -2376,6 +2404,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, igb_priv_flags_strings,
+ IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -3388,9 +3420,38 @@ static int igb_set_channels(struct net_device *netdev,
return 0;
}
+static u32 igb_get_priv_flags(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags & IGB_FLAG_RX_LEGACY)
+ priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags = adapter->flags;
+
+ flags &= ~IGB_FLAG_RX_LEGACY;
+ if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX)
+ flags |= IGB_FLAG_RX_LEGACY;
+
+ if (flags != adapter->flags) {
+ adapter->flags = flags;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
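Once these callbacks are wired into igb_ethtool_ops below, the flag is driven from userspace with the standard ethtool private-flags commands, for example (interface name illustrative):

	ethtool --show-priv-flags eth0
	ethtool --set-priv-flags eth0 legacy-rx on

Toggling the flag while the interface is up takes the igb_reinit_locked() path above, so the rings are rebuilt with the new buffer scheme.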
static const struct ethtool_ops igb_ethtool_ops = {
- .get_settings = igb_get_settings,
- .set_settings = igb_set_settings,
.get_drvinfo = igb_get_drvinfo,
.get_regs_len = igb_get_regs_len,
.get_regs = igb_get_regs,
@@ -3426,8 +3487,12 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_rxfh = igb_set_rxfh,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
+ .get_priv_flags = igb_get_priv_flags,
+ .set_priv_flags = igb_set_priv_flags,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
+ .get_link_ksettings = igb_get_link_ksettings,
+ .set_link_ksettings = igb_set_link_ksettings,
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index be456bae8169..1cf74aa4ebd9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -161,11 +161,16 @@ static void igb_vlan_mode(struct net_device *netdev,
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
-static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
+static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
+static void igb_flush_mac_table(struct igb_adapter *);
+static int igb_available_rars(struct igb_adapter *, u8);
+static void igb_set_default_mac_filter(struct igb_adapter *);
+static int igb_uc_sync(struct net_device *, const unsigned char *);
+static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
@@ -554,7 +559,7 @@ rx_ring_summary:
16, 1,
page_address(buffer_info->page) +
buffer_info->page_offset,
- IGB_RX_BUFSZ, true);
+ igb_rx_bufsz(rx_ring), true);
}
}
}
@@ -1153,6 +1158,8 @@ msi_only:
pci_disable_sriov(adapter->pdev);
msleep(500);
+ kfree(adapter->vf_mac_list);
+ adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
@@ -1987,6 +1994,13 @@ void igb_reset(struct igb_adapter *adapter)
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
+ /* RAR registers were cleared during init_hw, clear mac table */
+ igb_flush_mac_table(adapter);
+ __dev_uc_unsync(adapter->netdev, NULL);
+
+ /* Recover default RAR entry */
+ igb_set_default_mac_filter(adapter);
+
/* Flow control settings reset on hardware reset, so guarantee flow
* control is off when forcing speed.
*/
@@ -2095,11 +2109,9 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
struct igb_adapter *adapter = netdev_priv(dev);
- struct e1000_hw *hw = &adapter->hw;
int vfn = adapter->vfs_allocated_count;
- int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
- if (netdev_uc_count(dev) >= rar_entries)
+ if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
return -ENOMEM;
}
@@ -2517,6 +2529,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
+ igb_set_default_mac_filter(adapter);
+
/* get firmware version for ethtool -i */
igb_set_fw_version(adapter);
@@ -2761,6 +2775,7 @@ err_eeprom:
if (hw->flash_address)
iounmap(hw->flash_address);
err_sw_init:
+ kfree(adapter->mac_table);
kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
@@ -2796,6 +2811,8 @@ static int igb_disable_sriov(struct pci_dev *pdev)
msleep(500);
}
+ kfree(adapter->vf_mac_list);
+ adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
@@ -2816,8 +2833,9 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
int old_vfs = pci_num_vf(pdev);
+ struct vf_mac_filter *mac_list;
int err = 0;
- int i;
+ int num_vf_mac_filters, i;
if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
err = -EPERM;
@@ -2845,6 +2863,38 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
goto out;
}
+ /* Due to the limited number of RAR entries, calculate the potential
+ * number of MAC filters available for the VFs. Reserve entries
+ * for the PF default MAC, PF MAC filters, and at least one RAR entry
+ * for each VF's MAC address.
+ */
+ num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
+ (1 + IGB_PF_MAC_FILTERS_RESERVED +
+ adapter->vfs_allocated_count);
+
+ adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
+ sizeof(struct vf_mac_filter),
+ GFP_KERNEL);
+
+ mac_list = adapter->vf_mac_list;
+ INIT_LIST_HEAD(&adapter->vf_macs.l);
+
+ if (adapter->vf_mac_list) {
+ /* Initialize list of VF MAC filters */
+ for (i = 0; i < num_vf_mac_filters; i++) {
+ mac_list->vf = -1;
+ mac_list->free = true;
+ list_add(&mac_list->l, &adapter->vf_macs.l);
+ mac_list++;
+ }
+ } else {
+ /* If we could not allocate memory for the VF MAC filters,
+ * we can continue without this feature but warn the user.
+ */
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for VF MAC filter list\n");
+ }
+
/* only call pci_enable_sriov() if no VFs are allocated already */
if (!old_vfs) {
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
@@ -2861,6 +2911,8 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
goto out;
err_out:
+ kfree(adapter->vf_mac_list);
+ adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
@@ -2937,6 +2989,7 @@ static void igb_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_mem_regions(pdev);
+ kfree(adapter->mac_table);
kfree(adapter->shadow_vfta);
free_netdev(netdev);
@@ -3099,6 +3152,11 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* Assume MSI-X interrupts, will be checked during IRQ allocation */
adapter->flags |= IGB_FLAG_HAS_MSIX;
+ adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
+ hw->mac.rar_entry_count, GFP_ATOMIC);
+ if (!adapter->mac_table)
+ return -ENOMEM;
+
igb_probe_vfs(adapter);
igb_init_queue_configuration(adapter);
@@ -3293,7 +3351,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -3404,6 +3462,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
txdctl |= IGB_TX_HTHRESH << 8;
txdctl |= IGB_TX_WTHRESH << 16;
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct igb_tx_buffer) * ring->count);
+
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
wr32(E1000_TXDCTL(reg_idx), txdctl);
}
@@ -3435,7 +3497,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -3720,6 +3782,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
+ union e1000_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0;
@@ -3741,7 +3804,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -3758,11 +3824,39 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct igb_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IGB_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* enable receive descriptor fetching */
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring)
+{
+ /* set build_skb and buffer size flags */
+ clear_ring_build_skb_enabled(rx_ring);
+ clear_ring_uses_large_buffer(rx_ring);
+
+ if (adapter->flags & IGB_FLAG_RX_LEGACY)
+ return;
+
+ set_ring_build_skb_enabled(rx_ring);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ return;
+
+ set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3774,14 +3868,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
int i;
/* set the correct pool for the PF default MAC address in entry 0 */
- igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
- adapter->vfs_allocated_count);
+ igb_set_default_mac_filter(adapter);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = adapter->rx_ring[i];
+
+ igb_set_rx_buffer_len(adapter, rx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+ }
}
/**
@@ -3822,55 +3919,63 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
igb_free_tx_resources(adapter->tx_ring[i]);
}
-void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
- struct igb_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* buffer_info must be completely set up in the transmit path */
-}
-
/**
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
- struct igb_tx_buffer *buffer_info;
- unsigned long size;
- u16 i;
+ u16 i = tx_ring->next_to_clean;
+ struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (!tx_ring->tx_buffer_info)
- return;
- /* Free all the Tx ring sk_buffs */
+ while (i != tx_ring->next_to_use) {
+ union e1000_adv_tx_desc *eop_desc, *tx_desc;
- for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
- }
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
- netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
- size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ }
- /* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
}
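The descriptor walk above advances its index with the usual ring-wrap idiom, resetting to zero at the ring size rather than taking a modulo on every step. Reduced to a sketch:

	static u16 ring_next(u16 i, u16 count)
	{
		return (++i == count) ? 0 : i;
	}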
@@ -3932,50 +4037,39 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
**/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
- unsigned long size;
- u16 i;
+ u16 i = rx_ring->next_to_clean;
if (rx_ring->skb)
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
- if (!rx_ring->rx_buffer_info)
- return;
-
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
+ while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- if (!buffer_info->page)
- continue;
-
/* Invalidate cache lines that may have been written to by
* device so that we avoid corrupting memory.
*/
dma_sync_single_range_for_cpu(rx_ring->dev,
buffer_info->dma,
buffer_info->page_offset,
- IGB_RX_BUFSZ,
+ igb_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
/* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev,
buffer_info->dma,
- PAGE_SIZE,
+ igb_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ IGB_RX_DMA_ATTR);
__page_frag_cache_drain(buffer_info->page,
buffer_info->pagecnt_bias);
- buffer_info->page = NULL;
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
}
- size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -4014,8 +4108,7 @@ static int igb_set_mac(struct net_device *netdev, void *p)
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
- igb_rar_set_qsel(adapter, hw->mac.addr, 0,
- adapter->vfs_allocated_count);
+ igb_set_default_mac_filter(adapter);
return 0;
}
@@ -4059,49 +4152,6 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
return netdev_mc_count(netdev);
}
-/**
- * igb_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
- **/
-static int igb_write_uc_addr_list(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- unsigned int vfn = adapter->vfs_allocated_count;
- unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
- int count = 0;
-
- /* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
- return -ENOMEM;
-
- if (!netdev_uc_empty(netdev) && rar_entries) {
- struct netdev_hw_addr *ha;
-
- netdev_for_each_uc_addr(ha, netdev) {
- if (!rar_entries)
- break;
- igb_rar_set_qsel(adapter, ha->addr,
- rar_entries--,
- vfn);
- count++;
- }
- }
- /* write the addresses in reverse order to avoid write combining */
- for (; rar_entries > 0 ; rar_entries--) {
- wr32(E1000_RAH(rar_entries), 0);
- wr32(E1000_RAL(rar_entries), 0);
- }
- wrfl();
-
- return count;
-}
-
static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -4240,7 +4290,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
unsigned int vfn = adapter->vfs_allocated_count;
- u32 rctl = 0, vmolr = 0;
+ u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
int count;
/* Check for Promiscuous and All Multicast modes */
@@ -4274,8 +4324,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = igb_write_uc_addr_list(netdev);
- if (count < 0) {
+ if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
rctl |= E1000_RCTL_UPE;
vmolr |= E1000_VMOLR_ROPE;
}
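__dev_uc_sync() walks the netdev unicast list, invoking the sync callback for addresses not yet pushed to hardware and the unsync callback for stale ones; a nonzero return from sync propagates up, which is what lets the code above fall back to unicast promiscuous mode when the RAR table is full. The expected callback shape (body reduced to a stub):

	static int my_uc_sync(struct net_device *netdev,
			      const unsigned char *addr)
	{
		/* 0 = programmed; negative errno (e.g. -ENOSPC) = no room */
		return 0;
	}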
@@ -4298,6 +4347,14 @@ static void igb_set_rx_mode(struct net_device *netdev)
E1000_RCTL_VFE);
wr32(E1000_RCTL, rctl);
+#if (PAGE_SIZE < 8192)
+ if (!adapter->vfs_allocated_count) {
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ rlpml = IGB_MAX_FRAME_BUILD_SKB;
+ }
+#endif
+ wr32(E1000_RLPML, rlpml);
+
/* In order to support SR-IOV and eventually VMDq it is necessary to set
* the VMOLR to enable the appropriate modes. Without this workaround
* we will have issues with VLAN tag stripping not being done for frames
@@ -4312,12 +4369,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= rd32(E1000_VMOLR(vfn)) &
~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
- /* enable Rx jumbo frames, no need for restriction */
+ /* enable Rx jumbo frames, restrict as needed to support build_skb */
vmolr &= ~E1000_VMOLR_RLPML_MASK;
- vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE;
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ vmolr |= IGB_MAX_FRAME_BUILD_SKB;
+ else
+#endif
+ vmolr |= MAX_JUMBO_FRAME_SIZE;
+ vmolr |= E1000_VMOLR_LPE;
wr32(E1000_VMOLR(vfn), vmolr);
- wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
igb_restore_vf_multicasts(adapter);
}
@@ -5256,18 +5318,32 @@ static void igb_tx_map(struct igb_ring *tx_ring,
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i--)
+ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
tx_ring->next_to_use = i;
}
@@ -5339,7 +5415,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
- igb_unmap_and_free_tx_resource(tx_ring, first);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
@@ -6304,7 +6381,6 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- int rar_entry = hw->mac.rar_entry_count - (vf + 1);
u32 reg, msgbuf[3];
u8 *addr = (u8 *)(&msgbuf[1]);
@@ -6312,7 +6388,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
igb_vf_reset(adapter, vf);
/* set vf mac address */
- igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
+ igb_set_vf_mac(adapter, vf, vf_mac);
/* enable transmit and receive for vf */
reg = rd32(E1000_VFTE);
@@ -6332,18 +6408,238 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
igb_write_mbx(hw, msgbuf, 3, vf);
}
+static void igb_flush_mac_table(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ igb_rar_set_index(adapter, i);
+ }
+}
+
+static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ /* do not count RAR entries reserved for the VFs' MAC addresses */
+ int rar_entries = hw->mac.rar_entry_count -
+ adapter->vfs_allocated_count;
+ int i, count = 0;
+
+ for (i = 0; i < rar_entries; i++) {
+ /* do not count default entries */
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
+ continue;
+
+ /* do not count "in use" entries for different queues */
+ if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
+ (adapter->mac_table[i].queue != queue))
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
+/* Set default MAC address for the PF in the first RAR entry */
+static void igb_set_default_mac_filter(struct igb_adapter *adapter)
+{
+ struct igb_mac_addr *mac_table = &adapter->mac_table[0];
+
+ ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
+ mac_table->queue = adapter->vfs_allocated_count;
+ mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
+
+ igb_rar_set_index(adapter, 0);
+}
+
+int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count -
+ adapter->vfs_allocated_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE;
+
+ igb_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count -
+ adapter->vfs_allocated_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+
+ igb_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
+
+ return min_t(int, ret, 0);
+}
+
+static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
+
+ return 0;
+}
+
+int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
+ const u32 info, const u8 *addr)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ struct list_head *pos;
+ struct vf_mac_filter *entry = NULL;
+ int ret = 0;
+
+ switch (info) {
+ case E1000_VF_MAC_FILTER_CLR:
+ /* remove all unicast MAC filters related to the current VF */
+ list_for_each(pos, &adapter->vf_macs.l) {
+ entry = list_entry(pos, struct vf_mac_filter, l);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ igb_del_mac_filter(adapter, entry->vf_mac, vf);
+ }
+ }
+ break;
+ case E1000_VF_MAC_FILTER_ADD:
+ if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) {
+ dev_warn(&pdev->dev,
+ "VF %d requested MAC filter but is administratively denied\n",
+ vf);
+ return -EINVAL;
+ }
+
+ if (!is_valid_ether_addr(addr)) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to set invalid MAC filter\n",
+ vf);
+ return -EINVAL;
+ }
+
+ /* try to find empty slot in the list */
+ list_for_each(pos, &adapter->vf_macs.l) {
+ entry = list_entry(pos, struct vf_mac_filter, l);
+ if (entry->free)
+ break;
+ }
+
+ if (entry && entry->free) {
+ entry->free = false;
+ entry->vf = vf;
+ ether_addr_copy(entry->vf_mac, addr);
+
+ ret = igb_add_mac_filter(adapter, addr, vf);
+ ret = min_t(int, ret, 0);
+ } else {
+ ret = -ENOSPC;
+ }
+
+ if (ret == -ENOSPC)
+ dev_warn(&pdev->dev,
+ "VF %d has requested MAC filter but there is no space for it\n",
+ vf);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
+ struct pci_dev *pdev = adapter->pdev;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
+
/* The VF MAC Address is stored in a packed array of bytes
* starting at the second 32 bit word of the msg array
*/
- unsigned char *addr = (char *)&msg[1];
- int err = -1;
+ unsigned char *addr = (unsigned char *)&msg[1];
+ int ret = 0;
- if (is_valid_ether_addr(addr))
- err = igb_set_vf_mac(adapter, vf, addr);
+ if (!info) {
+ if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
+ vf);
+ return -EINVAL;
+ }
- return err;
+ if (!is_valid_ether_addr(addr)) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to set invalid MAC\n",
+ vf);
+ return -EINVAL;
+ }
+
+ ret = igb_set_vf_mac(adapter, vf, addr);
+ } else {
+ ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
+ }
+
+ return ret;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
@@ -6400,13 +6696,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
switch ((msgbuf[0] & 0xFFFF)) {
case E1000_VF_SET_MAC_ADDR:
- retval = -EINVAL;
- if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
- retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
- else
- dev_warn(&pdev->dev,
- "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
- vf);
+ retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
break;
case E1000_VF_SET_PROMISC:
retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -6686,7 +6976,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* clear last DMA location and unmap remaining buffers */
@@ -6822,8 +7111,14 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls.
+ */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
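Copying the four members one at a time, instead of assigning the whole struct, keeps each store the same width as the load that later reads it; a single wide struct copy followed by narrow per-member reloads can defeat the CPU's store-to-load forwarding and stall. The two forms side by side, with illustrative types only:

	struct buf { unsigned long dma; void *page; unsigned int off; unsigned short bias; };

	static void copy_whole(struct buf *dst, const struct buf *src)
	{
		*dst = *src;		/* may emit wide stores that later
					 * narrow loads cannot forward from */
	}

	static void copy_members(struct buf *dst, const struct buf *src)
	{
		dst->dma  = src->dma;	/* store width matches later loads */
		dst->page = src->page;
		dst->off  = src->off;
		dst->bias = src->bias;
	}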
static inline bool igb_page_is_reserved(struct page *page)
@@ -6831,11 +7126,10 @@ static inline bool igb_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
- struct page *page,
- unsigned int truesize)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* avoid re-using remote pages */
if (unlikely(igb_page_is_reserved(page)))
@@ -6843,16 +7137,13 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_ref_count(page) != pagecnt_bias))
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+#define IGB_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ if (rx_buffer->page_offset > IGB_LAST_OFFSET)
return false;
#endif
@@ -6860,7 +7151,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(pagecnt_bias == 1)) {
+ if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
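The pagecnt_bias scheme keeps page recycling off the atomic refcount fast path: the driver bumps the real page refcount by USHRT_MAX once, pays for each reference it hands to the stack by decrementing a private bias, and a page is reusable whenever page_ref_count(page) - pagecnt_bias shows nobody else holding it. When the bias runs out, a single page_ref_add(page, USHRT_MAX) restocks it, amortizing the atomic over roughly 64K handoffs. A toy model of the accounting, with names invented for illustration:

	struct page_model { int refs; };	/* stand-in for the atomic refcount */

	struct rx_buf_model {
		struct page_model *page;
		unsigned int bias;	/* references the driver still owns */
	};

	/* hand one reference to the stack: no atomic op needed */
	static void model_give_ref(struct rx_buf_model *b)
	{
		b->bias--;
	}

	/* recycle only if the driver's references are the only ones left */
	static int model_can_reuse(const struct rx_buf_model *b)
	{
		return (b->page->refs - b->bias) <= 1;
	}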
@@ -6872,34 +7163,56 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of buffer to be added
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
**/
-static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+static void igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- unsigned int size,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = IGB_RX_BUFSZ;
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+#endif
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
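On PAGE_SIZE < 8192 systems each page is split into two truesize halves and page_offset simply XORs between them, so successive uses ping-pong across the page; on larger pages the offset advances linearly until the IGB_LAST_OFFSET bound seen earlier rejects reuse. A toy demonstration of the flip, assuming a 4 KiB page split into 2 KiB halves:

	#include <assert.h>

	static void demo_offset_flip(void)
	{
		const unsigned int truesize = 2048;	/* half of a 4 KiB page */
		unsigned int offset = 0;

		offset ^= truesize;	/* first half  -> second half */
		assert(offset == 2048);
		offset ^= truesize;	/* second half -> first half  */
		assert(offset == 0);
	}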
+
+static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
- unsigned int pull_len;
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
+ if (unlikely(!skb))
+ return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
@@ -6907,95 +7220,73 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
size -= IGB_TS_HDR_LEN;
}
- if (likely(size <= IGB_RX_HDR_LEN)) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!igb_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- return false;
- }
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IGB_RX_HDR_LEN)
+ headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
- va += pull_len;
- size -= pull_len;
-
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ (va + headlen) - page_address(rx_buffer->page),
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
- return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+ return skb;
}
-static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ unsigned int size)
{
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
- struct igb_rx_buffer *rx_buffer;
- struct page *page;
-
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_failed++;
- return NULL;
- }
+ /* build an skb around the page buffer */
+ skb = build_skb(va - IGB_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IGB_SKB_PAD);
+ __skb_put(skb, size);
- /* pull page into skb */
- if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- igb_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- /* We are not reusing the buffer so unmap it and free
- * any references we are holding to it
- */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
+ /* pull timestamp out of packet data */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ __skb_pull(skb, IGB_TS_HDR_LEN);
}
- /* clear contents of rx_buffer */
- rx_buffer->page = NULL;
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
return skb;
}
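Because igb_build_skb() wraps the skb around the DMA buffer in place, the buffer itself must carry the skb headroom in front of the frame and room for struct skb_shared_info behind it; that is what the truesize expression on the PAGE_SIZE >= 8192 path accounts for. A sketch of the computation with the kernel macros mocked up (320 bytes for the aligned shared-info struct is only an assumption for a 64-byte-cache-line build):

	/* stand-ins for SKB_DATA_ALIGN() / sizeof(struct skb_shared_info) */
	#define CACHE_LINE	64u
	#define DATA_ALIGN(x)	(((x) + CACHE_LINE - 1) & ~(CACHE_LINE - 1))
	#define SHINFO_ALIGNED	320u	/* assumed, configuration-dependent */

	static unsigned int build_skb_truesize(unsigned int pad, unsigned int size)
	{
		/* headroom + frame, cache-aligned, plus the tail shared info */
		return SHINFO_ALIGNED + DATA_ALIGN(pad + size);
	}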
@@ -7154,6 +7445,47 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+ const unsigned int size)
+{
+ struct igb_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer)
+{
+ if (igb_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ IGB_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+}
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
struct igb_ring *rx_ring = q_vector->rx.ring;
@@ -7163,6 +7495,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *rx_buffer;
+ unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -7171,8 +7505,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
}
rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!rx_desc->wb.upper.status_error)
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
break;
/* This memory barrier is needed to keep us from reading
@@ -7181,13 +7515,25 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
*/
dma_rmb();
+ rx_buffer = igb_get_rx_buffer(rx_ring, size);
+
/* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (skb)
+ igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ else
+ skb = igb_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ rx_buffer->pagecnt_bias++;
break;
+ }
+ igb_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
@@ -7231,6 +7577,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
return total_packets;
}
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
@@ -7242,21 +7593,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ igb_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGB_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
+ __free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
return false;
@@ -7264,7 +7617,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = igb_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
return true;
@@ -7279,6 +7632,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ u16 bufsz;
/* nothing to do */
if (!cleaned_count)
@@ -7288,14 +7642,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
+ bufsz = igb_rx_bufsz(rx_ring);
+
do {
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- IGB_RX_BUFSZ,
+ bi->page_offset, bufsz,
DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
@@ -7312,8 +7667,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
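After this change the written-back length doubles as the descriptor-done signal: igb_clean_rx_irq() stops when it reads a zero length, so the refill path must zero wb.upper.length (rather than the status word) when posting a descriptor, or stale lengths from the previous trip around the ring would look like completed receives. A sketch of the producer/consumer contract, with hypothetical helper names:

	#include <stdint.h>

	/* consumer side: non-zero write-back length <=> hardware wrote it */
	static int desc_done(const volatile uint16_t *wb_len_le)
	{
		return *wb_len_le != 0;	/* the driver follows this with dma_rmb() */
	}

	/* producer side: posting a buffer must clear the stale length */
	static void post_desc(volatile uint16_t *wb_len_le)
	{
		*wb_len_le = 0;
	}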
@@ -7630,6 +7985,36 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
return 0;
}
+static void igb_deliver_wake_packet(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sk_buff *skb;
+ u32 wupl;
+
+ wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
+
+ /* WUPM stores only the first 128 bytes of the wake packet.
+ * Read the packet only if we have the whole thing.
+ */
+ if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
+ return;
+
+ skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
+ if (!skb)
+ return;
+
+ skb_put(skb, wupl);
+
+ /* Ensure reads are 32-bit aligned */
+ wupl = roundup(wupl, 4);
+
+ memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+}
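Note the ordering above: skb_put() runs with the original wupl, so the packet length seen by the stack stays exact, while the copy length is rounded up to a 4-byte multiple, presumably so memcpy_fromio() only issues aligned 32-bit reads against the WUPM window. For example:

	static unsigned int roundup4(unsigned int len)
	{
		return (len + 3u) & ~3u;	/* roundup(len, 4) */
	}

	/* roundup4(61) == 64: skb->len stays 61, but 64 bytes are read
	 * from the device; the skb was sized for E1000_WUPM_BYTES, so
	 * the extra tail bytes land in allocated space. */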
+
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
@@ -7659,7 +8044,7 @@ static int igb_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 err;
+ u32 err, val;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -7690,6 +8075,10 @@ static int igb_resume(struct device *dev)
*/
igb_get_hw_control(adapter);
+ val = rd32(E1000_WUS);
+ if (val & WAKE_PKT_WUS)
+ igb_deliver_wake_packet(netdev);
+
wr32(E1000_WUS, ~0);
rtnl_lock();
@@ -7948,11 +8337,16 @@ static void igb_io_resume(struct pci_dev *pdev)
igb_get_hw_control(adapter);
}
-static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
- u8 qsel)
+/**
+ * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
+ * @adapter: Pointer to adapter structure
+ * @index: Index of the RAR entry which needs to be synced with MAC table
+ **/
+static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
{
struct e1000_hw *hw = &adapter->hw;
u32 rar_low, rar_high;
+ u8 *addr = adapter->mac_table[index].addr;
/* HW expects these to be in network order when they are plugged
* into the registers which are little endian. In order to guarantee
@@ -7963,12 +8357,16 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
rar_high = le16_to_cpup((__le16 *)(addr + 4));
/* Indicate to hardware the Address is Valid. */
- rar_high |= E1000_RAH_AV;
+ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
+ rar_high |= E1000_RAH_AV;
- if (hw->mac.type == e1000_82575)
- rar_high |= E1000_RAH_POOL_1 * qsel;
- else
- rar_high |= E1000_RAH_POOL_1 << qsel;
+ if (hw->mac.type == e1000_82575)
+ rar_high |= E1000_RAH_POOL_1 *
+ adapter->mac_table[index].queue;
+ else
+ rar_high |= E1000_RAH_POOL_1 <<
+ adapter->mac_table[index].queue;
+ }
wr32(E1000_RAL(index), rar_low);
wrfl();
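The six address bytes split across the register pair: bytes 0-3 form RAL and bytes 4-5 the low half of RAH, read out little-endian so they land in network order once written to the registers; the valid bit and pool-selection bits are OR'd into RAH's upper bits only when the entry is marked in use. A host-side sketch of the packing, equivalent to the le32_to_cpup/le16_to_cpup lines on a little-endian CPU:

	#include <stdint.h>

	static void pack_rar(const uint8_t addr[6], uint32_t *ral, uint32_t *rah)
	{
		*ral = (uint32_t)addr[0]       | (uint32_t)addr[1] << 8 |
		       (uint32_t)addr[2] << 16 | (uint32_t)addr[3] << 24;
		*rah = (uint32_t)addr[4]       | (uint32_t)addr[5] << 8;
		/* caller ORs in E1000_RAH_AV and the pool bits, as above */
	}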
@@ -7984,10 +8382,13 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
* towards the first, as a result a collision should not be possible
*/
int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+ unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
- memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
-
- igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
+ ether_addr_copy(vf_mac_addr, mac_addr);
+ ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
+ adapter->mac_table[rar_entry].queue = vf;
+ adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
+ igb_rar_set_index(adapter, rar_entry);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c4477552ce9e..7a3fd4d74592 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -764,8 +764,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
* incoming frame. The value is stored in little endian format starting on
* byte 8.
**/
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8dea1b1367ef..34faa113a8a0 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -71,45 +71,45 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
-static int igbvf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int igbvf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 status;
- ecmd->supported = SUPPORTED_1000baseT_Full;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ecmd->advertising = ADVERTISED_1000baseT_Full;
-
- ecmd->port = -1;
- ecmd->transceiver = XCVR_DUMMY1;
+ cmd->base.port = -1;
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
else
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
-static int igbvf_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int igbvf_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
return -EOPNOTSUPP;
}
@@ -443,8 +443,6 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops igbvf_ethtool_ops = {
- .get_settings = igbvf_get_settings,
- .set_settings = igbvf_set_settings,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,
@@ -467,6 +465,8 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
.get_ethtool_stats = igbvf_get_ethtool_stats,
.get_coalesce = igbvf_get_coalesce,
.set_coalesce = igbvf_set_coalesce,
+ .get_link_ksettings = igbvf_get_link_ksettings,
+ .set_link_ksettings = igbvf_set_link_ksettings,
};
void igbvf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 6f4290d6dc9f..bf69f01f8467 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -101,6 +101,8 @@ enum latency_range {
#define IGBVF_MNG_VLAN_NONE (-1)
+#define IGBVF_MAX_MAC_FILTERS 3
+
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
@@ -241,7 +243,6 @@ struct igbvf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
spinlock_t stats_lock; /* prevent concurrent stats updates */
/* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index f800bf8eedae..30d58c4a444e 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -62,6 +62,10 @@
#define E1000_VF_RESET 0x01 /* VF requests reset */
#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+/* VF requests PF to clear all unicast MAC filters */
+#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT)
+/* VF requests PF to add unicast MAC filter */
+#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 839ba110f7fb..1b9cbbe88f6f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -400,8 +400,8 @@ next_desc:
adapter->total_rx_packets += total_packets;
adapter->total_rx_bytes += total_bytes;
- adapter->net_stats.rx_bytes += total_bytes;
- adapter->net_stats.rx_packets += total_packets;
+ netdev->stats.rx_bytes += total_bytes;
+ netdev->stats.rx_packets += total_packets;
return cleaned;
}
@@ -864,8 +864,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
}
}
- adapter->net_stats.tx_bytes += total_bytes;
- adapter->net_stats.tx_packets += total_packets;
+ netdev->stats.tx_bytes += total_bytes;
+ netdev->stats.tx_packets += total_packets;
return count < tx_ring->count;
}
@@ -1433,12 +1433,52 @@ static void igbvf_set_multi(struct net_device *netdev)
}
/**
+ * igbvf_set_uni - Configure unicast MAC filters
+ * @netdev: network interface device structure
+ *
+ * This routine is responsible for configuring the hardware for proper
+ * unicast address filtering.
+ **/
+static int igbvf_set_uni(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (netdev_uc_count(netdev) > IGBVF_MAX_MAC_FILTERS) {
+ pr_err("Too many unicast filters - No Space\n");
+ return -ENOSPC;
+ }
+
+ /* Clear all unicast MAC filters */
+ hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL);
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ /* Add MAC filters one by one */
+ netdev_for_each_uc_addr(ha, netdev) {
+ hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD,
+ ha->addr);
+ udelay(200);
+ }
+ }
+
+ return 0;
+}
+
+static void igbvf_set_rx_mode(struct net_device *netdev)
+{
+ igbvf_set_multi(netdev);
+ igbvf_set_uni(netdev);
+}
+
+/**
* igbvf_configure - configure the hardware for Rx and Tx
* @adapter: private board structure
**/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
- igbvf_set_multi(adapter->netdev);
+ igbvf_set_rx_mode(adapter->netdev);
igbvf_restore_vlan(adapter);
@@ -1798,7 +1838,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
/* Fill out the OS statistics structure */
- adapter->net_stats.multicast = adapter->stats.mprc;
+ adapter->netdev->stats.multicast = adapter->stats.mprc;
}
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
@@ -2334,21 +2374,6 @@ static void igbvf_reset_task(struct work_struct *work)
}
/**
- * igbvf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
* igbvf_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2635,8 +2660,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_open = igbvf_open,
.ndo_stop = igbvf_close,
.ndo_start_xmit = igbvf_xmit_frame,
- .ndo_get_stats = igbvf_get_stats,
- .ndo_set_rx_mode = igbvf_set_multi,
+ .ndo_set_rx_mode = igbvf_set_rx_mode,
.ndo_set_mac_address = igbvf_set_mac,
.ndo_change_mtu = igbvf_change_mtu,
.ndo_do_ioctl = igbvf_ioctl,
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 335ba6642145..528be116184e 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -36,6 +36,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
u32, u32, u32);
static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
+static s32 e1000_set_uc_addr_vf(struct e1000_hw *hw, u32 subcmd, u8 *addr);
static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
/**
@@ -66,6 +67,8 @@ static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_vf;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
+ /* set mac filter */
+ mac->ops.set_uc_addr = e1000_set_uc_addr_vf;
/* set vlan filter table array */
mac->ops.set_vfta = e1000_set_vfta_vf;
@@ -338,6 +341,44 @@ static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
}
/**
+ * e1000_set_uc_addr_vf - Set or clear unicast filters
+ * @hw: pointer to the HW structure
+ * @sub_cmd: add or clear filters
+ * @addr: pointer to the filter MAC address
+ **/
+static s32 e1000_set_uc_addr_vf(struct e1000_hw *hw, u32 sub_cmd, u8 *addr)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3], msgbuf_chk;
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] |= sub_cmd;
+ msgbuf[0] |= E1000_VF_SET_MAC_ADDR;
+ msgbuf_chk = msgbuf[0];
+
+ if (addr)
+ memcpy(msg_addr, addr, ETH_ALEN);
+
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ if (!ret_val) {
+ msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+ if (msgbuf[0] == (msgbuf_chk | E1000_VT_MSGTYPE_NACK))
+ return -ENOSPC;
+ }
+
+ return ret_val;
+}
+
+/**
* e1000_check_for_link_vf - Check for link for a virtual interface
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index f00a41d9a1ca..4cf78b0dec50 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -179,6 +179,7 @@ struct e1000_mac_operations {
s32 (*get_bus_info)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
+ s32 (*set_uc_addr)(struct e1000_hw *, u32, u8 *);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index e5d72559cca9..d10a0d242dda 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -94,24 +94,30 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
static int
-ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ixgb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ cmd->base.port = PORT_FIBRE;
if (netif_carrier_ok(adapter->netdev)) {
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -126,13 +132,14 @@ void ixgb_set_speed_duplex(struct net_device *netdev)
}
static int
-ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ixgb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
- if (ecmd->autoneg == AUTONEG_ENABLE ||
- (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
if (netif_running(adapter->netdev)) {
@@ -630,8 +637,6 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static const struct ethtool_ops ixgb_ethtool_ops = {
- .get_settings = ixgb_get_settings,
- .set_settings = ixgb_set_settings,
.get_drvinfo = ixgb_get_drvinfo,
.get_regs_len = ixgb_get_regs_len,
.get_regs = ixgb_get_regs,
@@ -649,6 +654,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
.set_phys_id = ixgb_set_phys_id,
.get_sset_count = ixgb_get_sset_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
+ .get_link_ksettings = ixgb_get_link_ksettings,
+ .set_link_ksettings = ixgb_set_link_ksettings,
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index fbd220d137b3..5a713199653c 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -86,7 +86,6 @@ static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
-static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
@@ -367,7 +366,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
.ndo_open = ixgb_open,
.ndo_stop = ixgb_close,
.ndo_start_xmit = ixgb_xmit_frame,
- .ndo_get_stats = ixgb_get_stats,
.ndo_set_rx_mode = ixgb_set_multi,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgb_set_mac,
@@ -1597,20 +1595,6 @@ ixgb_tx_timeout_task(struct work_struct *work)
}
/**
- * ixgb_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-
-static struct net_device_stats *
-ixgb_get_stats(struct net_device *netdev)
-{
- return &netdev->stats;
-}
-
-/**
* ixgb_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b1ecc2627a5a..76263762bea1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -86,17 +86,62 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
+#define IXGBE_RXBUFFER_1536 1536
#define IXGBE_RXBUFFER_2K 2048
#define IXGBE_RXBUFFER_3K 3072
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
-#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the 3K
+ * buffers.
+ */
#if (PAGE_SIZE < 8192)
-#define IXGBE_MAX_FRAME_BUILD_SKB \
- (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
+#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
+
+static inline int ixgbe_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int ixgbe_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = IXGBE_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return ixgbe_compute_pad(rx_buf_len);
+}
+
+#define IXGBE_SKB_PAD ixgbe_skb_pad()
#else
-#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
+#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
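A worked pass through ixgbe_skb_pad(), assuming 4 KiB pages, NET_SKB_PAD == 32, NET_IP_ALIGN == 2, and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320 (all of these are configuration-dependent):

	/*
	 *   SKB_WITH_OVERHEAD(2048)      = 2048 - 320 = 1728
	 *   NET_SKB_PAD + 1536           = 32 + 1536  = 1568 <= 1728
	 *       => IXGBE_2K_TOO_SMALL_WITH_PADDING is false
	 *   rx_buf_len                   = 1536 - 2    = 1534
	 *   page_size = ALIGN(1534, 2048)              = 2048
	 *   pad = SKB_WITH_OVERHEAD(2048) - 1534 = 1728 - 1534 = 194
	 *
	 * i.e. roughly 194 bytes of headroom per half-page buffer. With
	 * 256-byte cache lines the shared-info term grows, the 2K check
	 * trips, and padding is computed for 3 KiB buffers instead.
	 */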
/*
@@ -190,7 +235,11 @@ struct vf_macvlans {
struct ixgbe_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ /* XDP uses address ptr on irq_clean */
+ void *data;
+ };
unsigned int bytecount;
unsigned short gso_segs;
__be16 protocol;
@@ -243,6 +292,7 @@ enum ixgbe_ring_state_t {
__IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
+ __IXGBE_TX_XDP_RING,
};
#define ring_uses_build_skb(ring) \
@@ -269,10 +319,17 @@ struct ixgbe_fwd_adapter {
set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define ring_is_xdp(ring) \
+ test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
+#define set_ring_xdp(ring) \
+ set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
+#define clear_ring_xdp(ring) \
+ clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
struct ixgbe_ring {
struct ixgbe_ring *next; /* pointer to next ring in q_vector */
struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
struct net_device *netdev; /* netdev ring belongs to */
+ struct bpf_prog *xdp_prog;
struct device *dev; /* device for DMA mapping */
struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
@@ -334,6 +391,7 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 31
@@ -361,7 +419,7 @@ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
if (ring_uses_build_skb(ring))
- return IXGBE_MAX_FRAME_BUILD_SKB;
+ return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
return IXGBE_RXBUFFER_2K;
}
@@ -510,6 +568,7 @@ struct ixgbe_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
/* OS defined structs */
struct net_device *netdev;
+ struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
unsigned long state;
@@ -576,6 +635,10 @@ struct ixgbe_adapter {
__be16 vxlan_port;
__be16 geneve_port;
+ /* XDP */
+ int num_xdp_queues;
+ struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
+
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -622,6 +685,7 @@ struct ixgbe_adapter {
u64 tx_busy;
unsigned int tx_ring_count;
+ unsigned int xdp_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
@@ -705,7 +769,7 @@ struct ixgbe_adapter {
u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
- u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+ u32 *rss_key;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -762,6 +826,7 @@ enum ixgbe_boards {
board_X540,
board_X550,
board_X550EM_x,
+ board_x550em_x_fw,
board_x550em_a,
board_x550em_a_fw,
};
@@ -771,6 +836,7 @@ extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
@@ -790,7 +856,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
-int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 90fa5bf23d1b..7e5e336d7dcc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -179,6 +179,7 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82598_BX:
case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
return SUPPORTED_10000baseKR_Full;
default:
return SUPPORTED_10000baseKX4_Full |
@@ -186,60 +187,62 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
}
}
-static int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
+ supported |= ixgbe_get_supported_10gtypes(hw);
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
+ supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
SUPPORTED_1000baseKX_Full :
SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- ecmd->supported |= SUPPORTED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_10_FULL)
- ecmd->supported |= SUPPORTED_10baseT_Full;
+ supported |= SUPPORTED_10baseT_Full;
/* default advertised speed if phy.autoneg_advertised isn't set */
- ecmd->advertising = ecmd->supported;
+ advertising = supported;
/* set the advertised speeds */
if (hw->phy.autoneg_advertised) {
- ecmd->advertising = 0;
+ advertising = 0;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
+ advertising |= supported & ADVRTSD_MSK_10G;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
- if (ecmd->supported & SUPPORTED_1000baseKX_Full)
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+ if (supported & SUPPORTED_1000baseKX_Full)
+ advertising |= ADVERTISED_1000baseKX_Full;
else
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
} else {
if (hw->phy.multispeed_fiber && !autoneg) {
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
}
}
if (autoneg) {
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else
- ecmd->autoneg = AUTONEG_DISABLE;
-
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* Determine the remaining settings based on the PHY type. */
switch (adapter->hw.phy.type) {
@@ -248,14 +251,14 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_x550em_ext_t:
case ixgbe_phy_fw:
case ixgbe_phy_cu_unknown:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
break;
case ixgbe_phy_qt:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
case ixgbe_phy_sfp_passive_tyco:
@@ -273,9 +276,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_DA;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
case ixgbe_sfp_type_lr:
@@ -285,102 +288,113 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_NONE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
break;
case ixgbe_sfp_type_unknown:
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_NONE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_OTHER;
break;
}
/* Indicate pause support */
- ecmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
switch (hw->fc.requested_mode) {
case ixgbe_fc_full:
- ecmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
break;
case ixgbe_fc_rx_pause:
- ecmd->advertising |= ADVERTISED_Pause |
+ advertising |= ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
break;
case ixgbe_fc_tx_pause:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
+ advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
}
if (netif_carrier_ok(netdev)) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case IXGBE_LINK_SPEED_5GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_5000);
+ cmd->base.speed = SPEED_5000;
break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_2500);
+ cmd->base.speed = SPEED_2500;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case IXGBE_LINK_SPEED_100_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
case IXGBE_LINK_SPEED_10_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbe_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
s32 err = 0;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -388,12 +402,12 @@ static int ixgbe_set_settings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (ecmd->advertising & ~ecmd->supported)
+ if (advertising & ~supported)
return -EINVAL;
/* only allow one speed at a time if no autoneg */
- if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
- if (ecmd->advertising ==
+ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
+ if (advertising ==
(ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full))
return -EINVAL;
@@ -401,16 +415,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
old = hw->phy.autoneg_advertised;
advertised = 0;
- if (ecmd->advertising & ADVERTISED_10000baseT_Full)
+ if (advertising & ADVERTISED_10000baseT_Full)
advertised |= IXGBE_LINK_SPEED_10GB_FULL;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ if (advertising & ADVERTISED_1000baseT_Full)
advertised |= IXGBE_LINK_SPEED_1GB_FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
advertised |= IXGBE_LINK_SPEED_100_FULL;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
advertised |= IXGBE_LINK_SPEED_10_FULL;
if (old == advertised)
@@ -428,10 +442,11 @@ static int ixgbe_set_settings(struct net_device *netdev,
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
} else {
/* in this case we currently only support 10Gb/FULL */
- u32 speed = ethtool_cmd_speed(ecmd);
- if ((ecmd->autoneg == AUTONEG_ENABLE) ||
- (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
- (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ u32 speed = cmd->base.speed;
+
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+ (advertising != ADVERTISED_10000baseT_Full) ||
+ (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
@@ -1056,15 +1071,19 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ adapter->xdp_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
+ adapter->xdp_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+ i = max_t(int, i, adapter->num_xdp_queues);
temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
if (!temp_ring) {
@@ -1096,12 +1115,33 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ memcpy(&temp_ring[i], adapter->xdp_ring[i],
+ sizeof(struct ixgbe_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = ixgbe_setup_tx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbe_free_tx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+
for (i = 0; i < adapter->num_tx_queues; i++) {
ixgbe_free_tx_resources(adapter->tx_ring[i]);
memcpy(adapter->tx_ring[i], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+
+ memcpy(adapter->xdp_ring[i], &temp_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
@@ -1113,7 +1153,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
sizeof(struct ixgbe_ring));
temp_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(&temp_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
if (err) {
while (i) {
i--;
@@ -1746,7 +1786,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
- err = ixgbe_setup_rx_resources(rx_ring);
+ err = ixgbe_setup_rx_resources(adapter, rx_ring);
if (err) {
ret_val = 4;
goto err_nomem;
@@ -2927,9 +2967,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
- return sizeof(adapter->rss_key);
+ return IXGBE_RSS_KEY_SIZE;
}
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
@@ -3402,8 +3440,6 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
- .get_settings = ixgbe_get_settings,
- .set_settings = ixgbe_set_settings,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
@@ -3442,6 +3478,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ts_info = ixgbe_get_ts_info,
.get_module_info = ixgbe_get_module_info,
.get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 1b8be7d813bd..b45fdc98033d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -267,12 +267,14 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
**/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
- int i;
+ int i, reg_idx;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
+ for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
+ adapter->xdp_ring[i]->reg_idx = reg_idx;
return true;
}
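XDP TX rings are stacked after the regular TX rings in the register map, so the reg_idx assignment above is purely sequential. An illustration with hypothetical counts of 8 TX and 8 XDP queues:

	static void demo_reg_idx_layout(void)
	{
		int tx_reg[8], xdp_reg[8];
		int i, reg_idx = 0;

		for (i = 0; i < 8; i++, reg_idx++)
			tx_reg[i] = reg_idx;	/* 0..7  */
		for (i = 0; i < 8; i++, reg_idx++)
			xdp_reg[i] = reg_idx;	/* 8..15 */
	}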
@@ -308,6 +310,11 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_rss(adapter);
}
+static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
+{
+ return adapter->xdp_prog ? nr_cpu_ids : 0;
+}
+
#define IXGBE_RSS_64Q_MASK 0x3F
#define IXGBE_RSS_16Q_MASK 0xF
#define IXGBE_RSS_8Q_MASK 0x7
@@ -382,6 +389,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues_per_pool = tcs;
adapter->num_tx_queues = vmdq_i * tcs;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = vmdq_i * tcs;
#ifdef IXGBE_FCOE
@@ -479,6 +487,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
adapter->num_tx_queues = rss_i * tcs;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = rss_i * tcs;
return true;
@@ -549,6 +558,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues = vmdq_i * rss_i;
adapter->num_tx_queues = vmdq_i * rss_i;
+ adapter->num_xdp_queues = 0;
/* disable ATR as it is not supported when VMDq is enabled */
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -669,6 +679,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
adapter->num_rx_queues = rss_i;
adapter->num_tx_queues = rss_i;
+ adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);
return true;
}
@@ -689,6 +700,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
@@ -719,8 +731,11 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int i, vectors, vector_threshold;
- /* We start by asking for one vector per queue pair */
+ /* We start by asking for one vector per queue pair with XDP queues
+ * being stacked with TX queues.
+ */
vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ vectors = max(vectors, adapter->num_xdp_queues);
/* It is easy to be greedy for MSI-X vectors. However, it really
* doesn't do much good if we have a lot more vectors than CPUs. We'll
@@ -800,6 +815,8 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
* @v_idx: index of vector in adapter struct
* @txr_count: total number of Tx rings to allocate
* @txr_idx: index of first Tx ring to allocate
+ * @xdp_count: total number of XDP rings to allocate
+ * @xdp_idx: index of first XDP ring to allocate
* @rxr_count: total number of Rx rings to allocate
* @rxr_idx: index of first Rx ring to allocate
*
@@ -808,6 +825,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int v_count, int v_idx,
int txr_count, int txr_idx,
+ int xdp_count, int xdp_idx,
int rxr_count, int rxr_idx)
{
struct ixgbe_q_vector *q_vector;
@@ -817,7 +835,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int ring_count, size;
u8 tcs = netdev_get_num_tc(adapter->netdev);
- ring_count = txr_count + rxr_count;
+ ring_count = txr_count + rxr_count + xdp_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
@@ -909,6 +927,33 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring++;
}
+ while (xdp_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ ixgbe_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = xdp_idx;
+ set_ring_xdp(ring);
+
+ /* assign ring to adapter */
+ adapter->xdp_ring[xdp_idx] = ring;
+
+ /* update count and index */
+ xdp_count--;
+ xdp_idx++;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
while (rxr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
@@ -1002,17 +1047,18 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
int q_vectors = adapter->num_q_vectors;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
- int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int xdp_remaining = adapter->num_xdp_queues;
+ int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
int err;
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
q_vectors = 1;
- if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
for (; rxr_remaining; v_idx++) {
err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
- 0, 0, 1, rxr_idx);
+ 0, 0, 0, 0, 1, rxr_idx);
if (err)
goto err_out;
@@ -1026,8 +1072,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);
+
err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx,
+ xqpv, xdp_idx,
rqpv, rxr_idx);
if (err)
@@ -1036,14 +1085,17 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
/* update counts and index */
rxr_remaining -= rqpv;
txr_remaining -= tqpv;
+ xdp_remaining -= xqpv;
rxr_idx++;
txr_idx++;
+ xdp_idx += xqpv;
}
return 0;
err_out:
adapter->num_tx_queues = 0;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
@@ -1066,6 +1118,7 @@ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
int v_idx = adapter->num_q_vectors;
adapter->num_tx_queues = 0;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
@@ -1172,9 +1225,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_register(adapter);
- e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+ e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
- adapter->num_rx_queues, adapter->num_tx_queues);
+ adapter->num_rx_queues, adapter->num_tx_queues,
+ adapter->num_xdp_queues);
set_bit(__IXGBE_DOWN, &adapter->state);
@@ -1195,6 +1249,7 @@ err_alloc_q_vectors:
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
adapter->num_tx_queues = 0;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = 0;
ixgbe_free_q_vectors(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a7a430a7be2c..22a29df1d29e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -49,6 +49,9 @@
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/atomic.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
@@ -85,6 +88,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X540] = &ixgbe_X540_info,
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
+ [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
};
@@ -131,9 +135,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
@@ -508,7 +514,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
*/
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
- int i = 0, j = 0;
+ int i;
char rname[16];
u32 regs[64];
@@ -570,21 +576,38 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
break;
default:
- pr_info("%-15s %08x\n", reginfo->name,
- IXGBE_READ_REG(hw, reginfo->ofs));
+ pr_info("%-15s %08x\n",
+ reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
return;
}
- for (i = 0; i < 8; i++) {
- snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
- pr_err("%-15s", rname);
+ i = 0;
+ while (i < 64) {
+ int j;
+ char buf[9 * 8 + 1];
+ char *p = buf;
+
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
for (j = 0; j < 8; j++)
- pr_cont(" %08x", regs[i*8+j]);
- pr_cont("\n");
+ p += sprintf(p, " %08x", regs[i++]);
+ pr_err("%-15s%s\n", rname, buf);
}
}
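
The on-stack buffer is sized for exactly one output row; a quick check of the arithmetic:

    /* Each value renders as " %08x" = 9 characters; 8 values per row
     * gives 8 * 9 = 72 characters, plus 1 for the NUL: buf[9 * 8 + 1].
     * Assembling the row first and emitting it with a single pr_err()
     * avoids the line interleaving that per-token pr_cont() calls can
     * suffer when several CPUs log concurrently.
     */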
+static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
+{
+ struct ixgbe_tx_buffer *tx_buffer;
+
+ tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
+ pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
+ n, ring->next_to_use, ring->next_to_clean,
+ (u64)dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ tx_buffer->next_to_watch,
+ (u64)tx_buffer->time_stamp);
+}
+
/*
* ixgbe_dump - Print registers, tx-rings and rx-rings
*/
@@ -594,14 +617,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_reg_info *reginfo;
int n = 0;
- struct ixgbe_ring *tx_ring;
+ struct ixgbe_ring *ring;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
struct my_u0 { u64 a; u64 b; } *u0;
struct ixgbe_ring *rx_ring;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer_info;
- u32 staterr;
int i = 0;
if (!netif_msg_hw(adapter))
@@ -635,14 +657,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
"Queue [NTU] [NTC] [bi(ntc)->dma ]",
"leng", "ntw", "timestamp");
for (n = 0; n < adapter->num_tx_queues; n++) {
- tx_ring = adapter->tx_ring[n];
- tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- tx_buffer->next_to_watch,
- (u64)tx_buffer->time_stamp);
+ ring = adapter->tx_ring[n];
+ ixgbe_print_buffer(ring, n);
+ }
+
+ for (n = 0; n < adapter->num_xdp_queues; n++) {
+ ring = adapter->xdp_ring[n];
+ ixgbe_print_buffer(ring, n);
}
/* Print TX Rings */
@@ -687,21 +708,32 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
*/
for (n = 0; n < adapter->num_tx_queues; n++) {
- tx_ring = adapter->tx_ring[n];
+ ring = adapter->tx_ring[n];
pr_info("------------------------------------\n");
- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
pr_info("------------------------------------\n");
pr_info("%s%s %s %s %s %s\n",
"T [desc] [address 63:0 ] ",
"[PlPOIdStDDt Ln] [bi->dma ] ",
"leng", "ntw", "timestamp", "bi->skb");
- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
- tx_desc = IXGBE_TX_DESC(tx_ring, i);
- tx_buffer = &tx_ring->tx_buffer_info[i];
+ for (i = 0; ring->desc && (i < ring->count); i++) {
+ tx_desc = IXGBE_TX_DESC(ring, i);
+ tx_buffer = &ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
if (dma_unmap_len(tx_buffer, len) > 0) {
- pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
+ const char *ring_desc;
+
+ if (i == ring->next_to_use &&
+ i == ring->next_to_clean)
+ ring_desc = " NTC/U";
+ else if (i == ring->next_to_use)
+ ring_desc = " NTU";
+ else if (i == ring->next_to_clean)
+ ring_desc = " NTC";
+ else
+ ring_desc = "";
+ pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
@@ -709,16 +741,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
dma_unmap_len(tx_buffer, len),
tx_buffer->next_to_watch,
(u64)tx_buffer->time_stamp,
- tx_buffer->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- pr_cont(" NTC/U\n");
- else if (i == tx_ring->next_to_use)
- pr_cont(" NTU\n");
- else if (i == tx_ring->next_to_clean)
- pr_cont(" NTC\n");
- else
- pr_cont("\n");
+ tx_buffer->skb,
+ ring_desc);
if (netif_msg_pktdata(adapter) &&
tx_buffer->skb)
@@ -797,34 +821,44 @@ rx_ring_summary:
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("%s%s%s",
+ pr_info("%s%s%s\n",
"R [desc] [ PktBuf A0] ",
"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
- "<-- Adv Rx Read format\n");
- pr_info("%s%s%s",
+ "<-- Adv Rx Read format");
+ pr_info("%s%s%s\n",
"RWB[desc] [PcsmIpSHl PtRs] ",
"[vl er S cks ln] ---------------- [bi->skb ] ",
- "<-- Adv Rx Write-Back format\n");
+ "<-- Adv Rx Write-Back format");
for (i = 0; i < rx_ring->count; i++) {
+ const char *ring_desc;
+
+ if (i == rx_ring->next_to_use)
+ ring_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ ring_desc = " NTC";
+ else
+ ring_desc = "";
+
rx_buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IXGBE_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- if (staterr & IXGBE_RXD_STAT_DD) {
+ if (rx_desc->wb.upper.length) {
/* Descriptor Done */
- pr_info("RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
+ pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
+ i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- rx_buffer_info->skb);
+ rx_buffer_info->skb,
+ ring_desc);
} else {
- pr_info("R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
+ pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
+ i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)rx_buffer_info->dma,
- rx_buffer_info->skb);
+ rx_buffer_info->skb,
+ ring_desc);
if (netif_msg_pktdata(adapter) &&
rx_buffer_info->dma) {
@@ -835,14 +869,6 @@ rx_ring_summary:
ixgbe_rx_bufsz(rx_ring), true);
}
}
-
- if (i == rx_ring->next_to_use)
- pr_cont(" NTU\n");
- else if (i == rx_ring->next_to_clean)
- pr_cont(" NTC\n");
- else
- pr_cont("\n");
-
}
}
}
@@ -972,6 +998,10 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->tx_ring[i]->state);
+
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
+ &adapter->xdp_ring[i]->state);
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -1016,6 +1046,14 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
if (xoff[tc])
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
+
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+
+ tc = xdp_ring->dcb_tc;
+ if (xoff[tc])
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
+ }
}
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
@@ -1167,7 +1205,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- napi_consume_skb(tx_buffer->skb, napi_budget);
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buffer->data);
+ else
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1228,7 +1269,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
struct ixgbe_hw *hw = &adapter->hw;
- e_err(drv, "Detected Tx Unit Hang\n"
+ e_err(drv, "Detected Tx Unit Hang %s\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
" next_to_use <%x>\n"
@@ -1236,13 +1277,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
"tx_buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n"
" jiffies <%lx>\n",
+ ring_is_xdp(tx_ring) ? "(XDP)" : "",
tx_ring->queue_index,
IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
tx_ring->next_to_use, i,
tx_ring->tx_buffer_info[i].time_stamp, jiffies);
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ if (!ring_is_xdp(tx_ring))
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
e_info(probe,
"tx hang %d detected on queue %d, resetting adapter\n",
@@ -1255,6 +1299,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
return true;
}
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
@@ -1846,6 +1893,10 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being fixed
*
+ * Check if the skb is valid; in the XDP case it will be an error pointer.
+ * Return true in this case to abort processing and advance to next
+ * descriptor.
+ *
* Check for corrupted packet headers caused by senders on the local L2
* embedded NIC switch not setting up their Tx Descriptors right. These
* should be very rare.
@@ -1864,6 +1915,10 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
{
struct net_device *netdev = rx_ring->netdev;
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
/* verify that the packet does not have any known errors */
if (unlikely(ixgbe_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
@@ -2039,7 +2094,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
- if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
/* the page has been released from the ring */
IXGBE_CB(skb)->page_released = true;
} else {
@@ -2060,21 +2115,22 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- union ixgbe_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union ixgbe_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* allocate a skb to store the frags */
@@ -2087,7 +2143,7 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
IXGBE_CB(skb)->dma = rx_buffer->dma;
skb_add_rx_frag(skb, 0, rx_buffer->page,
- rx_buffer->page_offset,
+ xdp->data - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
@@ -2095,7 +2151,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
rx_buffer->page_offset += truesize;
#endif
} else {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+ memcpy(__skb_put(skb, size),
+ xdp->data, ALIGN(size, sizeof(long)));
rx_buffer->pagecnt_bias++;
}
@@ -2104,32 +2161,32 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- union ixgbe_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union ixgbe_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(xdp->data + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
- skb = build_skb(va - IXGBE_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IXGBE_SKB_PAD);
- __skb_put(skb, size);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
/* record DMA address if this is the start of a chain of buffers */
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
@@ -2145,6 +2202,65 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
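
Both skb construction paths now take their geometry from the xdp_buff instead of a raw virtual address. The three pointers relate as sketched below (illustration, not code from the patch):

    /* Receive page layout as seen through the xdp_buff:
     *
     *   data_hard_start      data                data_end
     *         |<- headroom ->|<----- packet ---->|
     *
     * A BPF program may move data (e.g. bpf_xdp_adjust_head()), so the
     * frame length must be recomputed as data_end - data afterwards.
     */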
+#define IXGBE_XDP_PASS 0
+#define IXGBE_XDP_CONSUMED 1
+#define IXGBE_XDP_TX 2
+
+static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_buff *xdp);
+
+static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int result = IXGBE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = ixgbe_xmit_xdp_ring(adapter, xdp);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
+
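
One subtlety worth noting: the verdict is carried back through the skb pointer. IXGBE_XDP_PASS is 0 and ERR_PTR(0) is NULL, so a pass drops straight into the normal skb-build path, while TX and CONSUMED come back as distinguishable error pointers:

    /* Decoding, as done in ixgbe_clean_rx_irq() below:
     *   skb == NULL                    -> XDP_PASS, build an skb
     *   PTR_ERR(skb) == -IXGBE_XDP_TX  -> queued on an XDP Tx ring
     *   any other IS_ERR(skb)          -> consumed/dropped, recycle page
     */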
+static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+
+ rx_buffer->page_offset ^= truesize;
+#else
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
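
On PAGE_SIZE < 8192 systems the receive page is split in half and the offset just toggles between the halves; a worked example with 4 KiB pages and the default 2 KiB buffers (illustrative numbers):

    /* truesize = ixgbe_rx_pg_size(ring) / 2 = 4096 / 2 = 2048
     *   page_offset    0 ^ 2048 -> 2048   (hand out the second half)
     *   page_offset 2048 ^ 2048 -> 0      (back to the first half)
     * On larger pages the offset advances by the aligned frame size
     * instead of toggling.
     */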
/**
* ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @q_vector: structure containing interrupt and ring information
@@ -2163,17 +2279,19 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
const int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter = q_vector->adapter;
+#ifdef IXGBE_FCOE
int ddp_bytes;
unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ bool xdp_xmit = false;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ struct xdp_buff xdp;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2196,14 +2314,34 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
/* retrieve a buffer from the ring */
- if (skb)
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_hard_start = xdp.data -
+ ixgbe_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+
+ skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
+ xdp_xmit = true;
+ ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_rx_packets++;
+ total_rx_bytes += size;
+ } else if (skb) {
ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
+ } else if (ring_uses_build_skb(rx_ring)) {
skb = ixgbe_build_skb(rx_ring, rx_buffer,
- rx_desc, size);
- else
+ &xdp, rx_desc);
+ } else {
skb = ixgbe_construct_skb(rx_ring, rx_buffer,
- rx_desc, size);
+ &xdp, rx_desc);
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -2260,6 +2398,16 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_packets++;
}
+ if (xdp_xmit) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+ }
+
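
The doorbell is batched: one tail write per NAPI poll instead of per packet. The ordering is the standard descriptor-ring producer protocol:

    /* 1. fill descriptors in DMA-coherent memory (ixgbe_xmit_xdp_ring)
     * 2. wmb()          - make them visible before the doorbell
     * 3. writel(tail)   - MMIO write tells the NIC to fetch
     */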
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -3364,6 +3512,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
}
static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
@@ -3489,6 +3639,28 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_init_rss_key - Initialize adapter RSS key
+ * @adapter: device handle
+ *
+ * Allocates and initializes the RSS key if it is not allocated.
+ **/
+static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
+{
+ u32 *rss_key;
+
+ if (!adapter->rss_key) {
+ rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
+ if (unlikely(!rss_key))
+ return -ENOMEM;
+
+ netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
+ adapter->rss_key = rss_key;
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_store_reta - Write the RETA table to HW
* @adapter: device handle
*
@@ -3590,7 +3762,7 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
- adapter->rss_key[i]);
+ *(adapter->rss_key + i));
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3651,7 +3823,6 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
- netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
unsigned int pf_pool = adapter->num_vfs;
@@ -3802,7 +3973,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
/* Limit the maximum frame size so we don't overrun the skb */
if (ring_uses_build_skb(ring) &&
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
- rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+ rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
IXGBE_RXDCTL_RLPML_EN;
#endif
}
@@ -3972,8 +4143,8 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
- if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
- (max_frame > IXGBE_MAX_FRAME_BUILD_SKB))
+ if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
#endif
}
@@ -5505,7 +5676,10 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buffer->data);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -5546,7 +5720,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
}
/* reset BQL for queue */
- netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (!ring_is_xdp(tx_ring))
+ netdev_tx_reset_queue(txring_txq(tx_ring));
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
@@ -5575,6 +5750,8 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbe_clean_tx_ring(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
}
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
@@ -5669,6 +5846,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
u8 reg_idx = adapter->tx_ring[i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
/* Disable the Tx DMA engine on 82599 and later MAC */
switch (hw->mac.type) {
@@ -5854,6 +6036,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
if (!adapter->mac_table)
return -ENOMEM;
+ if (ixgbe_init_rss_key(adapter))
+ return -ENOMEM;
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -5944,10 +6129,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
/* assign number of SR-IOV VFs */
if (hw->mac.type != ixgbe_mac_82598EB) {
if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
- adapter->num_vfs = 0;
+ max_vfs = 0;
e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
- } else {
- adapter->num_vfs = max_vfs;
}
}
#endif /* CONFIG_PCI_IOV */
@@ -6041,7 +6224,7 @@ err:
**/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
- int i, err = 0;
+ int i, j = 0, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
@@ -6051,10 +6234,20 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
goto err_setup_tx;
}
+ for (j = 0; j < adapter->num_xdp_queues; j++) {
+ err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
+ if (!err)
+ continue;
+
+ e_err(probe, "Allocation for Tx Queue %u failed\n", j);
+ goto err_setup_tx;
+ }
return 0;
err_setup_tx:
/* rewind the index freeing the rings as we go */
+ while (j--)
+ ixgbe_free_tx_resources(adapter->xdp_ring[j]);
while (i--)
ixgbe_free_tx_resources(adapter->tx_ring[i]);
return err;
@@ -6066,7 +6259,8 @@ err_setup_tx:
*
* Returns 0 on success, negative on failure
**/
-int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
@@ -6105,6 +6299,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->xdp_prog = adapter->xdp_prog;
+
return 0;
err:
vfree(rx_ring->rx_buffer_info);
@@ -6128,7 +6324,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
@@ -6184,6 +6380,9 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
ixgbe_free_tx_resources(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ if (adapter->xdp_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter->xdp_ring[i]);
}
/**
@@ -6196,6 +6395,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
ixgbe_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -6602,6 +6802,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+
+ restart_queue += xdp_ring->tx_stats.restart_queue;
+ tx_busy += xdp_ring->tx_stats.tx_busy;
+ bytes += xdp_ring->stats.bytes;
+ packets += xdp_ring->stats.packets;
+ }
adapter->restart_queue = restart_queue;
adapter->tx_busy = tx_busy;
netdev->stats.tx_bytes = bytes;
@@ -6795,6 +7003,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_TX_FDIR_INIT_DONE,
&(adapter->tx_ring[i]->state));
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &adapter->xdp_ring[i]->state);
/* re-enable flow director interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
} else {
@@ -6828,6 +7039,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
if (netif_carrier_ok(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_check_for_tx_hang(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ set_check_for_tx_hang(adapter->xdp_ring[i]);
}
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
@@ -7058,6 +7271,13 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
return true;
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+
+ if (ring->next_to_use != ring->next_to_clean)
+ return true;
+ }
+
return false;
}
@@ -8015,6 +8235,62 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
#endif
}
+static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_buff *xdp)
+{
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ u32 len, cmd_type;
+ dma_addr_t dma;
+ u16 i;
+
+ len = xdp->data_end - xdp->data;
+
+ if (unlikely(!ixgbe_desc_unused(ring)))
+ return IXGBE_XDP_CONSUMED;
+
+ dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma))
+ return IXGBE_XDP_CONSUMED;
+
+ /* record the location of the first descriptor for this packet */
+ tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+
+ i = ring->next_to_use;
+ tx_desc = IXGBE_TX_DESC(ring, i);
+
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+ tx_buffer->data = xdp->data;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ /* Avoid any potential race with xdp_xmit and cleanup */
+ smp_wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ i++;
+ if (i == ring->count)
+ i = 0;
+
+ tx_buffer->next_to_watch = tx_desc;
+ ring->next_to_use = i;
+
+ return IXGBE_XDP_TX;
+}
+
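
The smp_wmb() publishes the fully written descriptor before next_to_watch makes it visible; the cleanup side reads in the opposite order. Sketched pairing (the consumer is the existing ixgbe_clean_tx_irq()):

    /* producer (above)                consumer (ixgbe_clean_tx_irq)
     *   write tx_desc fields            eop_desc = next_to_watch
     *   smp_wmb()                       dependent reads of tx_desc
     *   tx_buffer->next_to_watch = ..   check DD bit, complete frame
     */

Note also that IXGBE_TXD_CMD sets EOP and RS together, so every XDP frame is a self-contained single-descriptor transmit.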
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@@ -8306,6 +8582,23 @@ static void ixgbe_netpoll(struct net_device *netdev)
#endif
+static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
+ struct ixgbe_ring *ring)
+{
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+}
+
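
The fetch/retry loop is the standard u64_stats reader idiom: on 32-bit kernels the 64-bit counters cannot be read atomically, so the reader loops if a writer raced with it. It pairs with the writer side used throughout the driver:

    u64_stats_update_begin(&ring->syncp);
    ring->stats.packets += total_packets;
    ring->stats.bytes += total_bytes;
    u64_stats_update_end(&ring->syncp);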
static void ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -8331,18 +8624,13 @@ static void ixgbe_get_stats64(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
- u64 bytes, packets;
- unsigned int start;
- if (ring) {
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
- }
+ ixgbe_get_ring_stats64(stats, ring);
+ }
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
+
+ ixgbe_get_ring_stats64(stats, ring);
}
rcu_read_unlock();
@@ -8948,7 +9236,9 @@ static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return ixgbe_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return ixgbe_setup_tc(dev, tc->mqprio->num_tc);
}
#ifdef CONFIG_PCI_IOV
@@ -9459,6 +9749,68 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+ int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return -EINVAL;
+
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ return -EINVAL;
+
+ /* verify ixgbe ring attributes are sufficient for XDP */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (ring_is_rsc_enabled(ring))
+ return -EINVAL;
+
+ if (frame_size > ixgbe_rx_bufsz(ring))
+ return -EINVAL;
+ }
+
+ if (nr_cpu_ids > MAX_XDP_QUEUES)
+ return -ENOMEM;
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+
+ /* If transitioning XDP modes reconfigure rings */
+ if (!!prog != !!old_prog) {
+ int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+
+ if (err) {
+ rcu_assign_pointer(adapter->xdp_prog, old_prog);
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
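
For context, the object this hook loads is an ordinary XDP BPF program. A minimal drop-everything example (illustrative, not part of the patch), built with clang -O2 -target bpf -c prog.c -o prog.o:

    #include <linux/bpf.h>

    __attribute__((section("xdp"), used))
    int xdp_drop_all(struct xdp_md *ctx)
    {
            return XDP_DROP;        /* every packet dropped in the driver */
    }

    char _license[] __attribute__((section("license"), used)) = "GPL";

Attaching it via, e.g., ip link set dev eth0 xdp obj prog.o sec xdp ends up in ixgbe_xdp_setup() through the new .ndo_xdp hook.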
+static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return ixgbe_xdp_setup(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!(adapter->xdp_prog);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -9504,6 +9856,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
+ .ndo_xdp = ixgbe_xdp,
};
/**
@@ -9808,7 +10161,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbe_init_mbx_params_pf(hw);
hw->mbx.ops = ii->mbx_ops;
pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
- ixgbe_enable_sriov(adapter);
+ ixgbe_enable_sriov(adapter, max_vfs);
skip_sriov:
#endif
@@ -9934,6 +10287,9 @@ skip_sriov:
if (err)
goto err_sw_init;
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ u64_stats_init(&adapter->xdp_ring[i]->syncp);
+
/* WOL not supported for all devices */
adapter->wol = 0;
hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
@@ -10059,6 +10415,7 @@ err_sw_init:
iounmap(adapter->io_addr);
kfree(adapter->jump_tables[0]);
kfree(adapter->mac_table);
+ kfree(adapter->rss_key);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -10143,6 +10500,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
}
kfree(adapter->mac_table);
+ kfree(adapter->rss_key);
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e55b2602f371..654a402f0e9e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -792,7 +792,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
/* Setup link based on the new speed settings */
- hw->phy.ops.setup_link(hw);
+ if (hw->phy.ops.setup_link)
+ hw->phy.ops.setup_link(hw);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 044cb44747cf..8baf298a8516 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -46,88 +46,96 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
+ unsigned int num_vfs)
{
struct ixgbe_hw *hw = &adapter->hw;
- int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
-
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
- e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
-
- /* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
- if (!adapter->ring_feature[RING_F_VMDQ].limit)
- adapter->ring_feature[RING_F_VMDQ].limit = 1;
- adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
+ int num_vf_macvlans, i;
num_vf_macvlans = hw->mac.num_rar_entries -
- (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+ (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
+ if (!num_vf_macvlans)
+ return;
- adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
- sizeof(struct vf_macvlans),
- GFP_KERNEL);
+ mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ GFP_KERNEL);
if (mv_list) {
/* Initialize list of VF macvlans */
INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
- mv_list->vf = -1;
- mv_list->free = true;
- list_add(&mv_list->l, &adapter->vf_mvs.l);
- mv_list++;
+ mv_list[i].vf = -1;
+ mv_list[i].free = true;
+ list_add(&mv_list[i].l, &adapter->vf_mvs.l);
}
+ adapter->mv_list = mv_list;
}
+}
+
+static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+ unsigned int num_vfs)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+ if (!adapter->ring_feature[RING_F_VMDQ].limit)
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+
+ /* Allocate memory for per VF control structures */
+ adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!adapter->vfinfo)
+ return -ENOMEM;
+
+ adapter->num_vfs = num_vfs;
+
+ ixgbe_alloc_vf_macvlans(adapter, num_vfs);
+ adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;
/* Initialize default switching mode VEB */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
adapter->bridge_mode = BRIDGE_MODE_VEB;
- /* If call to enable VFs succeeded then allocate memory
- * for per VF control structures.
- */
- adapter->vfinfo =
- kcalloc(adapter->num_vfs,
- sizeof(struct vf_data_storage), GFP_KERNEL);
- if (adapter->vfinfo) {
- /* limit trafffic classes based on VFs enabled */
- if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (adapter->num_vfs < 16)) {
- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
- } else if (adapter->num_vfs < 32) {
- adapter->dcb_cfg.num_tcs.pg_tcs = 4;
- adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
- } else {
- adapter->dcb_cfg.num_tcs.pg_tcs = 1;
- adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
- }
-
- /* Disable RSC when in SR-IOV mode */
- adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
- IXGBE_FLAG2_RSC_ENABLED);
+ /* limit traffic classes based on VFs enabled */
+ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ } else if (num_vfs < 32) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
+ } else {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+ }
- for (i = 0; i < adapter->num_vfs; i++) {
- /* enable spoof checking for all VFs */
- adapter->vfinfo[i].spoofchk_enabled = true;
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
- /* We support VF RSS querying only for 82599 and x540
- * devices at the moment. These devices share RSS
- * indirection table and RSS hash key with PF therefore
- * we want to disable the querying by default.
- */
- adapter->vfinfo[i].rss_query_enabled = 0;
+ for (i = 0; i < num_vfs; i++) {
+ /* enable spoof checking for all VFs */
+ adapter->vfinfo[i].spoofchk_enabled = true;
- /* Untrust all VFs */
- adapter->vfinfo[i].trusted = false;
+ /* We support VF RSS querying only for 82599 and x540
+ * devices at the moment. These devices share RSS
+ * indirection table and RSS hash key with PF therefore
+ * we want to disable the querying by default.
+ */
+ adapter->vfinfo[i].rss_query_enabled = 0;
- /* set the default xcast mode */
- adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
- }
+ /* Untrust all VFs */
+ adapter->vfinfo[i].trusted = false;
- return 0;
+ /* set the default xcast mode */
+ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
}
- return -ENOMEM;
+ e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
+ return 0;
}
/**
@@ -165,12 +173,13 @@ static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
/* Note this function is called when the user wants to enable SR-IOV
* VFs using the now deprecated module parameter
*/
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
int pre_existing_vfs = 0;
+ unsigned int num_vfs;
pre_existing_vfs = pci_num_vf(adapter->pdev);
- if (!pre_existing_vfs && !adapter->num_vfs)
+ if (!pre_existing_vfs && !max_vfs)
return;
/* If there are pre-existing VFs then we have to force
@@ -180,7 +189,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* have been created via the new PCI SR-IOV sysfs interface.
*/
if (pre_existing_vfs) {
- adapter->num_vfs = pre_existing_vfs;
+ num_vfs = pre_existing_vfs;
dev_warn(&adapter->pdev->dev,
"Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
} else {
@@ -192,17 +201,16 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
+ num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
- err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
if (err) {
e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- adapter->num_vfs = 0;
return;
}
}
- if (!__ixgbe_enable_sriov(adapter)) {
+ if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
ixgbe_get_vfs(adapter);
return;
}
@@ -298,6 +306,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err = 0;
+ u8 num_tc;
int i;
int pre_existing_vfs = pci_num_vf(dev);
@@ -310,23 +319,41 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
return err;
/* While the SR-IOV capability structure reports total VFs to be 64,
- * we have to limit the actual number allocated based on two factors.
+ * we limit the actual number allocated, as below, based on two factors.
+ * Num_TCs MAX_VFs
+ * 1 63
+ * <=4 31
+ * >4 15
* First, we reserve some transmit/receive resources for the PF.
* Second, VMDQ also uses the same pools that SR-IOV does. We need to
* account for this, so that we don't accidentally allocate more VFs
* than we have available pools. The PCI bus driver already checks for
* other values out of range.
*/
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
- return -EPERM;
+ num_tc = netdev_get_num_tc(adapter->netdev);
- adapter->num_vfs = num_vfs;
+ if (num_tc > 4) {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
+ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
+ return -EPERM;
+ }
+ } else if ((num_tc > 1) && (num_tc <= 4)) {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
+ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
+ return -EPERM;
+ }
+ } else {
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
+ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
+ return -EPERM;
+ }
+ }
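
With the IXGBE_MAX_VFS_*TC limits added in ixgbe_sriov.h below, the check works out as in this worked example (hypothetical request, one PF pool in use):

    /* num_tc = 8  ->  limit = IXGBE_MAX_VFS_8TC = 16
     *   request 16 VFs: 16 + 1 > 16  -> -EPERM
     *   request 15 VFs: 15 + 1 <= 16 -> allowed
     */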
- err = __ixgbe_enable_sriov(adapter);
+ err = __ixgbe_enable_sriov(adapter, num_vfs);
if (err)
return err;
- for (i = 0; i < adapter->num_vfs; i++)
+ for (i = 0; i < num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
/* reset before enabling SRIOV to avoid mailbox issues */
@@ -650,58 +677,6 @@ update_vlvfb:
}
}
-static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- u8 num_tcs = netdev_get_num_tc(adapter->netdev);
-
- /* remove VLAN filters beloning to this VF */
- ixgbe_clear_vf_vlans(adapter, vf);
-
- /* add back PF assigned VLAN or VLAN 0 */
- ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
-
- /* reset offloads to defaults */
- ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
-
- /* set outgoing tags for VFs */
- if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
- ixgbe_clear_vmvir(adapter, vf);
- } else {
- if (vfinfo->pf_qos || !num_tcs)
- ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
- vfinfo->pf_qos, vf);
- else
- ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
- adapter->default_up, vf);
-
- if (vfinfo->spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- }
-
- /* reset multicast table array for vf */
- adapter->vfinfo[vf].num_vf_mc_hashes = 0;
-
- /* Flush and reset the mta with the new values */
- ixgbe_set_rx_mode(adapter->netdev);
-
- ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
-
- /* reset VF api back to unknown */
- adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
-}
-
-static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
- int vf, unsigned char *mac_addr)
-{
- ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
-
- return 0;
-}
-
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
@@ -757,6 +732,59 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
return 0;
}
+static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+ /* remove VLAN filters belonging to this VF */
+ ixgbe_clear_vf_vlans(adapter, vf);
+
+ /* add back PF assigned VLAN or VLAN 0 */
+ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
+
+ /* reset offloads to defaults */
+ ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ ixgbe_clear_vmvir(adapter, vf);
+ } else {
+ if (vfinfo->pf_qos || !num_tcs)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ adapter->default_up, vf);
+
+ if (vfinfo->spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ }
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+ ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);
+
+ /* reset VF api back to unknown */
+ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+}
+
+static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+ ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+
+ return 0;
+}
+
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -1085,7 +1113,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
return -EOPNOTSUPP;
}
- memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key));
+ memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);
return 0;
}
@@ -1319,18 +1347,26 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ if (is_zero_ether_addr(mac)) {
+ adapter->vfinfo[vf].pf_set_mac = false;
+ dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
+ } else if (is_valid_ether_addr(mac)) {
+ adapter->vfinfo[vf].pf_set_mac = true;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
+ mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
+ }
+ } else {
return -EINVAL;
- adapter->vfinfo[vf].pf_set_mac = true;
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
- " change effective.");
- if (test_bit(__IXGBE_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
- " attempting to use the VF device.\n");
}
+
return ixgbe_set_vf_mac(adapter, vf, mac);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 0c7977d27b71..cf67b9b18ed7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -33,6 +33,9 @@
* 63 (IXGBE_MAX_VF_FUNCTIONS - 1)
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VF_FUNCTIONS
+#define IXGBE_MAX_VFS_4TC 32
+#define IXGBE_MAX_VFS_8TC 16
#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
@@ -56,7 +59,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
#endif
int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 1d07f2ead914..9c2460c5ef1b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -85,6 +85,7 @@
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
@@ -1387,9 +1388,6 @@ struct ixgbe_thermal_sensor_data {
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
-/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
-
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
#define IXGBE_PHY_INIT_END_NL 0xFFFF
@@ -3128,7 +3126,9 @@ enum ixgbe_phy_type {
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_xfi,
ixgbe_phy_x550em_ext_t,
+ ixgbe_phy_ext_1g_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -3754,15 +3754,6 @@ struct ixgbe_info {
#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
-#define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
-
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3779,12 +3770,14 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
-#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#define IXGBE_NW_MNG_IF_SEL 0x00011178
#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21)
#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 84a467a8ed3d..6ea0d6a5fb90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -95,6 +95,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -105,10 +106,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
ixgbe_clear_tx_pending(hw);
mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_dbg(hw, "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
ctrl = IXGBE_CTRL_RST;
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
usleep_range(1000, 1200);
/* Poll for reset bit to self-clear indicating reset is complete */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 200f847fd8f3..2ba024b575ea 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -49,6 +49,18 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
return 0;
}
+static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* Start with X540 invariants, since so similar */
+ ixgbe_get_invariants_X540(hw);
+
+ phy->ops.set_phy_power = NULL;
+
+ return 0;
+}
+
static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -320,6 +332,9 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->phy.type = ixgbe_phy_x550em_xfi;
+ break;
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
@@ -331,9 +346,21 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
else
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
/* Fallthrough */
- case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->phy.type = ixgbe_phy_ext_1g_t;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
default:
break;
}
@@ -2145,6 +2172,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
ixgbe_set_soft_rate_select_speed;
break;
case ixgbe_media_type_copper:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
+ break;
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
mac->ops.setup_fc = ixgbe_setup_fc_generic;
mac->ops.check_link = ixgbe_check_link_t_X550em;
@@ -2215,8 +2244,39 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
- *speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kx4:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_2_5GB_FULL |
+ IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case ixgbe_phy_x550em_xfi:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case ixgbe_phy_ext_1g_t:
+ case ixgbe_phy_sgmii:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ /* check different backplane modes */
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+ /* fall through */
+ default:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
*autoneg = true;
}
return 0;
@@ -2473,44 +2533,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
return ixgbe_restart_an_internal_phy_x550em(hw);
}
-/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
- * @hw: pointer to hardware structure
- *
- * Configures the integrated KX4 PHY.
- **/
-static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
-{
- s32 status;
- u32 reg_val;
-
- status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, &reg_val);
- if (status)
- return status;
-
- reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
- IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
-
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
-
- /* Advertise 10G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
-
- /* Advertise 1G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
-
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
- status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, reg_val);
-
- return status;
-}
-
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
@@ -2521,6 +2543,9 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return 0;
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -3134,7 +3159,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Set functions pointers based on phy type */
switch (hw->phy.type) {
case ixgbe_phy_x550em_kx4:
- phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+ phy->ops.setup_link = NULL;
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
@@ -3143,6 +3168,12 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
+ case ixgbe_phy_x550em_xfi:
+ /* link is managed by HW */
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
case ixgbe_phy_x550em_ext_t:
/* Save NW management interface connected on board. This is used
* to determine internal PHY mode
@@ -3164,10 +3195,18 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
+ case ixgbe_phy_sgmii:
+ phy->ops.setup_link = NULL;
+ break;
case ixgbe_phy_fw:
phy->ops.setup_link = ixgbe_setup_fw_link;
phy->ops.reset = ixgbe_reset_phy_fw;
break;
+ case ixgbe_phy_ext_1g_t:
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = NULL;
+ phy->ops.write_reg = NULL;
+ break;
default:
break;
}
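With the xfi, sgmii and ext_1g_t cases above, several PHY ops are deliberately left NULL because the link is managed by hardware or firmware. Callers are assumed to follow the guarded-call convention before invoking an op; a sketch of that convention (hypothetical call site, not taken verbatim from the driver):

	/* Skip PHY link setup when no op is wired up for this PHY type. */
	if (hw->phy.ops.setup_link) {
		status = hw->phy.ops.setup_link(hw);
		if (status)
			return status;
	}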
@@ -3193,6 +3232,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
@@ -3300,6 +3340,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
u32 ctrl = 0;
u32 i;
bool link_up = false;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -3345,9 +3386,16 @@ mac_reset_top:
ctrl = IXGBE_CTRL_RST;
}
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_dbg(hw, "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
usleep_range(1000, 1200);
/* Poll for reset bit to self-clear meaning reset is complete */
@@ -3780,7 +3828,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
.get_media_type = ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
.get_wwn_prefix = NULL,
- .setup_link = NULL, /* defined later */
+ .setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = ixgbe_get_link_capabilities_X550em,
.get_bus_info = ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
@@ -3862,6 +3910,17 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
.write_reg = &ixgbe_write_phy_reg_generic,
};
+static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = {
+ X550_COMMON_PHY
+ .check_overtemp = NULL,
+ .init = ixgbe_init_phy_ops_X550em,
+ .identify = ixgbe_identify_phy_x550em,
+ .read_reg = NULL,
+ .write_reg = NULL,
+ .read_reg_mdi = NULL,
+ .write_reg_mdi = NULL,
+};
+
static const struct ixgbe_phy_operations phy_ops_x550em_a = {
X550_COMMON_PHY
.check_overtemp = &ixgbe_tn_check_overtemp,
@@ -3924,6 +3983,16 @@ const struct ixgbe_info ixgbe_X550EM_x_info = {
.link_ops = &link_ops_x550em_x,
};
+const struct ixgbe_info ixgbe_x550em_x_fw_info = {
+ .mac = ixgbe_mac_X550EM_x,
+ .get_invariants = ixgbe_get_invariants_X550_x_fw,
+ .mac_ops = &mac_ops_X550EM_x,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_x_fw,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X550EM_x,
+};
+
const struct ixgbe_info ixgbe_x550em_a_info = {
.mac = ixgbe_mac_x550em_a,
.get_invariants = &ixgbe_get_invariants_X550_a,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 1f6c0ecd50bb..ff9d05f308ee 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -80,7 +80,7 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
#define IXGBEVF_QUEUE_STATS_LEN ( \
(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
- (sizeof(struct ixgbe_stats) / sizeof(u64)))
+ (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
@@ -91,18 +91,18 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-static int ixgbevf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbevf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = 0;
bool link_up;
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = -1;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = -1;
hw->mac.get_link_status = 1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -122,11 +122,11 @@ static int ixgbevf_get_settings(struct net_device *netdev,
break;
}
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.speed = speed;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
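This hunk converts ixgbevf from the legacy get_settings/ethtool_cmd interface to get_link_ksettings, whose link-mode bitmaps replace the old u32 supported word; the deprecated transceiver field (XCVR_DUMMY1) has no equivalent in the new API and is simply dropped. A minimal sketch of a new-style callback for a fixed 10G full-duplex device (the example_ name is hypothetical):

	static int example_get_link_ksettings(struct net_device *netdev,
					      struct ethtool_link_ksettings *cmd)
	{
		/* Bitmap helpers replace direct assignments to ecmd->supported. */
		ethtool_link_ksettings_zero_link_mode(cmd, supported);
		ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);

		cmd->base.autoneg = AUTONEG_DISABLE;
		cmd->base.speed = SPEED_10000;
		cmd->base.duplex = DUPLEX_FULL;
		return 0;
	}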
@@ -855,7 +855,8 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
if (key)
- memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
+ memcpy(key, adapter->rss_key,
+ ixgbevf_get_rxfh_key_size(netdev));
if (indir) {
int i;
@@ -885,7 +886,6 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
- .get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
@@ -905,6 +905,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
+ .get_link_ksettings = ixgbevf_get_link_ksettings,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index a8cbc2dda0dd..581f44bbd7b3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -319,7 +319,7 @@ struct ixgbevf_adapter {
spinlock_t mbx_lock;
unsigned long last_reset;
- u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+ u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 80bab261a0ec..eee29bddddc1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1660,6 +1660,28 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
reg_idx);
}
+/**
+ * ixgbevf_init_rss_key - Initialize adapter RSS key
+ * @adapter: device handle
+ *
+ * Allocates and initializes the RSS key if it is not allocated.
+ **/
+static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
+{
+ u32 *rss_key;
+
+ if (!adapter->rss_key) {
+ rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
+ if (unlikely(!rss_key))
+ return -ENOMEM;
+
+ netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
+ adapter->rss_key = rss_key;
+ }
+
+ return 0;
+}
+
static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1668,9 +1690,8 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
u8 i, j;
/* Fill out hash function seeds */
- netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
- IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
if (j == rss_i)
@@ -2611,6 +2632,12 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->mbx.ops.init_params(hw);
+ if (hw->mac.type >= ixgbe_mac_X550_vf) {
+ err = ixgbevf_init_rss_key(adapter);
+ if (err)
+ goto out;
+ }
+
/* assume legacy case in which PF would only give VF 2 queues */
hw->mac.max_tx_queues = 2;
hw->mac.max_rx_queues = 2;
@@ -4127,6 +4154,7 @@ err_register:
err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
iounmap(adapter->io_addr);
+ kfree(adapter->rss_key);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -4173,6 +4201,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
hw_dbg(&adapter->hw, "Remove complete\n");
+ kfree(adapter->rss_key);
disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 8a5db9d7219d..b6d0c01eab10 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -333,7 +333,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
switch (hw->api_version) {
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
- if (hw->mac.type >= ixgbe_mac_X550_vf)
+ if (hw->mac.type < ixgbe_mac_X550_vf)
break;
default:
return -EOPNOTSUPP;
@@ -399,7 +399,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
switch (hw->api_version) {
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
- if (hw->mac.type >= ixgbe_mac_X550_vf)
+ if (hw->mac.type < ixgbe_mac_X550_vf)
break;
default:
return -EOPNOTSUPP;
@@ -419,7 +419,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
return -EPERM;
/* If we didn't get an ACK there must have been
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index d2555e8b947e..da6fb825afea 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -82,13 +82,13 @@ config MVNETA_BM
that all dependencies are met.
config MVPP2
- tristate "Marvell Armada 375 network interface support"
+ tristate "Marvell Armada 375/7K/8K network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
select MVMDIO
---help---
This driver supports the network interface units in the
- Marvell ARMADA 375 SoC.
+ Marvell ARMADA 375, 7K and 8K SoCs.
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index a0d1b084ecec..90a60b98c28e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -53,7 +53,7 @@
struct orion_mdio_dev {
struct mutex lock;
void __iomem *regs;
- struct clk *clk;
+ struct clk *clk[3];
/*
* If we have access to the error interrupt pin (which is
* somewhat misnamed as it not only reflects internal errors
@@ -187,7 +187,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
struct resource *r;
struct mii_bus *bus;
struct orion_mdio_dev *dev;
- int ret;
+ int i, ret;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -216,11 +216,20 @@ static int orion_mdio_probe(struct platform_device *pdev)
init_waitqueue_head(&dev->smi_busy_wait);
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(dev->clk))
- clk_prepare_enable(dev->clk);
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_prepare_enable(dev->clk[i]);
+ }
dev->err_interrupt = platform_get_irq(pdev, 0);
+ if (dev->err_interrupt > 0 &&
+ resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
+ dev_err(&pdev->dev,
+ "disabling interrupt, resource size is too small\n");
+ dev->err_interrupt = 0;
+ }
if (dev->err_interrupt > 0) {
ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
orion_mdio_err_irq,
@@ -251,8 +260,16 @@ static int orion_mdio_probe(struct platform_device *pdev)
return 0;
out_mdio:
- if (!IS_ERR(dev->clk))
- clk_disable_unprepare(dev->clk);
+ if (dev->err_interrupt > 0)
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_disable_unprepare(dev->clk[i]);
+ clk_put(dev->clk[i]);
+ }
+
return ret;
}
@@ -260,11 +277,18 @@ static int orion_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
struct orion_mdio_dev *dev = bus->priv;
+ int i;
- writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+ if (dev->err_interrupt > 0)
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
mdiobus_unregister(bus);
- if (!IS_ERR(dev->clk))
- clk_disable_unprepare(dev->clk);
+
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_disable_unprepare(dev->clk[i]);
+ clk_put(dev->clk[i]);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 61dd4462411c..d297011b535d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -431,6 +431,7 @@ struct mvneta_port {
/* Flags for special SoC configurations */
bool neta_armada3700;
u16 rx_offset_correction;
+ const struct mbus_dram_target_info *dram_target_info;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -1106,7 +1107,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
q_map = 0;
for (queue = 0; queue < txq_number; queue++) {
struct mvneta_tx_queue *txq = &pp->txqs[queue];
- if (txq->descs != NULL)
+ if (txq->descs)
q_map |= (1 << queue);
}
mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
@@ -1115,7 +1116,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
for (queue = 0; queue < rxq_number; queue++) {
struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
- if (rxq->descs != NULL)
+ if (rxq->descs)
q_map |= (1 << queue);
}
mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
@@ -2849,7 +2850,7 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
&rxq->descs_phys, GFP_KERNEL);
- if (rxq->descs == NULL)
+ if (!rxq->descs)
return -ENOMEM;
rxq->last_desc = rxq->size - 1;
@@ -2919,7 +2920,7 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
&txq->descs_phys, GFP_KERNEL);
- if (txq->descs == NULL)
+ if (!txq->descs)
return -ENOMEM;
txq->last_desc = txq->size - 1;
@@ -2932,8 +2933,9 @@ static int mvneta_txq_init(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
- txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
- if (txq->tx_skb == NULL) {
+ txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
+ GFP_KERNEL);
+ if (!txq->tx_skb) {
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -2944,7 +2946,7 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
- if (txq->tso_hdrs == NULL) {
+ if (!txq->tso_hdrs) {
kfree(txq->tx_skb);
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -3317,6 +3319,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
struct phy_device *phy_dev;
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
pp->phy_interface);
@@ -3325,6 +3328,9 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
return -ENODEV;
}
+ phy_ethtool_get_wol(phy_dev, &wol);
+ device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->advertising = phy_dev->supported;
@@ -3941,10 +3947,16 @@ static void mvneta_ethtool_get_wol(struct net_device *dev,
static int mvneta_ethtool_set_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
+ int ret;
+
if (!dev->phydev)
return -EOPNOTSUPP;
- return phy_ethtool_set_wol(dev->phydev, wol);
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
+
+ return ret;
}
static const struct net_device_ops mvneta_netdev_ops = {
@@ -3991,8 +4003,7 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp)
/* Set port default values */
mvneta_defaults_set(pp);
- pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
- GFP_KERNEL);
+ pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
if (!pp->txqs)
return -ENOMEM;
@@ -4004,8 +4015,7 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp)
txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
}
- pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
- GFP_KERNEL);
+ pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
if (!pp->rxqs)
return -ENOMEM;
@@ -4016,9 +4026,11 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp)
rxq->size = pp->rx_ring_size;
rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
rxq->time_coal = MVNETA_RX_COAL_USEC;
- rxq->buf_virt_addr = devm_kmalloc(pp->dev->dev.parent,
- rxq->size * sizeof(void *),
- GFP_KERNEL);
+ rxq->buf_virt_addr = devm_kmalloc_array(pp->dev->dev.parent,
+ rxq->size,
+ sizeof(*rxq->buf_virt_addr),
+ GFP_KERNEL);
if (!rxq->buf_virt_addr)
return -ENOMEM;
}
@@ -4098,6 +4110,8 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
ctrl |= MVNETA_GMAC2_PORT_RGMII;
break;
default:
@@ -4118,7 +4132,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
- const struct mbus_dram_target_info *dram_target_info;
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
@@ -4267,13 +4280,13 @@ static int mvneta_probe(struct platform_device *pdev)
pp->tx_csum_limit = tx_csum_limit;
- dram_target_info = mv_mbus_dram_info();
+ pp->dram_target_info = mv_mbus_dram_info();
/* Armada3700 requires setting a default Mbus window configuration,
* but without using a filled mbus_dram_target_info structure.
*/
- if (dram_target_info || pp->neta_armada3700)
- mvneta_conf_mbus_windows(pp, dram_target_info);
+ if (pp->dram_target_info || pp->neta_armada3700)
+ mvneta_conf_mbus_windows(pp, pp->dram_target_info);
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -4405,6 +4418,61 @@ static int mvneta_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int mvneta_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (netif_running(dev))
+ mvneta_stop(dev);
+ netif_device_detach(dev);
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
+ return 0;
+}
+
+static int mvneta_resume(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct net_device *dev = dev_get_drvdata(device);
+ struct mvneta_port *pp = netdev_priv(dev);
+ int err;
+
+ clk_prepare_enable(pp->clk);
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+ if (pp->dram_target_info || pp->neta_armada3700)
+ mvneta_conf_mbus_windows(pp, pp->dram_target_info);
+ if (pp->bm_priv) {
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev, "use SW buffer management\n");
+ pp->bm_priv = NULL;
+ }
+ }
+ mvneta_defaults_set(pp);
+ err = mvneta_port_power_up(pp, pp->phy_interface);
+ if (err < 0) {
+ dev_err(device, "can't power up port\n");
+ return err;
+ }
+
+ if (pp->use_inband_status)
+ mvneta_fixed_link_update(pp, dev->phydev);
+
+ netif_device_attach(dev);
+ if (netif_running(dev)) {
+ mvneta_open(dev);
+ mvneta_set_rx_mode(dev);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
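SIMPLE_DEV_PM_OPS only wires the two callbacks into system-sleep transitions; with CONFIG_PM_SLEEP unset it expands to an empty ops table, which is why the callbacks themselves sit under #ifdef CONFIG_PM_SLEEP. Roughly, the macro yields the following (a sketch, not the exact kernel definition):

	static const struct dev_pm_ops mvneta_pm_ops = {
		.suspend  = mvneta_suspend,  .resume  = mvneta_resume,
		.freeze   = mvneta_suspend,  .thaw    = mvneta_resume,
		.poweroff = mvneta_suspend,  .restore = mvneta_resume,
	};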
+
static const struct of_device_id mvneta_match[] = {
{ .compatible = "marvell,armada-370-neta" },
{ .compatible = "marvell,armada-xp-neta" },
@@ -4419,6 +4487,7 @@ static struct platform_driver mvneta_driver = {
.driver = {
.name = MVNETA_DRIVER_NAME,
.of_match_table = mvneta_match,
+ .pm = &mvneta_pm_ops,
},
};
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d00421b9ffea..9b875d776b29 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -25,6 +25,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
@@ -49,9 +50,11 @@
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
-#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
-#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
@@ -99,6 +102,7 @@
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
+#define MVPP22_DESC_ADDR_OFFS 8
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
@@ -117,9 +121,6 @@
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
-#define MVPP2_TXQ_THRESH_REG 0x2094
-#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
-#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
@@ -140,6 +141,7 @@
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
+#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
@@ -152,10 +154,52 @@
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
+
+/* Values for AXI Bridge registers */
+#define MVPP22_AXI_ATTR_CACHE_OFFS 0
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
+
+#define MVPP22_AXI_CODE_CACHE_OFFS 0
+#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
+
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
+
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
+
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
-#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
+#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
+
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
+
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
@@ -210,14 +254,19 @@
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
+#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
+#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
-#define MVPP2_BM_MC_RLS_REG 0x64c4
-#define MVPP2_BM_MC_ID_MASK 0xfff
-#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
+#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
+#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
+#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
@@ -287,6 +336,24 @@
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+#define MVPP22_GMAC_CTRL_4_REG 0x90
+#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
+#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
+#define MVPP22_CTRL4_SYNC_BYPASS BIT(6)
+#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+
+/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+ * relative to port->base.
+ */
+#define MVPP22_XLG_CTRL3_REG 0x11c
+#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
+#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
+
+/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+#define MVPP22_SMI_MISC_CFG_REG 0x1204
+#define MVPP22_SMI_POLLING_EN BIT(10)
+
+#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
@@ -335,15 +402,9 @@
/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
-/* Maximum number of RXQs used by single port */
-#define MVPP2_MAX_RXQ 8
-
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4
-/* Total number of RXQs available to all ports */
-#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
-
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128
@@ -615,6 +676,11 @@ enum mvpp2_prs_l3_cast {
*/
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
+#define MVPP21_ADDR_SPACE_SZ 0
+#define MVPP22_ADDR_SPACE_SZ SZ_64K
+
+#define MVPP2_MAX_CPUS 4
+
enum mvpp2_bm_type {
MVPP2_BM_FREE,
MVPP2_BM_SWF_LONG,
@@ -626,12 +692,19 @@ enum mvpp2_bm_type {
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
- void __iomem *base;
void __iomem *lms_base;
+ void __iomem *iface_base;
+
+ /* On PPv2.2, each CPU can access the base register through a
+ * separate address space, each 64 KB apart from each
+ * other.
+ */
+ void __iomem *cpu_base[MVPP2_MAX_CPUS];
/* Common clocks */
struct clk *pp_clk;
struct clk *gop_clk;
+ struct clk *mg_clk;
/* List of pointers to port structures */
struct mvpp2_port **port_list;
@@ -649,6 +722,12 @@ struct mvpp2 {
/* Tclk value */
u32 tclk;
+
+ /* HW version */
+ enum { MVPP21, MVPP22 } hw_version;
+
+ /* Maximum number of RXQs per port */
+ unsigned int max_port_rxqs;
};
struct mvpp2_pcpu_stats {
@@ -670,6 +749,11 @@ struct mvpp2_port_pcpu {
struct mvpp2_port {
u8 id;
+ /* Index of the port from the "group of ports" complex point
+ * of view
+ */
+ int gop_id;
+
int irq;
struct mvpp2 *priv;
@@ -741,22 +825,24 @@ struct mvpp2_port {
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
-struct mvpp2_tx_desc {
+/* HW TX descriptor for PPv2.1 */
+struct mvpp21_tx_desc {
u32 command; /* Options used by HW for packet transmitting.*/
u8 packet_offset; /* the offset from the buffer beginning */
u8 phys_txq; /* destination queue ID */
u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_phys_addr; /* physical addr of transmitted buffer */
+ u32 buf_dma_addr; /* physical addr of transmitted buffer */
u32 buf_cookie; /* cookie for access to TX buffer in tx path */
u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
u32 reserved2; /* reserved (for future use) */
};
-struct mvpp2_rx_desc {
+/* HW RX descriptor for PPv2.1 */
+struct mvpp21_rx_desc {
u32 status; /* info about received packet */
u16 reserved1; /* parser_info (for future use, PnC) */
u16 data_size; /* size of received packet in bytes */
- u32 buf_phys_addr; /* physical address of the buffer */
+ u32 buf_dma_addr; /* physical address of the buffer */
u32 buf_cookie; /* cookie for access to RX buffer in rx path */
u16 reserved2; /* gem_port_id (for future use, PON) */
u16 reserved3; /* csum_l4 (for future use, PnC) */
@@ -767,12 +853,51 @@ struct mvpp2_rx_desc {
u32 reserved8;
};
+/* HW TX descriptor for PPv2.2 */
+struct mvpp22_tx_desc {
+ u32 command;
+ u8 packet_offset;
+ u8 phys_txq;
+ u16 data_size;
+ u64 reserved1;
+ u64 buf_dma_addr_ptp;
+ u64 buf_cookie_misc;
+};
+
+/* HW RX descriptor for PPv2.2 */
+struct mvpp22_rx_desc {
+ u32 status;
+ u16 reserved1;
+ u16 data_size;
+ u32 reserved2;
+ u32 reserved3;
+ u64 buf_dma_addr_key_hash;
+ u64 buf_cookie_misc;
+};
+
+/* Opaque type used by the driver to manipulate the HW TX and RX
+ * descriptors
+ */
+struct mvpp2_tx_desc {
+ union {
+ struct mvpp21_tx_desc pp21;
+ struct mvpp22_tx_desc pp22;
+ };
+};
+
+struct mvpp2_rx_desc {
+ union {
+ struct mvpp21_rx_desc pp21;
+ struct mvpp22_rx_desc pp22;
+ };
+};
+
struct mvpp2_txq_pcpu_buf {
/* Transmitted SKB */
struct sk_buff *skb;
/* Physical address of transmitted buffer */
- dma_addr_t phys;
+ dma_addr_t dma;
/* Size transmitted */
size_t size;
@@ -825,7 +950,7 @@ struct mvpp2_tx_queue {
struct mvpp2_tx_desc *descs;
/* DMA address of the Tx DMA descriptors array */
- dma_addr_t descs_phys;
+ dma_addr_t descs_dma;
/* Index of the last Tx DMA descriptor */
int last_desc;
@@ -848,7 +973,7 @@ struct mvpp2_rx_queue {
struct mvpp2_rx_desc *descs;
/* DMA address of the RX DMA descriptors array */
- dma_addr_t descs_phys;
+ dma_addr_t descs_dma;
/* Index of the last RX DMA descriptor */
int last_desc;
@@ -912,6 +1037,8 @@ struct mvpp2_bm_pool {
/* Buffer Pointers Pool External (BPPE) size */
int size;
+ /* BPPE size in bytes */
+ int size_bytes;
/* Number of buffers for this pool */
int buf_num;
/* Pool buffer size */
@@ -922,29 +1049,13 @@ struct mvpp2_bm_pool {
/* BPPE virtual base address */
u32 *virt_addr;
- /* BPPE physical base address */
- dma_addr_t phys_addr;
+ /* BPPE DMA base address */
+ dma_addr_t dma_addr;
/* Ports using BM pool */
u32 port_map;
};
-struct mvpp2_buff_hdr {
- u32 next_buff_phys_addr;
- u32 next_buff_virt_addr;
- u16 byte_count;
- u16 info;
- u8 reserved1; /* bm_qset (for future use, BM) */
-};
-
-/* Buffer header info bits */
-#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
-#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
-#define MVPP2_B_HDR_INFO_LAST_OFFS 12
-#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
-#define MVPP2_B_HDR_INFO_IS_LAST(info) \
- ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
-
/* Static declarations */
/* Number of RXQs used by single port */
@@ -959,12 +1070,177 @@ static int txq_number = MVPP2_MAX_TXQ;
static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
- writel(data, priv->base + offset);
+ writel(data, priv->cpu_base[0] + offset);
}
static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
- return readl(priv->base + offset);
+ return readl(priv->cpu_base[0] + offset);
+}
+
+/* These accessors should be used to access:
+ *
+ * - per-CPU registers, where each CPU has its own copy of the
+ * register.
+ *
+ * MVPP2_BM_VIRT_ALLOC_REG
+ * MVPP22_BM_ADDR_HIGH_ALLOC
+ * MVPP22_BM_ADDR_HIGH_RLS_REG
+ * MVPP2_BM_VIRT_RLS_REG
+ * MVPP2_ISR_RX_TX_CAUSE_REG
+ * MVPP2_ISR_RX_TX_MASK_REG
+ * MVPP2_TXQ_NUM_REG
+ * MVPP2_AGGR_TXQ_UPDATE_REG
+ * MVPP2_TXQ_RSVD_REQ_REG
+ * MVPP2_TXQ_RSVD_RSLT_REG
+ * MVPP2_TXQ_SENT_REG
+ * MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific CPU
+ * window, because they are related to an access to a per-CPU
+ * register
+ *
+ * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
+ * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
+ * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
+ */
+static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+ u32 offset, u32 data)
+{
+ writel(data, priv->cpu_base[cpu] + offset);
+}
+
+static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+ u32 offset)
+{
+ return readl(priv->cpu_base[cpu] + offset);
+}
+
+static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.buf_dma_addr;
+ else
+ return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
+}
+
+static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ dma_addr_t dma_addr)
+{
+ if (port->priv->hw_version == MVPP21) {
+ tx_desc->pp21.buf_dma_addr = dma_addr;
+ } else {
+ u64 val = (u64)dma_addr;
+
+ tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
+ tx_desc->pp22.buf_dma_addr_ptp |= val;
+ }
+}
+
+static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.data_size;
+ else
+ return tx_desc->pp22.data_size;
+}
+
+static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ size_t size)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.data_size = size;
+ else
+ tx_desc->pp22.data_size = size;
+}
+
+static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int txq)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.phys_txq = txq;
+ else
+ tx_desc->pp22.phys_txq = txq;
+}
+
+static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int command)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.command = command;
+ else
+ tx_desc->pp22.command = command;
+}
+
+static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ unsigned int offset)
+{
+ if (port->priv->hw_version == MVPP21)
+ tx_desc->pp21.packet_offset = offset;
+ else
+ tx_desc->pp22.packet_offset = offset;
+}
+
+static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return tx_desc->pp21.packet_offset;
+ else
+ return tx_desc->pp22.packet_offset;
+}
+
+static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.buf_dma_addr;
+ else
+ return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
+}
+
+static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.buf_cookie;
+ else
+ return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
+}
+
+static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.data_size;
+ else
+ return rx_desc->pp22.data_size;
+}
+
+static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
+{
+ if (port->priv->hw_version == MVPP21)
+ return rx_desc->pp21.status;
+ else
+ return rx_desc->pp22.status;
}
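On PPv2.2 the 40-bit DMA address shares a 64-bit descriptor word with PTP (TX) or key/hash (RX) metadata, so the accessors above mask with GENMASK_ULL(40, 0), i.e. bits 40 down to 0. A self-contained user-space illustration of the same arithmetic (values hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Equivalent of the kernel's GENMASK_ULL(h, l): bits h..l set. */
	#define GENMASK_ULL(h, l) \
		((~0ULL << (l)) & (~0ULL >> (63 - (h))))

	int main(void)
	{
		uint64_t buf_dma_addr_ptp = 0xfff0123456789aULL; /* packed word */
		uint64_t dma = buf_dma_addr_ptp & GENMASK_ULL(40, 0);

		printf("dma = %#llx\n", (unsigned long long)dma); /* low 41 bits */
		return 0;
	}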
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -974,15 +1250,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
txq_pcpu->txq_get_index = 0;
}
-static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+static void mvpp2_txq_inc_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc)
{
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_put_index;
tx_buf->skb = skb;
- tx_buf->size = tx_desc->data_size;
- tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
+ tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
+ tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
+ mvpp2_txdesc_offset_get(port, tx_desc);
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -1411,7 +1689,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
pe.index = MVPP2_PE_DROP_ALL;
@@ -1448,7 +1726,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
pe.index = MVPP2_PE_MAC_PROMISCUOUS;
@@ -1494,7 +1772,7 @@ static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
pe.index = index;
@@ -1546,7 +1824,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
pe.index = tid;
@@ -1609,7 +1887,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
mvpp2_prs_hw_read(priv, &pe);
} else {
/* Entry doesn't exist - create new */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
pe.index = tid;
@@ -1743,10 +2021,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
if (tid <= tid_aux) {
ret = -EINVAL;
- goto error;
+ goto free_pe;
}
- memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
+ memset(pe, 0, sizeof(*pe));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
pe->index = tid;
@@ -1775,8 +2053,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_tcam_port_map_set(pe, port_map);
mvpp2_prs_hw_write(priv, pe);
-
-error:
+free_pe:
kfree(pe);
return ret;
@@ -1861,7 +2138,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
ai = mvpp2_prs_double_vlan_ai_free_get(priv);
if (ai < 0) {
ret = ai;
- goto error;
+ goto free_pe;
}
/* Get first single/triple vlan tid */
@@ -1884,10 +2161,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
if (tid >= tid_aux) {
ret = -ERANGE;
- goto error;
+ goto free_pe;
}
- memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(pe, 0, sizeof(*pe));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
pe->index = tid;
@@ -1911,8 +2188,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
/* Update ports' mask */
mvpp2_prs_tcam_port_map_set(pe, port_map);
mvpp2_prs_hw_write(priv, pe);
-
-error:
+free_pe:
kfree(pe);
return ret;
}
@@ -1934,7 +2210,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
@@ -1992,7 +2268,7 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
@@ -2048,7 +2324,7 @@ static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
pe.index = tid;
@@ -2087,7 +2363,7 @@ static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
pe.index = tid;
@@ -2147,7 +2423,7 @@ static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
int port;
for (port = 0; port < MVPP2_MAX_PORTS; port++) {
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
@@ -2169,7 +2445,7 @@ static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
pe.index = MVPP2_PE_MH_DEFAULT;
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
@@ -2192,7 +2468,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
/* Non-promiscuous mode for all ports - DROP unknown packets */
pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
@@ -2253,7 +2529,7 @@ static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
/* Set default entry, in case DSA or EDSA tag not found */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
pe.index = MVPP2_PE_DSA_DEFAULT;
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
@@ -2283,7 +2559,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2309,7 +2585,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2339,7 +2615,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2373,7 +2649,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2438,7 +2714,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
pe.index = tid;
@@ -2535,7 +2811,7 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
return err;
/* Set default double vlan entry */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
pe.index = MVPP2_PE_VLAN_DBL;
@@ -2555,7 +2831,7 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Set default vlan none entry */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
pe.index = MVPP2_PE_VLAN_NONE;
@@ -2585,7 +2861,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
pe.index = tid;
@@ -2635,7 +2911,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
pe.index = tid;
@@ -2662,7 +2938,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
pe.index = tid;
@@ -2720,7 +2996,7 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
return err;
/* Default IPv4 entry for unknown protocols */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_PROTO_UN;
@@ -2745,7 +3021,7 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Default IPv4 entry for unicast address */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_ADDR_UN;
@@ -2813,7 +3089,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
if (tid < 0)
return tid;
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
pe.index = tid;
@@ -2834,7 +3110,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Default IPv6 entry for unknown protocols */
- memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
pe.index = MVPP2_PE_IP6_PROTO_UN;
@@ -2927,7 +3203,7 @@ static int mvpp2_prs_default_init(struct platform_device *pdev,
mvpp2_prs_hw_inv(priv, index);
priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
- sizeof(struct mvpp2_prs_shadow),
+ sizeof(*priv->prs_shadow),
GFP_KERNEL);
if (!priv->prs_shadow)
return -ENOMEM;
@@ -3378,27 +3654,39 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool, int size)
{
- int size_bytes;
u32 val;
- size_bytes = sizeof(u32) * size;
- bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
- &bm_pool->phys_addr,
+ /* Number of buffer pointers must be a multiple of 16, as per
+ * hardware constraints
+ */
+ if (!IS_ALIGNED(size, 16))
+ return -EINVAL;
+
+ /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+ * bytes per buffer pointer
+ */
+ if (priv->hw_version == MVPP21)
+ bm_pool->size_bytes = 2 * sizeof(u32) * size;
+ else
+ bm_pool->size_bytes = 2 * sizeof(u64) * size;
+
+ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
+ &bm_pool->dma_addr,
GFP_KERNEL);
if (!bm_pool->virt_addr)
return -ENOMEM;
if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
MVPP2_BM_POOL_PTR_ALIGN)) {
- dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
- bm_pool->phys_addr);
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
+ bm_pool->virt_addr, bm_pool->dma_addr);
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
return -ENOMEM;
}
mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
- bm_pool->phys_addr);
+ lower_32_bits(bm_pool->dma_addr));
mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
@@ -3426,6 +3714,34 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
+static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool,
+ dma_addr_t *dma_addr,
+ phys_addr_t *phys_addr)
+{
+ int cpu = smp_processor_id();
+
+ *dma_addr = mvpp2_percpu_read(priv, cpu,
+ MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+ *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+
+ if (priv->hw_version == MVPP22) {
+ u32 val;
+ u32 dma_addr_highbits, phys_addr_highbits;
+
+ val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+ dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
+ phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
+ MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
+
+ if (sizeof(dma_addr_t) == 8)
+ *dma_addr |= (u64)dma_addr_highbits << 32;
+
+ if (sizeof(phys_addr_t) == 8)
+ *phys_addr |= (u64)phys_addr_highbits << 32;
+ }
+}
+
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool)
@@ -3433,21 +3749,21 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
int i;
for (i = 0; i < bm_pool->buf_num; i++) {
- dma_addr_t buf_phys_addr;
- unsigned long vaddr;
+ dma_addr_t buf_dma_addr;
+ phys_addr_t buf_phys_addr;
+ void *data;
- /* Get buffer virtual address (indirect access) */
- buf_phys_addr = mvpp2_read(priv,
- MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
- vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+ mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
+ &buf_dma_addr, &buf_phys_addr);
- dma_unmap_single(dev, buf_phys_addr,
+ dma_unmap_single(dev, buf_dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE);
- if (!vaddr)
+ data = (void *)phys_to_virt(buf_phys_addr);
+ if (!data)
break;
- mvpp2_frag_free(bm_pool, (void *)vaddr);
+ mvpp2_frag_free(bm_pool, data);
}
/* Update BM driver with number of buffers removed from pool */
@@ -3471,9 +3787,9 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
val |= MVPP2_BM_STOP_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
- dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
bm_pool->virt_addr,
- bm_pool->phys_addr);
+ bm_pool->dma_addr);
return 0;
}
@@ -3515,7 +3831,7 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
/* Allocate and initialize BM pools */
priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
- sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
+ sizeof(*priv->bm_pools), GFP_KERNEL);
if (!priv->bm_pools)
return -ENOMEM;
@@ -3529,17 +3845,20 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
int lrxq, int long_pool)
{
- u32 val;
+ u32 val, mask;
int prxq;
/* Get queue physical ID */
prxq = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~MVPP2_RXQ_POOL_LONG_MASK;
- val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
- MVPP2_RXQ_POOL_LONG_MASK);
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_LONG_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_LONG_MASK;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
@@ -3547,40 +3866,45 @@ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
int lrxq, int short_pool)
{
- u32 val;
+ u32 val, mask;
int prxq;
/* Get queue physical ID */
prxq = port->rxqs[lrxq]->id;
- val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
- val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
- val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
- MVPP2_RXQ_POOL_SHORT_MASK);
+ if (port->priv->hw_version == MVPP21)
+ mask = MVPP21_RXQ_POOL_SHORT_MASK;
+ else
+ mask = MVPP22_RXQ_POOL_SHORT_MASK;
+ val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~mask;
+ val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool,
- dma_addr_t *buf_phys_addr,
+ dma_addr_t *buf_dma_addr,
+ phys_addr_t *buf_phys_addr,
gfp_t gfp_mask)
{
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
void *data;
data = mvpp2_frag_alloc(bm_pool);
if (!data)
return NULL;
- phys_addr = dma_map_single(port->dev->dev.parent, data,
- MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
+ dma_addr = dma_map_single(port->dev->dev.parent, data,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
mvpp2_frag_free(bm_pool, data);
return NULL;
}
- *buf_phys_addr = phys_addr;
+ *buf_dma_addr = dma_addr;
+ *buf_phys_addr = virt_to_phys(data);
return data;
}
@@ -3604,37 +3928,46 @@ static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_phys_addr,
- unsigned long buf_virt_addr)
+ dma_addr_t buf_dma_addr,
+ phys_addr_t buf_phys_addr)
{
- mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
- mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
-}
+ int cpu = smp_processor_id();
-/* Release multicast buffer */
-static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_phys_addr,
- unsigned long buf_virt_addr,
- int mc_id)
-{
- u32 val = 0;
+ if (port->priv->hw_version == MVPP22) {
+ u32 val = 0;
+
+ if (sizeof(dma_addr_t) == 8)
+ val |= upper_32_bits(buf_dma_addr) &
+ MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
- val |= (mc_id & MVPP2_BM_MC_ID_MASK);
- mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
+ if (sizeof(phys_addr_t) == 8)
+ val |= (upper_32_bits(buf_phys_addr)
+ << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
+ MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- mvpp2_bm_pool_put(port, pool,
- buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
- buf_virt_addr);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+ }
+
+ /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and is simply
+ * returned in the "cookie" field of the RX descriptor. Instead of
+ * storing the virtual address, we store the physical address.
+ */
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}
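
As the comment in mvpp2_bm_pool_put() notes, the "cookie" register is opaque to the hardware and comes back untouched in the RX descriptor, so the release path stores the buffer's physical address there and the RX path recovers the kernel virtual address with phys_to_virt(). A sketch of that round trip, assuming lowmem buffers (where phys_to_virt() is valid):

	#include <linux/io.h>
	#include <linux/kernel.h>

	/* Release side: the cookie register is not interpreted by the HW,
	 * so store the buffer's physical address there. */
	static void put_cookie(void __iomem *cookie_reg, phys_addr_t phys)
	{
		writel(lower_32_bits(phys), cookie_reg);
	}

	/* RX side: the descriptor echoes the cookie back; recover the
	 * buffer's virtual address (valid for lowmem allocations only). */
	static void *cookie_to_data(phys_addr_t cookie)
	{
		return phys_to_virt(cookie);
	}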
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
- dma_addr_t phys_addr,
- unsigned long cookie)
+ dma_addr_t dma_addr,
+ phys_addr_t phys_addr)
{
int pool = mvpp2_bm_cookie_pool_get(bm);
- mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
/* Allocate buffers for the pool */
@@ -3642,7 +3975,8 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int buf_num)
{
int i, buf_size, total_size;
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
void *buf;
buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
@@ -3657,12 +3991,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
}
for (i = 0; i < buf_num; i++) {
- buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+ buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+ &phys_addr, GFP_KERNEL);
if (!buf)
break;
- mvpp2_bm_pool_put(port, bm_pool->id, phys_addr,
- (unsigned long)buf);
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
+ phys_addr);
}
/* Update BM driver with number of buffers added to pool */
@@ -3830,7 +4165,8 @@ static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}
/* Unmask the current CPU's Rx/Tx interrupts */
@@ -3838,17 +4174,46 @@ static void mvpp2_interrupts_unmask(void *arg)
{
struct mvpp2_port *port = arg;
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
- (MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_ISR_RX_TX_MASK_REG(port->id),
+ (MVPP2_CAUSE_MISC_SUM_MASK |
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
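
The mvpp2_percpu_read/write helpers used throughout these hunks are not shown here; from the probe code later in the patch (cpu_base[cpu] = base + cpu * addr_space_sz), they amount to indexing a per-CPU copy of the register file. A self-contained sketch under that assumption (helper and struct names are ours, not the driver's):

	#include <linux/io.h>

	struct mvpp2_sketch {
		void __iomem *cpu_base[8];	/* set in probe: base + cpu * stride */
	};

	static void percpu_write(struct mvpp2_sketch *priv, int cpu,
				 u32 offset, u32 data)
	{
		writel(data, priv->cpu_base[cpu] + offset);
	}

	static u32 percpu_read(struct mvpp2_sketch *priv, int cpu, u32 offset)
	{
		return readl(priv->cpu_base[cpu] + offset);
	}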
/* Port configuration routines */
+static void mvpp22_port_mii_set(struct mvpp2_port *port)
+{
+ u32 val;
+
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0) {
+ val = readl(port->base + MVPP22_XLG_CTRL3_REG);
+ val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+ val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+ writel(val, port->base + MVPP22_XLG_CTRL3_REG);
+ }
+
+ val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+ if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ else
+ val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+ val &= ~MVPP22_CTRL4_DP_CLK_SEL;
+ val |= MVPP22_CTRL4_SYNC_BYPASS;
+ val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+}
+
static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
u32 val;
+ if (port->priv->hw_version == MVPP22)
+ mvpp22_port_mii_set(port);
+
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
switch (port->phy_interface) {
@@ -3952,16 +4317,18 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
{
int tx_port_num, val, queue, ptxq, lrxq;
- /* Configure port to loopback if needed */
- if (port->flags & MVPP2_F_LOOPBACK)
- mvpp2_port_loopback_set(port);
+ if (port->priv->hw_version == MVPP21) {
+ /* Configure port to loopback if needed */
+ if (port->flags & MVPP2_F_LOOPBACK)
+ mvpp2_port_loopback_set(port);
- /* Update TX FIFO MIN Threshold */
- val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
- /* Min. TX threshold must be less than minimal packet length */
- val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
- writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ /* Update TX FIFO MIN Threshold */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+ /* Min. TX threshold must be less than minimal packet length */
+ val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ }
/* Disable Legacy WRR, Disable EJP, Release from reset */
tx_port_num = mvpp2_egress_port(port);
@@ -4048,7 +4415,7 @@ static void mvpp2_egress_enable(struct mvpp2_port *port)
for (queue = 0; queue < txq_number; queue++) {
struct mvpp2_tx_queue *txq = port->txqs[queue];
- if (txq->descs != NULL)
+ if (txq->descs)
qmap |= (1 << queue);
}
@@ -4149,11 +4516,15 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
}
/* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
+ struct mvpp2_rx_desc *rx_desc)
{
- int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
int cpu = smp_processor_id();
+ int pool;
+
+ pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
+ MVPP2_RXD_BM_POOL_ID_MASK) >>
+ MVPP2_RXD_BM_POOL_ID_OFFS;
return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
@@ -4161,18 +4532,6 @@ static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
/* Tx descriptors helper methods */
-/* Get number of Tx descriptors waiting to be transmitted by HW */
-static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
-{
- u32 val;
-
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
-
- return val & MVPP2_TXQ_PENDING_MASK;
-}
-
/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
@@ -4187,7 +4546,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
/* aggregated access - relevant TXQ number is written in TX desc */
- mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+ mvpp2_percpu_write(port->priv, smp_processor_id(),
+ MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
@@ -4216,11 +4576,12 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
struct mvpp2_tx_queue *txq, int num)
{
u32 val;
+ int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
- mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
+ mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
+ val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
@@ -4321,7 +4682,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
u32 val;
/* Reading status reg resets transmitted descriptor counter */
- val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+ val = mvpp2_percpu_read(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
MVPP2_TRANSMITTED_COUNT_OFFSET;
@@ -4335,7 +4697,8 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
for (queue = 0; queue < txq_number; queue++) {
int id = port->txqs[queue]->id;
- mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
+ mvpp2_percpu_read(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(id));
}
}
@@ -4394,12 +4757,14 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ int cpu = smp_processor_id();
+
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
- rxq->pkts_coal);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+ rxq->pkts_coal);
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4449,7 +4814,7 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
if (tx_buf->skb)
dev_kfree_skb_any(tx_buf->skb);
@@ -4527,10 +4892,12 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
int desc_num, int cpu,
struct mvpp2 *priv)
{
+ u32 txq_dma;
+
/* Allocate memory for TX descriptors */
aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
desc_num * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_phys, GFP_KERNEL);
+ &aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
return -ENOMEM;
@@ -4540,10 +4907,16 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
aggr_txq->next_desc_to_proc = mvpp2_read(priv,
MVPP2_AGGR_TXQ_INDEX_REG(cpu));
- /* Set Tx descriptors queue starting address */
- /* indirect access */
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
- aggr_txq->descs_phys);
+ /* Set Tx descriptors queue starting address (indirect
+ * access)
+ */
+ if (priv->hw_version == MVPP21)
+ txq_dma = aggr_txq->descs_dma;
+ else
+ txq_dma = aggr_txq->descs_dma >>
+ MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
+
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
return 0;
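
On PPv2.2 the descriptor base register holds the address shifted right by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS, i.e. only the bits above the alignment, which lets a >32-bit base fit in a 32-bit register. This implicitly relies on dma_alloc_coherent() returning a suitably aligned buffer. An illustrative check, with the shift value assumed:

	#include <linux/bug.h>
	#include <linux/types.h>

	#define DESC_ADDR_OFFS	8	/* assumed; see MVPP22_AGGR_TXQ_DESC_ADDR_OFFS */

	static u32 desc_base_to_reg(dma_addr_t descs_dma)
	{
		/* The low OFFS bits must be zero, or the HW would see a
		 * different, misaligned base address. */
		WARN_ON(descs_dma & ((1 << DESC_ADDR_OFFS) - 1));
		return descs_dma >> DESC_ADDR_OFFS;
	}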
@@ -4554,12 +4927,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ u32 rxq_dma;
+ int cpu;
+
rxq->size = port->rx_ring_size;
/* Allocate memory for RX descriptors */
rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
- &rxq->descs_phys, GFP_KERNEL);
+ &rxq->descs_dma, GFP_KERNEL);
if (!rxq->descs)
return -ENOMEM;
@@ -4569,10 +4945,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
- mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ if (port->priv->hw_version == MVPP21)
+ rxq_dma = rxq->descs_dma;
+ else
+ rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
/* Set Offset */
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4599,10 +4980,11 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
for (i = 0; i < rx_received; i++) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
- u32 bm = mvpp2_bm_cookie_build(rx_desc);
+ u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
- mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
- rx_desc->buf_cookie);
+ mvpp2_pool_refill(port, bm,
+ mvpp2_rxdesc_dma_addr_get(port, rx_desc),
+ mvpp2_rxdesc_cookie_get(port, rx_desc));
}
mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
@@ -4611,26 +4993,29 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ int cpu;
+
mvpp2_rxq_drop_pkts(port, rxq);
if (rxq->descs)
dma_free_coherent(port->dev->dev.parent,
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
rxq->descs,
- rxq->descs_phys);
+ rxq->descs_dma);
rxq->descs = NULL;
rxq->last_desc = 0;
rxq->next_desc_to_proc = 0;
- rxq->descs_phys = 0;
+ rxq->descs_dma = 0;
/* Clear Rx descriptors queue starting address and size;
* free descriptor number
*/
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
- mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
- mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
@@ -4646,23 +5031,25 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
/* Allocate memory for Tx descriptors */
txq->descs = dma_alloc_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- &txq->descs_phys, GFP_KERNEL);
+ &txq->descs_dma, GFP_KERNEL);
if (!txq->descs)
return -ENOMEM;
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
- MVPP2_TXQ_DESC_SIZE_MASK);
- mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
- mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
- txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+ txq->descs_dma);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+ txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+ txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+ val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
val &= ~MVPP2_TXQ_PENDING_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
/* Calculate base address in prefetch buffer. We reserve 16 descriptors
* for each existing TXQ.
@@ -4673,9 +5060,9 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
(txq->log_id * desc_per_txq);
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
- MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
- MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+ MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+ MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
/* WRR / EJP configuration - indirect access */
tx_port_num = mvpp2_egress_port(port);
@@ -4694,11 +5081,11 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
txq_pcpu->size = txq->size;
- txq_pcpu->buffs = kmalloc(txq_pcpu->size *
- sizeof(struct mvpp2_txq_pcpu_buf),
- GFP_KERNEL);
+ txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
+ sizeof(*txq_pcpu->buffs),
+ GFP_KERNEL);
if (!txq_pcpu->buffs)
- goto error;
+ goto cleanup;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -4707,8 +5094,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
}
return 0;
-
-error:
+cleanup:
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->buffs);
@@ -4716,7 +5102,7 @@ error:
dma_free_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
+ txq->descs, txq->descs_dma);
return -ENOMEM;
}
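
The prefetch-buffer computation in mvpp2_txq_init() above slices one shared buffer into fixed 16-descriptor windows per TXQ. A worked instance of that formula (MVPP2_MAX_TXQ assumed to be 8, desc_per_txq = 16 per the comment in the function):

	/*
	 *	port->id = 1, txq->log_id = 2
	 *	desc = 1 * 8 * 16 + 2 * 16 = 128 + 32 = 160
	 *
	 * so this TXQ's prefetch window starts at descriptor 160.
	 */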
@@ -4736,20 +5122,21 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
if (txq->descs)
dma_free_coherent(port->dev->dev.parent,
txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
+ txq->descs, txq->descs_dma);
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
- txq->descs_phys = 0;
+ txq->descs_dma = 0;
/* Set minimum bandwidth for disabled TXQs */
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
- mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
@@ -4759,10 +5146,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
int delay, pending, cpu;
u32 val;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
+ cpu = smp_processor_id();
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
+ val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
val |= MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
/* The napi queue has been stopped so wait for all packets
* to be transmitted.
@@ -4778,11 +5166,13 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
mdelay(1);
delay++;
- pending = mvpp2_txq_pend_desc_num_get(port, txq);
+ pending = mvpp2_percpu_read(port->priv, cpu,
+ MVPP2_TXQ_PENDING_REG);
+ pending &= MVPP2_TXQ_PENDING_MASK;
} while (pending);
val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -4991,20 +5381,21 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
static void mvpp2_rx_error(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- u32 status = rx_desc->status;
+ u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+ size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
switch (status & MVPP2_RXD_ERR_CODE_MASK) {
case MVPP2_RXD_ERR_CRC:
- netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
+ status, sz);
break;
case MVPP2_RXD_ERR_OVERRUN:
- netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
+ status, sz);
break;
case MVPP2_RXD_ERR_RESOURCE:
- netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
- status, rx_desc->data_size);
+ netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
+ status, sz);
break;
}
}
@@ -5031,15 +5422,17 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, u32 bm)
{
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
void *buf;
/* No recycle or too many buffers are in use, so allocate a new skb */
- buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+ buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
+ GFP_ATOMIC);
if (!buf)
return -ENOMEM;
- mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf);
+ mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
return 0;
}
@@ -5075,43 +5468,6 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
-static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- struct mvpp2_buff_hdr *buff_hdr;
- struct sk_buff *skb;
- u32 rx_status = rx_desc->status;
- dma_addr_t buff_phys_addr;
- unsigned long buff_virt_addr;
- dma_addr_t buff_phys_addr_next;
- unsigned long buff_virt_addr_next;
- int mc_id;
- int pool_id;
-
- pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
- MVPP2_RXD_BM_POOL_ID_OFFS;
- buff_phys_addr = rx_desc->buf_phys_addr;
- buff_virt_addr = rx_desc->buf_cookie;
-
- do {
- skb = (struct sk_buff *)buff_virt_addr;
- buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
-
- mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
-
- buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
- buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
-
- /* Release buffer */
- mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
- buff_virt_addr, mc_id);
-
- buff_phys_addr = buff_phys_addr_next;
- buff_virt_addr = buff_virt_addr_next;
-
- } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
-}
-
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_rx_queue *rxq)
@@ -5132,25 +5488,23 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_bm_pool *bm_pool;
struct sk_buff *skb;
unsigned int frag_size;
- dma_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ phys_addr_t phys_addr;
u32 bm, rx_status;
int pool, rx_bytes, err;
void *data;
rx_done++;
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
- phys_addr = rx_desc->buf_phys_addr;
- data = (void *)(uintptr_t)rx_desc->buf_cookie;
-
- bm = mvpp2_bm_cookie_build(rx_desc);
+ rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
+ rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
+ rx_bytes -= MVPP2_MH_SIZE;
+ dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ data = (void *)phys_to_virt(phys_addr);
+
+ bm = mvpp2_bm_cookie_build(port, rx_desc);
pool = mvpp2_bm_cookie_pool_get(bm);
bm_pool = &port->priv->bm_pools[pool];
- /* Check if buffer header is used */
- if (rx_status & MVPP2_RXD_BUF_HDR) {
- mvpp2_buff_hdr_rx(port, rx_desc);
- continue;
- }
/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the buffer pool information comprised in the RX descriptor.
* comprised by the RX descriptor.
*/
if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
- err_drop_frame:
+err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
-
- mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
- rx_desc->buf_cookie);
+ mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
continue;
}
@@ -5185,7 +5537,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
goto err_drop_frame;
}
- dma_unmap_single(dev->dev.parent, phys_addr,
+ dma_unmap_single(dev->dev.parent, dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE);
rcvd_pkts++;
@@ -5216,11 +5568,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
}
static inline void
-tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
+tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
- dma_unmap_single(dev, desc->buf_phys_addr,
- desc->data_size, DMA_TO_DEVICE);
+ dma_addr_t buf_dma_addr =
+ mvpp2_txdesc_dma_addr_get(port, desc);
+ size_t buf_sz =
+ mvpp2_txdesc_size_get(port, desc);
+ dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+ buf_sz, DMA_TO_DEVICE);
mvpp2_txq_desc_put(txq);
}
@@ -5232,47 +5588,49 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
struct mvpp2_tx_desc *tx_desc;
int i;
- dma_addr_t buf_phys_addr;
+ dma_addr_t buf_dma_addr;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = page_address(frag->page.p) + frag->page_offset;
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- tx_desc->phys_txq = txq->id;
- tx_desc->data_size = frag->size;
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, frag->size);
- buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
- tx_desc->data_size,
+ buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
+ frag->size,
DMA_TO_DEVICE);
- if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
+ if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
- goto error;
+ goto cleanup;
}
- tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
- tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_offset_set(port, tx_desc,
+ buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc,
+ buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
- tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc,
+ MVPP2_TXD_L_DESC);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
- tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, 0);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
}
return 0;
-
-error:
+cleanup:
/* Release all descriptors that were used to map fragments of
* this packet, as well as the corresponding DMA mappings
*/
for (i = i - 1; i >= 0; i--) {
tx_desc = txq->descs + i;
- tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+ tx_desc_unmap_put(port, txq, tx_desc);
}
return -ENOMEM;
@@ -5285,7 +5643,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
struct mvpp2_tx_queue *txq, *aggr_txq;
struct mvpp2_txq_pcpu *txq_pcpu;
struct mvpp2_tx_desc *tx_desc;
- dma_addr_t buf_phys_addr;
+ dma_addr_t buf_dma_addr;
int frags = 0;
u16 txq_id;
u32 tx_cmd;
@@ -5307,35 +5665,38 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
- tx_desc->phys_txq = txq->id;
- tx_desc->data_size = skb_headlen(skb);
+ mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
- buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
- tx_desc->data_size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+ buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
mvpp2_txq_desc_put(txq);
frags = 0;
goto out;
}
- tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
- tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+ mvpp2_txdesc_offset_set(port, tx_desc,
+ buf_dma_addr & MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc,
+ buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
tx_cmd = mvpp2_skb_tx_csum(port, skb);
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
- tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
- tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+ mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+ mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
- tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+ tx_desc_unmap_put(port, txq, tx_desc);
frags = 0;
goto out;
}
@@ -5396,6 +5757,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
u32 cause_rx_tx, cause_rx, cause_misc;
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
+ int cpu = smp_processor_id();
/* Rx/Tx cause register
*
@@ -5407,8 +5769,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
*
* Each CPU has its own Rx/Tx cause register
*/
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+ cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
@@ -5417,8 +5779,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Clear the cause register */
mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
- mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
- cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+ mvpp2_percpu_write(port->priv, cpu,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+ cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
@@ -5530,7 +5893,7 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
return 0;
}
-static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
u32 mac_addr_l, mac_addr_m, mac_addr_h;
@@ -5698,7 +6061,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data)) {
err = -EADDRNOTAVAIL;
- goto error;
+ goto log_error;
}
if (!netif_running(dev)) {
@@ -5708,7 +6071,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
/* Reconfigure parser to accept the original MAC address */
err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
if (err)
- goto error;
+ goto log_error;
}
mvpp2_stop_dev(port);
@@ -5720,15 +6083,14 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
/* Reconfigure parser accept the original MAC address */
err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
if (err)
- goto error;
+ goto log_error;
out_start:
mvpp2_start_dev(port);
mvpp2_egress_enable(port);
mvpp2_ingress_enable(port);
return 0;
-
-error:
- netdev_err(dev, "fail to change MAC address\n");
+log_error:
+ netdev_err(dev, "failed to change MAC address\n");
return err;
}
@@ -5753,7 +6115,7 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
/* Reconfigure BM to the original MTU */
err = mvpp2_bm_update_mtu(dev, dev->mtu);
if (err)
- goto error;
+ goto log_error;
}
mvpp2_stop_dev(port);
@@ -5767,7 +6129,7 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
/* Reconfigure BM to the original MTU */
err = mvpp2_bm_update_mtu(dev, dev->mtu);
if (err)
- goto error;
+ goto log_error;
out_start:
mvpp2_start_dev(port);
@@ -5775,9 +6137,8 @@ out_start:
mvpp2_ingress_enable(port);
return 0;
-
-error:
- netdev_err(dev, "fail to change MTU\n");
+log_error:
+ netdev_err(dev, "failed to change MTU\n");
return err;
}
@@ -5946,7 +6307,7 @@ static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
err_clean_rxqs:
mvpp2_cleanup_rxqs(port);
err_out:
- netdev_err(dev, "fail to change ring parameters");
+ netdev_err(dev, "failed to change ring parameters\n");
return err;
}
@@ -5975,16 +6336,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-/* Driver initialization */
-
-static void mvpp2_port_power_up(struct mvpp2_port *port)
-{
- mvpp2_port_mii_set(port);
- mvpp2_port_periodic_xon_disable(port);
- mvpp2_port_fc_adv_enable(port);
- mvpp2_port_reset(port);
-}
-
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
@@ -5993,7 +6344,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2_txq_pcpu *txq_pcpu;
int queue, cpu, err;
- if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+ if (port->first_rxq + rxq_number >
+ MVPP2_MAX_PORTS * priv->max_port_rxqs)
return -EINVAL;
/* Disable port */
@@ -6061,7 +6413,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
}
/* Configure Rx queue group interrupt for this port */
- mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
+ if (priv->hw_version == MVPP21) {
+ mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
+ rxq_number);
+ } else {
+ u32 val;
+
+ val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+ val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+ }
/* Create Rx descriptor rings */
for (queue = 0; queue < rxq_number; queue++) {
@@ -6103,8 +6466,7 @@ err_free_percpu:
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node,
- struct mvpp2 *priv,
- int *next_first_rxq)
+ struct mvpp2 *priv)
{
struct device_node *phy_node;
struct mvpp2_port *port;
@@ -6117,11 +6479,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
u32 id;
int features;
int phy_mode;
- int priv_common_regs_num = 2;
int err, i, cpu;
- dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
- rxq_number);
+ dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number);
if (!dev)
return -ENOMEM;
@@ -6163,16 +6523,30 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->priv = priv;
port->id = id;
- port->first_rxq = *next_first_rxq;
+ if (priv->hw_version == MVPP21)
+ port->first_rxq = port->id * rxq_number;
+ else
+ port->first_rxq = port->id * priv->max_port_rxqs;
+
port->phy_node = phy_node;
port->phy_interface = phy_mode;
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- priv_common_regs_num + id);
- port->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(port->base)) {
- err = PTR_ERR(port->base);
- goto err_free_irq;
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
+ port->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(port->base)) {
+ err = PTR_ERR(port->base);
+ goto err_free_irq;
+ }
+ } else {
+ if (of_property_read_u32(port_node, "gop-port-id",
+ &port->gop_id)) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "missing gop-port-id value\n");
+ goto err_free_irq;
+ }
+
+ port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
}
/* Alloc per-cpu stats */
@@ -6187,7 +6561,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mac_from = "device tree";
ether_addr_copy(dev->dev_addr, dt_mac_addr);
} else {
- mvpp2_get_mac_address(port, hw_mac_addr);
+ if (priv->hw_version == MVPP21)
+ mvpp21_get_mac_address(port, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
mac_from = "hardware";
ether_addr_copy(dev->dev_addr, hw_mac_addr);
@@ -6207,7 +6582,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev_err(&pdev->dev, "failed to init port %d\n", id);
goto err_free_stats;
}
- mvpp2_port_power_up(port);
+
+ mvpp2_port_mii_set(port);
+ mvpp2_port_periodic_xon_disable(port);
+
+ if (priv->hw_version == MVPP21)
+ mvpp2_port_fc_adv_enable(port);
+
+ mvpp2_port_reset(port);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
if (!port->pcpu) {
@@ -6245,8 +6627,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
- /* Increment the first Rx queue number to be used by the next port */
- *next_first_rxq += rxq_number;
priv->port_list[id] = port;
return 0;
@@ -6330,6 +6710,60 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
+static void mvpp2_axi_init(struct mvpp2 *priv)
+{
+ u32 val, rdval, wrval;
+
+ mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
+
+ /* AXI Bridge Configuration */
+
+ rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ /* BM */
+ mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
+
+ /* Descriptors */
+ mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
+
+ /* Buffer Data */
+ mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
+
+ val = MVPP22_AXI_CODE_CACHE_NON_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+ mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
+ mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
+}
+
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
@@ -6338,7 +6772,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
u32 val;
/* Checks for hardware constraints */
- if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+ if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
(txq_number > MVPP2_MAX_TXQ)) {
dev_err(&pdev->dev, "invalid queue size parameter\n");
return -EINVAL;
@@ -6349,14 +6783,23 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
if (dram_target_info)
mvpp2_conf_mbus_windows(dram_target_info, priv);
+ if (priv->hw_version == MVPP22)
+ mvpp2_axi_init(priv);
+
/* Disable HW PHY polling */
- val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
- val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
- writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ if (priv->hw_version == MVPP21) {
+ val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+ writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ } else {
+ val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ val &= ~MVPP22_SMI_POLLING_EN;
+ writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ }
/* Allocate and initialize aggregated TXQs */
priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
- sizeof(struct mvpp2_tx_queue),
+ sizeof(*priv->aggr_txqs),
GFP_KERNEL);
if (!priv->aggr_txqs)
return -ENOMEM;
@@ -6374,11 +6817,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_rx_fifo_init(priv);
/* Reset Rx queue group interrupt configuration */
- for (i = 0; i < MVPP2_MAX_PORTS; i++)
- mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ if (priv->hw_version == MVPP21) {
+ mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
+ rxq_number);
+ continue;
+ } else {
+ u32 val;
+
+ val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+
+ val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
+ mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+ }
+ }
- writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
- priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+ if (priv->hw_version == MVPP21)
+ writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+ priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
/* Allow cache snoop when transmitting packets */
mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
@@ -6405,22 +6862,46 @@ static int mvpp2_probe(struct platform_device *pdev)
struct device_node *port_node;
struct mvpp2 *priv;
struct resource *res;
- int port_count, first_rxq;
+ void __iomem *base;
+ int port_count, cpu;
int err;
- priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->hw_version =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->lms_base))
+ return PTR_ERR(priv->lms_base);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->iface_base))
+ return PTR_ERR(priv->iface_base);
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->lms_base))
- return PTR_ERR(priv->lms_base);
+ for_each_present_cpu(cpu) {
+ u32 addr_space_sz;
+
+ addr_space_sz = (priv->hw_version == MVPP21 ?
+ MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
+ priv->cpu_base[cpu] = base + cpu * addr_space_sz;
+ }
+
+ if (priv->hw_version == MVPP21)
+ priv->max_port_rxqs = 8;
+ else
+ priv->max_port_rxqs = 32;
priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
if (IS_ERR(priv->pp_clk))
@@ -6438,42 +6919,70 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_pp_clk;
+ if (priv->hw_version == MVPP22) {
+ priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+ if (IS_ERR(priv->mg_clk)) {
+ err = PTR_ERR(priv->mg_clk);
+ goto err_gop_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_clk);
+ if (err < 0)
+ goto err_gop_clk;
+ }
+
/* Get system's tclk rate */
priv->tclk = clk_get_rate(priv->pp_clk);
+ if (priv->hw_version == MVPP22) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ if (err)
+ goto err_mg_clk;
+ /* Sadly, the BM pools all share the same register to
+ * store the high 32 bits of their address. So they
+ * must all have the same high 32 bits, which forces
+ * us to restrict coherent memory to DMA_BIT_MASK(32).
+ */
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ goto err_mg_clk;
+ }
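
The probe hunk above sets a 40-bit streaming mask but, per its own comment, a 32-bit coherent mask, because all BM pools share a single high-bits register. The same logic in isolation (the helper name is ours, not the driver's):

	#include <linux/dma-mapping.h>

	static int mvpp22_set_dma_masks(struct device *dev)
	{
		int err;

		/* streaming (dma_map_*) mappings may use 40-bit addresses */
		err = dma_set_mask(dev, DMA_BIT_MASK(40));
		if (err)
			return err;

		/* coherent allocations must share one high-bits value,
		 * so keep them below 4 GB */
		return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}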
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
dev_err(&pdev->dev, "failed to initialize controller\n");
- goto err_gop_clk;
+ goto err_mg_clk;
}
port_count = of_get_available_child_count(dn);
if (port_count == 0) {
dev_err(&pdev->dev, "no ports enabled\n");
err = -ENODEV;
- goto err_gop_clk;
+ goto err_mg_clk;
}
priv->port_list = devm_kcalloc(&pdev->dev, port_count,
- sizeof(struct mvpp2_port *),
- GFP_KERNEL);
+ sizeof(*priv->port_list),
+ GFP_KERNEL);
if (!priv->port_list) {
err = -ENOMEM;
- goto err_gop_clk;
+ goto err_mg_clk;
}
/* Initialize ports */
- first_rxq = 0;
for_each_available_child_of_node(dn, port_node) {
- err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
+ err = mvpp2_port_probe(pdev, port_node, priv);
if (err < 0)
- goto err_gop_clk;
+ goto err_mg_clk;
}
platform_set_drvdata(pdev, priv);
return 0;
+err_mg_clk:
+ if (priv->hw_version == MVPP22)
+ clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
@@ -6506,9 +7015,10 @@ static int mvpp2_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
aggr_txq->descs,
- aggr_txq->descs_phys);
+ aggr_txq->descs_dma);
}
+ clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
@@ -6516,7 +7026,14 @@ static int mvpp2_remove(struct platform_device *pdev)
}
static const struct of_device_id mvpp2_match[] = {
- { .compatible = "marvell,armada-375-pp2" },
+ {
+ .compatible = "marvell,armada-375-pp2",
+ .data = (void *)MVPP21,
+ },
+ {
+ .compatible = "marvell,armada-7k-pp22",
+ .data = (void *)MVPP22,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
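
The per-compatible .data above is retrieved in probe with of_device_get_match_data(), as shown earlier in the patch. The pattern in isolation (return type and enum values assumed to mirror the driver's MVPP21/MVPP22):

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static unsigned long mvpp2_get_hw_version(struct platform_device *pdev)
	{
		/* .data was stored as an enum cast to void *, so the
		 * reverse cast goes through unsigned long. */
		return (unsigned long)of_device_get_match_data(&pdev->dev);
	}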
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 28cb36d9e50a..993724959a7c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -556,11 +556,11 @@ static int init_hash_table(struct pxa168_eth_private *pep)
 * function. Driver can dynamically switch to them if the 1/2kB hash
* table is full.
*/
- if (pep->htpr == NULL) {
+ if (!pep->htpr) {
pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
HASH_ADDR_TABLE_SIZE,
&pep->htpr_dma, GFP_KERNEL);
- if (pep->htpr == NULL)
+ if (!pep->htpr)
return -ENOMEM;
} else {
memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
@@ -1036,8 +1036,7 @@ static int rxq_init(struct net_device *dev)
int rx_desc_num = pep->rx_ring_size;
/* Allocate RX skb rings */
- pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
- GFP_KERNEL);
+ pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
if (!pep->rx_skb)
return -ENOMEM;
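
The pxa168 conversions here replace open-coded kzalloc(sizeof(*x) * n) with kcalloc(); besides reading better, kcalloc() fails cleanly if n * size would overflow instead of silently allocating a short buffer. A minimal sketch:

	#include <linux/slab.h>

	static void *alloc_skb_ring(size_t n, size_t elem_size)
	{
		/* Zeroed allocation; returns NULL instead of wrapping if
		 * n * elem_size overflows, unlike kzalloc(n * elem_size). */
		return kcalloc(n, elem_size, GFP_KERNEL);
	}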
@@ -1096,8 +1095,7 @@ static int txq_init(struct net_device *dev)
int size = 0, i = 0;
int tx_desc_num = pep->tx_ring_size;
- pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
- GFP_KERNEL);
+ pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
if (!pep->tx_skb)
return -ENOMEM;
@@ -1358,7 +1356,7 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd)
{
- if (dev->phydev != NULL)
+ if (dev->phydev)
return phy_mii_ioctl(dev->phydev, ifr, cmd);
return -EOPNOTSUPP;
@@ -1503,7 +1501,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->timeout.data = (unsigned long)pep;
pep->smi_bus = mdiobus_alloc();
- if (pep->smi_bus == NULL) {
+ if (!pep->smi_bus) {
err = -ENOMEM;
goto err_netdev;
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index edb95271a4f2..5d7d94de4e00 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2657,7 +2657,7 @@ static int skge_down(struct net_device *dev)
struct skge_hw *hw = skge->hw;
int port = skge->port;
- if (skge->mem == NULL)
+ if (!skge->mem)
return 0;
netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
@@ -3718,7 +3718,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
t->csum_offs, t->csum_write, t->csum_start);
}
- seq_printf(seq, "\nRx Ring:\n");
+ seq_puts(seq, "\nRx Ring:\n");
for (e = skge->rx_ring.to_clean; ; e = e->next) {
const struct skge_rx_desc *r = e->desc;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 2b2cc3f3ca10..1145cde2274a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4544,7 +4544,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
sky2_read32(hw, B0_Y2_SP_ICR));
if (!netif_running(dev)) {
- seq_printf(seq, "network not running\n");
+ seq_puts(seq, "network not running\n");
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 93949139e62c..16f97552ae98 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1849,6 +1849,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* GE2, Force 1000M/FD, FC ON */
mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+ /* Tell the CDM to parse the MTK special tag on packets coming
+ * from the CPU; this also works for untagged packets.
+ */
+ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
@@ -1911,10 +1917,9 @@ static int __init mtk_init(struct net_device *dev)
/* If the mac address is invalid, use random mac address */
if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
dev_err(eth->dev, "generated random MAC address %pM\n",
dev->dev_addr);
- dev->addr_assign_type = NET_ADDR_RANDOM;
}
return mtk_phy_connect(dev);
@@ -2320,6 +2325,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->dev.of_node = np;
+
return 0;
free_netdev:
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 08285a96ff70..3c46a3b613b9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -70,6 +70,10 @@
/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP 0x20
+/* CDMQ Ingress Control Register */
+#define MTK_CDMQ_IG_CTRL 0x1400
+#define MTK_CDMQ_STAG_EN BIT(0)
+
/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c4d714fcc7da..ffbcb27c05e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -117,7 +117,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
/* port statistics */
"tso_packets",
"xmit_more",
- "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+ "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
/* pf statistics */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 61420473fe5f..94fab20ef146 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -92,7 +92,9 @@ static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return mlx4_en_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return mlx4_en_setup_tc(dev, tc->mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 9166d90e7328..e0eb695318e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -213,6 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
priv->port_stats.rx_chksum_complete = 0;
+ priv->port_stats.rx_alloc_pages = 0;
priv->xdp_stats.rx_xdp_drop = 0;
priv->xdp_stats.rx_xdp_tx = 0;
priv->xdp_stats.rx_xdp_tx_full = 0;
@@ -223,6 +224,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
+ priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 867292880c07..aa074e57ce06 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -50,173 +50,62 @@
#include "mlx4_en.h"
-static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *page_alloc,
- const struct mlx4_en_frag_info *frag_info,
- gfp_t _gfp)
+static int mlx4_alloc_page(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frag,
+ gfp_t gfp)
{
- int order;
struct page *page;
dma_addr_t dma;
- for (order = frag_info->order; ;) {
- gfp_t gfp = _gfp;
-
- if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
- page = alloc_pages(gfp, order);
- if (likely(page))
- break;
- if (--order < 0 ||
- ((PAGE_SIZE << order) < frag_info->frag_size))
- return -ENOMEM;
- }
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
- frag_info->dma_dir);
+ page = alloc_page(gfp);
+ if (unlikely(!page))
+ return -ENOMEM;
+ dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- put_page(page);
+ __free_page(page);
return -ENOMEM;
}
- page_alloc->page_size = PAGE_SIZE << order;
- page_alloc->page = page;
- page_alloc->dma = dma;
- page_alloc->page_offset = 0;
- /* Not doing get_page() for each frag is a big win
- * on asymetric workloads. Note we can not use atomic_set().
- */
- page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
+ frag->page = page;
+ frag->dma = dma;
+ frag->page_offset = priv->rx_headroom;
return 0;
}
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
- struct mlx4_en_rx_alloc *ring_alloc,
gfp_t gfp)
{
- struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
- const struct mlx4_en_frag_info *frag_info;
- struct page *page;
int i;
- for (i = 0; i < priv->num_frags; i++) {
- frag_info = &priv->frag_info[i];
- page_alloc[i] = ring_alloc[i];
- page_alloc[i].page_offset += frag_info->frag_stride;
-
- if (page_alloc[i].page_offset + frag_info->frag_stride <=
- ring_alloc[i].page_size)
- continue;
-
- if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
- frag_info, gfp)))
- goto out;
- }
-
- for (i = 0; i < priv->num_frags; i++) {
- frags[i] = ring_alloc[i];
- frags[i].page_offset += priv->frag_info[i].rx_headroom;
- rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
- frags[i].page_offset);
- ring_alloc[i] = page_alloc[i];
- }
-
- return 0;
-
-out:
- while (i--) {
- if (page_alloc[i].page != ring_alloc[i].page) {
- dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].page_size,
- priv->frag_info[i].dma_dir);
- page = page_alloc[i].page;
- /* Revert changes done by mlx4_alloc_pages */
- page_ref_sub(page, page_alloc[i].page_size /
- priv->frag_info[i].frag_stride - 1);
- put_page(page);
+ for (i = 0; i < priv->num_frags; i++, frags++) {
+ if (!frags->page) {
+ if (mlx4_alloc_page(priv, frags, gfp))
+ return -ENOMEM;
+ ring->rx_alloc_pages++;
}
- }
- return -ENOMEM;
-}
-
-static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frags,
- int i)
-{
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
- u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
-
-
- if (next_frag_end > frags[i].page_size)
- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
- frag_info->dma_dir);
-
- if (frags[i].page)
- put_page(frags[i].page);
-}
-
-static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
-{
- int i;
- struct mlx4_en_rx_alloc *page_alloc;
-
- for (i = 0; i < priv->num_frags; i++) {
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-
- if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
- frag_info, GFP_KERNEL | __GFP_COLD))
- goto out;
-
- en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
- i, ring->page_alloc[i].page_size,
- page_ref_count(ring->page_alloc[i].page));
+ rx_desc->data[i].addr = cpu_to_be64(frags->dma +
+ frags->page_offset);
}
return 0;
-
-out:
- while (i--) {
- struct page *page;
-
- page_alloc = &ring->page_alloc[i];
- dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size,
- priv->frag_info[i].dma_dir);
- page = page_alloc->page;
- /* Revert changes done by mlx4_alloc_pages */
- page_ref_sub(page, page_alloc->page_size /
- priv->frag_info[i].frag_stride - 1);
- put_page(page);
- page_alloc->page = NULL;
- }
- return -ENOMEM;
}
-static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frag)
{
- struct mlx4_en_rx_alloc *page_alloc;
- int i;
-
- for (i = 0; i < priv->num_frags; i++) {
- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-
- page_alloc = &ring->page_alloc[i];
- en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
- i, page_count(page_alloc->page));
-
- dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size, frag_info->dma_dir);
- while (page_alloc->page_offset + frag_info->frag_stride <
- page_alloc->page_size) {
- put_page(page_alloc->page);
- page_alloc->page_offset += frag_info->frag_stride;
- }
- page_alloc->page = NULL;
+ if (frag->page) {
+ dma_unmap_page(priv->ddev, frag->dma,
+ PAGE_SIZE, priv->dma_dir);
+ __free_page(frag->page);
}
+ /* We need to clear all fields, otherwise a change of priv->log_rx_info
+ * could lead us to see garbage later in frag->page.
+ */
+ memset(frag, 0, sizeof(*frag));
}
-static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
@@ -248,18 +137,23 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
-
if (ring->page_cache.index > 0) {
- frags[0] = ring->page_cache.buf[--ring->page_cache.index];
- rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
- frags[0].page_offset);
+ /* XDP uses a single page per frame */
+ if (!frags->page) {
+ ring->page_cache.index--;
+ frags->page = ring->page_cache.buf[ring->page_cache.index].page;
+ frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
+ }
+ frags->page_offset = XDP_PACKET_HEADROOM;
+ rx_desc->data[0].addr = cpu_to_be64(frags->dma +
+ XDP_PACKET_HEADROOM);
return 0;
}
- return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
+ return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
-static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
return ring->prod == ring->cons;
}
@@ -269,7 +163,8 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
-static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+/* slow path */
+static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
@@ -279,7 +174,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags, nr);
+ mlx4_en_free_frag(priv, frags + nr);
}
}
@@ -335,12 +230,12 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
- while (!mlx4_en_is_ring_empty(ring)) {
- index = ring->cons & ring->size_mask;
+ for (index = 0; index < ring->size; index++) {
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
- ++ring->cons;
}
+ ring->cons = 0;
+ ring->prod = 0;
}
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
@@ -392,9 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
- ring->rx_info = vmalloc_node(tmp, node);
+ ring->rx_info = vzalloc_node(tmp, node);
if (!ring->rx_info) {
- ring->rx_info = vmalloc(tmp);
+ ring->rx_info = vzalloc(tmp);
if (!ring->rx_info) {
err = -ENOMEM;
goto err_ring;
@@ -464,16 +359,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
/* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
-
- /* Initialize page allocators */
- err = mlx4_en_init_allocator(priv, ring);
- if (err) {
- en_err(priv, "Failed initializing ring allocator\n");
- if (ring->stride <= TXBB_SIZE)
- ring->buf -= TXBB_SIZE;
- ring_ind--;
- goto err_allocator;
- }
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
@@ -493,11 +378,9 @@ err_buffers:
mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
-err_allocator:
while (ring_ind >= 0) {
if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
ring_ind--;
}
return err;
@@ -537,7 +420,9 @@ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
if (cache->index >= MLX4_EN_CACHE_SIZE)
return false;
- cache->buf[cache->index++] = *frame;
+ cache->buf[cache->index].page = frame->page;
+ cache->buf[cache->index].dma = frame->dma;
+ cache->index++;
return true;
}
@@ -567,136 +452,91 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
int i;
for (i = 0; i < ring->page_cache.index; i++) {
- struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
-
- dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
- priv->frag_info[0].dma_dir);
- put_page(frame->page);
+ dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
+ PAGE_SIZE, priv->dma_dir);
+ put_page(ring->page_cache.buf[i].page);
}
ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
- mlx4_en_destroy_allocator(priv, ring);
}
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
struct sk_buff *skb,
int length)
{
- struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
- struct mlx4_en_frag_info *frag_info;
- int nr;
+ const struct mlx4_en_frag_info *frag_info = priv->frag_info;
+ unsigned int truesize = 0;
+ int nr, frag_size;
+ struct page *page;
dma_addr_t dma;
+ bool release;
/* Collect used fragments while replacing them in the HW descriptors */
- for (nr = 0; nr < priv->num_frags; nr++) {
- frag_info = &priv->frag_info[nr];
- if (length <= frag_info->frag_prefix_size)
- break;
- if (unlikely(!frags[nr].page))
+ for (nr = 0;; frags++) {
+ frag_size = min_t(int, length, frag_info->frag_size);
+
+ page = frags->page;
+ if (unlikely(!page))
goto fail;
- dma = be64_to_cpu(rx_desc->data[nr].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
- DMA_FROM_DEVICE);
+ dma = frags->dma;
+ dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
+ frag_size, priv->dma_dir);
+
+ __skb_fill_page_desc(skb, nr, page, frags->page_offset,
+ frag_size);
- __skb_fill_page_desc(skb, nr, frags[nr].page,
- frags[nr].page_offset,
- frag_info->frag_size);
+ truesize += frag_info->frag_stride;
+ if (frag_info->frag_stride == PAGE_SIZE / 2) {
+ frags->page_offset ^= PAGE_SIZE / 2;
+ release = page_count(page) != 1 ||
+ page_is_pfmemalloc(page) ||
+ page_to_nid(page) != numa_mem_id();
+ } else {
+ u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
- skb->truesize += frag_info->frag_stride;
- frags[nr].page = NULL;
+ frags->page_offset += sz_align;
+ release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
+ }
+ if (release) {
+ dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
+ frags->page = NULL;
+ } else {
+ page_ref_inc(page);
+ }
+
+ nr++;
+ length -= frag_size;
+ if (!length)
+ break;
+ frag_info++;
}
- /* Adjust size of last fragment to match actual length */
- if (nr > 0)
- skb_frag_size_set(&skb_frags_rx[nr - 1],
- length - priv->frag_info[nr - 1].frag_prefix_size);
+ skb->truesize += truesize;
return nr;
fail:
while (nr > 0) {
nr--;
- __skb_frag_unref(&skb_frags_rx[nr]);
+ __skb_frag_unref(skb_shinfo(skb)->frags + nr);
}
return 0;
}
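
The reuse logic above is the heart of the new scheme. A condensed sketch of just the decision (helper name illustrative, field names as in this patch): with a half-page stride the two halves alternate via XOR; otherwise the offset walks forward until a full-size fragment would overrun the page.

static bool mlx4_rx_page_reusable(struct mlx4_en_rx_alloc *frag,
				  u32 used_size, u32 cfg_size, u32 frag_stride)
{
	if (frag_stride == PAGE_SIZE / 2) {
		frag->page_offset ^= PAGE_SIZE / 2;	/* flip halves */
		/* Reuse only if we hold the sole reference and the page
		 * is neither pfmemalloc nor remote to this NUMA node.
		 */
		return page_count(frag->page) == 1 &&
		       !page_is_pfmemalloc(frag->page) &&
		       page_to_nid(frag->page) == numa_mem_id();
	}
	/* Advance by what was actually consumed, rounded to a cache
	 * line, and release once a full-size frag no longer fits.
	 */
	frag->page_offset += ALIGN(used_size, SMP_CACHE_BYTES);
	return frag->page_offset + cfg_size <= PAGE_SIZE;
}

On reuse the caller takes an extra reference (page_ref_inc()); on release it unmaps the page and clears frag->page, exactly as in the hunk above.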
-
-static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct mlx4_en_rx_alloc *frags,
- unsigned int length)
-{
- struct sk_buff *skb;
- void *va;
- int used_frags;
- dma_addr_t dma;
-
- skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
- if (unlikely(!skb)) {
- en_dbg(RX_ERR, priv, "Failed allocating skb\n");
- return NULL;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- skb->len = length;
-
- /* Get pointer to first fragment so we could copy the headers into the
- * (linear part of the) skb */
- va = page_address(frags[0].page) + frags[0].page_offset;
-
- if (length <= SMALL_PACKET_SIZE) {
- /* We are copying all relevant data to the skb - temporarily
- * sync buffers for the copy */
- dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, length,
- DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, va, length);
- skb->tail += length;
- } else {
- unsigned int pull_len;
-
- /* Move relevant fragments to skb */
- used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
- skb, length);
- if (unlikely(!used_frags)) {
- kfree_skb(skb);
- return NULL;
- }
- skb_shinfo(skb)->nr_frags = used_frags;
-
- pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
- /* Copy headers into the skb linear buffer */
- memcpy(skb->data, va, pull_len);
- skb->tail += pull_len;
-
- /* Skip headers in first fragment */
- skb_shinfo(skb)->frags[0].page_offset += pull_len;
-
- /* Adjust size of first fragment */
- skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
- skb->data_len = length - pull_len;
- }
- return skb;
-}
-
-static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
+ const unsigned char *data = va + ETH_HLEN;
int i;
- int offset = ETH_HLEN;
- for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
- if (*(skb->data + offset) != (unsigned char) (i & 0xff))
- goto out_loopback;
+ for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
+ if (data[i] != (unsigned char)i)
+ return;
}
/* Loopback found */
priv->loopback_ok = 1;
-
-out_loopback:
- dev_kfree_skb_any(skb);
}
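
The check works because the transmit side of the selftest fills the frame deterministically. A sketch of the matching generator (illustrative only; the real one lives in en_selftest.c):

static void fill_loopback_payload(unsigned char *data)
{
	int i;

	/* Byte i is simply (u8)i, so the cast in the check above
	 * already truncates and no '& 0xff' is needed.
	 */
	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++)
		data[i] = (unsigned char)i;
}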
static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
@@ -801,7 +641,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
- struct mlx4_en_rx_desc *rx_desc;
struct bpf_prog *xdp_prog;
int doorbell_pending;
struct sk_buff *skb;
@@ -834,10 +673,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
+ void *va;
frags = ring->rx_info + (index << priv->log_rx_info);
- rx_desc = ring->buf + (index << ring->log_stride);
-
+ va = page_address(frags[0].page) + frags[0].page_offset;
/*
* make sure we read the CQE after we read the ownership bit
*/
@@ -860,16 +699,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* and not performing the selftest or flb disabled
*/
if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
- struct ethhdr *ethh;
+ const struct ethhdr *ethh = va;
dma_addr_t dma;
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma = frags[0].dma + frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
- ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].page_offset);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
@@ -887,13 +724,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}
}
+ if (unlikely(priv->validate_loopback)) {
+ validate_loopback(priv, va);
+ goto next;
+ }
+
/*
* Packet is OK - process it.
*/
length = be32_to_cpu(cqe->byte_cnt);
length -= ring->fcs_del;
- l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
- (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
/* A bpf program gets first chance to drop the packet. It may
* read bytes but not past the end of the frag.
@@ -904,13 +744,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma = frags[0].dma + frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp.data_hard_start = page_address(frags[0].page);
- xdp.data = xdp.data_hard_start + frags[0].page_offset;
+ xdp.data_hard_start = va - frags[0].page_offset;
+ xdp.data = va;
xdp.data_end = xdp.data + length;
orig_data = xdp.data;
@@ -920,6 +760,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length = xdp.data_end - xdp.data;
frags[0].page_offset = xdp.data -
xdp.data_hard_start;
+ va = xdp.data;
}
switch (act) {
@@ -928,8 +769,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
length, cq->ring,
- &doorbell_pending)))
- goto consumed;
+ &doorbell_pending))) {
+ frags[0].page = NULL;
+ goto next;
+ }
trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
@@ -939,8 +782,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
- if (likely(mlx4_en_rx_recycle(ring, frags)))
- goto consumed;
goto next;
}
}
@@ -948,129 +789,51 @@ xdp_drop_no_cnt:
ring->bytes += length;
ring->packets++;
+ skb = napi_get_frags(&cq->napi);
+ if (!skb)
+ goto next;
+
+ if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
+ timestamp);
+ }
+ skb_record_rx_queue(skb, cq->ring);
+
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
cqe->checksum == cpu_to_be16(0xffff)) {
ip_summed = CHECKSUM_UNNECESSARY;
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ if (l2_tunnel)
+ skb->csum_level = 1;
ring->csum_ok++;
} else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+ goto csum_none;
}
} else {
if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
MLX4_CQE_STATUS_IPV6))) {
- ip_summed = CHECKSUM_COMPLETE;
- ring->csum_complete++;
+ if (check_csum(cqe, skb, va, dev->features)) {
+ goto csum_none;
+ } else {
+ ip_summed = CHECKSUM_COMPLETE;
+ ring->csum_complete++;
+ }
} else {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
+ goto csum_none;
}
}
} else {
+csum_none:
ip_summed = CHECKSUM_NONE;
ring->csum_none++;
}
-
- /* This packet is eligible for GRO if it is:
- * - DIX Ethernet (type interpretation)
- * - TCP/IP (v4)
- * - without IP options
- * - not an IP fragment
- */
- if (dev->features & NETIF_F_GRO) {
- struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
- if (!gro_skb)
- goto next;
-
- nr = mlx4_en_complete_rx_desc(priv,
- rx_desc, frags, gro_skb,
- length);
- if (!nr)
- goto next;
-
- if (ip_summed == CHECKSUM_COMPLETE) {
- void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
- if (check_csum(cqe, gro_skb, va,
- dev->features)) {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
- ring->csum_complete--;
- }
- }
-
- skb_shinfo(gro_skb)->nr_frags = nr;
- gro_skb->len = length;
- gro_skb->data_len = length;
- gro_skb->ip_summed = ip_summed;
-
- if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
- gro_skb->csum_level = 1;
-
- if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- u16 vid = be16_to_cpu(cqe->sl_vid);
-
- __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
- } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_SVLAN_PRESENT_MASK) &&
- (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
- __vlan_hwaccel_put_tag(gro_skb,
- htons(ETH_P_8021AD),
- be16_to_cpu(cqe->sl_vid));
- }
-
- if (dev->features & NETIF_F_RXHASH)
- skb_set_hash(gro_skb,
- be32_to_cpu(cqe->immed_rss_invalid),
- (ip_summed == CHECKSUM_UNNECESSARY) ?
- PKT_HASH_TYPE_L4 :
- PKT_HASH_TYPE_L3);
-
- skb_record_rx_queue(gro_skb, cq->ring);
-
- if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev,
- skb_hwtstamps(gro_skb),
- timestamp);
- }
-
- napi_gro_frags(&cq->napi);
- goto next;
- }
-
- /* GRO not possible, complete processing here */
- skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
- if (unlikely(!skb)) {
- ring->dropped++;
- goto next;
- }
-
- if (unlikely(priv->validate_loopback)) {
- validate_loopback(priv, skb);
- goto next;
- }
-
- if (ip_summed == CHECKSUM_COMPLETE) {
- if (check_csum(cqe, skb, skb->data, dev->features)) {
- ip_summed = CHECKSUM_NONE;
- ring->csum_complete--;
- ring->csum_none++;
- }
- }
-
skb->ip_summed = ip_summed;
- skb->protocol = eth_type_trans(skb, dev);
- skb_record_rx_queue(skb, cq->ring);
-
- if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
- skb->csum_level = 1;
-
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
@@ -1078,36 +841,36 @@ xdp_drop_no_cnt:
PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_L3);
- if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_CVLAN_PRESENT_MASK) &&
+
+ if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
- else if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(cqe->sl_vid));
+ else if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_STAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
be16_to_cpu(cqe->sl_vid));
- if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
- timestamp = mlx4_en_get_cqe_ts(cqe);
- mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
- timestamp);
+ nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
+ if (likely(nr)) {
+ skb_shinfo(skb)->nr_frags = nr;
+ skb->len = length;
+ skb->data_len = length;
+ napi_gro_frags(&cq->napi);
+ } else {
+ skb->vlan_tci = 0;
+ skb_clear_hash(skb);
}
-
- napi_gro_receive(&cq->napi, skb);
next:
- for (nr = 0; nr < priv->num_frags; nr++)
- mlx4_en_free_frag(priv, frags, nr);
-
-consumed:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
if (++polled == budget)
- goto out;
+ break;
}
-out:
rcu_read_unlock();
if (polled) {
@@ -1178,13 +941,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
return done;
}
-static const int frag_sizes[] = {
- FRAG_SZ0,
- FRAG_SZ1,
- FRAG_SZ2,
- FRAG_SZ3
-};
-
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1195,33 +951,43 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
* This only works when num_frags == 1.
*/
if (priv->tx_ring_num[TX_XDP]) {
- priv->frag_info[0].order = 0;
priv->frag_info[0].frag_size = eff_mtu;
- priv->frag_info[0].frag_prefix_size = 0;
/* This will gain efficient xdp frame recycling at the
* expense of more costly truesize accounting
*/
priv->frag_info[0].frag_stride = PAGE_SIZE;
- priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
- priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
+ priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
+ priv->rx_headroom = XDP_PACKET_HEADROOM;
i = 1;
} else {
- int buf_size = 0;
+ int frag_size_max = 2048, buf_size = 0;
+
+ /* Sanity check: not expected with supported MTUs and 4K pages */
+ if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
+ frag_size_max = PAGE_SIZE;
while (buf_size < eff_mtu) {
- priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER;
- priv->frag_info[i].frag_size =
- (eff_mtu > buf_size + frag_sizes[i]) ?
- frag_sizes[i] : eff_mtu - buf_size;
- priv->frag_info[i].frag_prefix_size = buf_size;
- priv->frag_info[i].frag_stride =
- ALIGN(priv->frag_info[i].frag_size,
- SMP_CACHE_BYTES);
- priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
- priv->frag_info[i].rx_headroom = 0;
- buf_size += priv->frag_info[i].frag_size;
+ int frag_stride, frag_size = eff_mtu - buf_size;
+ int pad, nb;
+
+ if (i < MLX4_EN_MAX_RX_FRAGS - 1)
+ frag_size = min(frag_size, frag_size_max);
+
+ priv->frag_info[i].frag_size = frag_size;
+ frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
+ /* We can only pack two 1536-byte frames in one 4K page.
+ * Each frame therefore consumes more bytes (truesize) than
+ * its own size.
+ */
+ nb = PAGE_SIZE / frag_stride;
+ pad = (PAGE_SIZE - nb * frag_stride) / nb;
+ pad &= ~(SMP_CACHE_BYTES - 1);
+ priv->frag_info[i].frag_stride = frag_stride + pad;
+
+ buf_size += frag_size;
i++;
}
+ priv->dma_dir = PCI_DMA_FROMDEVICE;
+ priv->rx_headroom = 0;
}
priv->num_frags = i;
@@ -1232,10 +998,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
- " frag:%d - size:%d prefix:%d stride:%d\n",
+ " frag:%d - size:%d stride:%d\n",
i,
priv->frag_info[i].frag_size,
- priv->frag_info[i].frag_prefix_size,
priv->frag_info[i].frag_stride);
}
}
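
To make the padding arithmetic concrete, a worked example for the common 1500-MTU case, assuming SMP_CACHE_BYTES == 64 and a 4K page:

/*
 * frag_size   = 1536
 * frag_stride = ALIGN(1536, 64)        = 1536
 * nb          = 4096 / 1536            = 2	frames per page
 * pad         = (4096 - 2 * 1536) / 2  = 512
 * pad        &= ~(64 - 1)              = 512
 * frag_stride = 1536 + 512             = 2048
 *
 * Each 1536-byte frame is thus accounted a 2048-byte truesize,
 * and the two frames exactly fill the page.
 */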
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 95290e1fc9fe..17112faafbcc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,14 +81,11 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
- bool gro_enabled;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
- gro_enabled = priv->dev->features & NETIF_F_GRO;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
- priv->dev->features &= ~NETIF_F_GRO;
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
@@ -111,9 +108,6 @@ mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
- if (gro_enabled)
- priv->dev->features |= NETIF_F_GRO;
-
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3ed42199d3f1..3ba89bc43d74 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -354,13 +354,11 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc frame = {
.page = tx_info->page,
.dma = tx_info->map0_dma,
- .page_offset = XDP_PACKET_HEADROOM,
- .page_size = PAGE_SIZE,
};
if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->frag_info[0].dma_dir);
+ PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
}
@@ -980,8 +978,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tso_packets++;
- i = ((skb->len - lso_header_size) / shinfo->gso_size) +
- !!((skb->len - lso_header_size) % shinfo->gso_size);
+ i = shinfo->gso_segs;
tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
ring->packets += i;
} else {
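
The removed arithmetic was an open-coded DIV_ROUND_UP that the stack already computes. For payload = skb->len - lso_header_size:

/*
 * payload / gso_size + !!(payload % gso_size)
 *	== DIV_ROUND_UP(payload, gso_size)
 *	== skb_shinfo(skb)->gso_segs
 *
 * e.g. payload = 65000, gso_size = 1460:
 *	65000 / 1460 = 44, remainder 760  ->  45 segments.
 */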
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3629ce11a68b..39f401aa3047 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -102,17 +102,6 @@
/* Use the maximum between 16384 and a single page */
#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
-#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \
- PAGE_ALLOC_COSTLY_ORDER)
-
-/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
- * and 4K allocations) */
-enum {
- FRAG_SZ0 = 1536 - NET_IP_ALIGN,
- FRAG_SZ1 = 4096,
- FRAG_SZ2 = 4096,
- FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
-};
#define MLX4_EN_MAX_RX_FRAGS 4
/* Maximum ring sizes */
@@ -264,13 +253,16 @@ struct mlx4_en_rx_alloc {
struct page *page;
dma_addr_t dma;
u32 page_offset;
- u32 page_size;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
+
struct mlx4_en_page_cache {
u32 index;
- struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE];
+ struct {
+ struct page *page;
+ dma_addr_t dma;
+ } buf[MLX4_EN_CACHE_SIZE];
};
struct mlx4_en_priv;
@@ -335,7 +327,6 @@ struct mlx4_en_rx_desc {
struct mlx4_en_rx_ring {
struct mlx4_hwq_resources wqres;
- struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
u32 size; /* number of Rx descs */
u32 actual_size;
u32 size_mask;
@@ -355,6 +346,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long rx_alloc_pages;
unsigned long xdp_drop;
unsigned long xdp_tx;
unsigned long xdp_tx_full;
@@ -472,11 +464,7 @@ struct mlx4_en_mc_list {
struct mlx4_en_frag_info {
u16 frag_size;
- u16 frag_prefix_size;
u32 frag_stride;
- enum dma_data_direction dma_dir;
- u16 order;
- u16 rx_headroom;
};
#ifdef CONFIG_MLX4_EN_DCB
@@ -584,8 +572,10 @@ struct mlx4_en_priv {
u32 rx_ring_num;
u32 rx_skb_size;
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
- u16 num_frags;
- u16 log_rx_info;
+ u8 num_frags;
+ u8 log_rx_info;
+ u8 dma_dir;
+ u16 rx_headroom;
struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 48641cb0367f..926f3c3f3665 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -37,7 +37,7 @@ struct mlx4_en_port_stats {
unsigned long queue_stopped;
unsigned long wake_queue;
unsigned long tx_timeout;
- unsigned long rx_alloc_failed;
+ unsigned long rx_alloc_pages;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long rx_chksum_complete;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d8d5d161b8c7..4aa29ee93013 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2749,7 +2749,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
int err;
int index = vhcr->in_modifier;
struct res_mtt *mtt;
- struct res_mpt *mpt;
+ struct res_mpt *mpt = NULL;
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
int phys;
int id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 117170014e88..a84b652f9b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -31,3 +31,10 @@ config MLX5_CORE_EN_DCB
This option depends on the kernel's DCB support.
If unsure, set to Y
+
+config MLX5_CORE_IPOIB
+ bool "Mellanox Technologies ConnectX-4 IPoIB offloads support"
+ depends on MLX5_CORE_EN
+ default y
+ ---help---
+ MLX5 IPoIB offloads & acceleration support.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9f43beb86250..9e644615f07a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -11,3 +11,5 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
+
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a380353a78c2..5bdaf3d545b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -279,6 +279,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_XRC_SRQ:
case MLX5_CMD_OP_DESTROY_DCT:
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
case MLX5_CMD_OP_DEALLOC_PD:
case MLX5_CMD_OP_DEALLOC_UAR:
case MLX5_CMD_OP_DETACH_FROM_MCG:
@@ -305,8 +307,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
- case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
- case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
+ case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -363,6 +364,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_Q_COUNTER:
case MLX5_CMD_OP_SET_RATE_LIMIT:
case MLX5_CMD_OP_QUERY_RATE_LIMIT:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
case MLX5_CMD_OP_ALLOC_PD:
case MLX5_CMD_OP_ALLOC_UAR:
case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -414,10 +419,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
- case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
- case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
- case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
- case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -501,6 +503,12 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
+ MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+ MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+ MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
MLX5_COMMAND_STR_CASE(ALLOC_PD);
MLX5_COMMAND_STR_CASE(DEALLOC_PD);
MLX5_COMMAND_STR_CASE(ALLOC_UAR);
@@ -576,12 +584,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
- MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
- MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
- MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
- MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
- MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
- MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
+ MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d9490cd2db1..0099a3e397bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -37,6 +37,7 @@
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
@@ -111,18 +112,13 @@
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
-#define MLX5E_SQ_BF_BUDGET 16
#define MLX5E_ICOSQ_MAX_WQEBBS \
(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_IHS_DS_COUNT \
- DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT \
((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-#define MLX5E_XDP_TX_WQEBBS \
- DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
#define MLX5E_NUM_MAIN_GROUPS 9
@@ -158,6 +154,14 @@ static inline int mlx5_max_log_rq_size(int wq_type)
}
}
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+ return is_kdump_kernel() ?
+ MLX5E_MIN_NUM_CHANNELS :
+ min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
+}
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -187,15 +191,15 @@ enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
};
-#define MLX5E_SET_PFLAG(priv, pflag, enable) \
+#define MLX5E_SET_PFLAG(params, pflag, enable) \
do { \
if (enable) \
- (priv)->params.pflags |= (pflag); \
+ (params)->pflags |= (pflag); \
else \
- (priv)->params.pflags &= ~(pflag); \
+ (params)->pflags &= ~(pflag); \
} while (0)
-#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))
+#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
@@ -218,7 +222,6 @@ struct mlx5e_params {
bool rx_cqe_compress_def;
struct mlx5e_cq_moder rx_cq_moderation;
struct mlx5e_cq_moder tx_cq_moderation;
- u16 min_rx_wqes;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
@@ -227,9 +230,11 @@ struct mlx5e_params {
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
+ bool scatter_fcs_en;
bool rx_am_enabled;
u32 lro_timeout;
u32 pflags;
+ struct bpf_prog *xdp_prog;
};
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -285,7 +290,6 @@ struct mlx5e_cq {
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
- struct mlx5e_priv *priv;
/* cqe decompression */
struct mlx5_cqe64 title;
@@ -295,22 +299,163 @@ struct mlx5e_cq {
u16 decmprs_wqe_counter;
/* control */
+ struct mlx5_core_dev *mdev;
struct mlx5_frag_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
-struct mlx5e_rq;
-typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe);
-typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
- u16 ix);
+struct mlx5e_tx_wqe_info {
+ struct sk_buff *skb;
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+};
+
+enum mlx5e_dma_map_type {
+ MLX5E_DMA_MAP_SINGLE,
+ MLX5E_DMA_MAP_PAGE
+};
+
+struct mlx5e_sq_dma {
+ dma_addr_t addr;
+ u32 size;
+ enum mlx5e_dma_map_type type;
+};
+
+enum {
+ MLX5E_SQ_STATE_ENABLED,
+};
+
+struct mlx5e_sq_wqe_info {
+ u8 opcode;
+ u8 num_wqebbs;
+};
+
+struct mlx5e_txqsq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+ u32 dma_fifo_cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ struct mlx5e_sq_stats stats;
+
+ struct mlx5e_cq cq;
+
+ /* write@xmit, read@completion */
+ struct {
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } db;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 dma_fifo_mask;
+ void __iomem *uar_map;
+ struct netdev_queue *txq;
+ u32 sqn;
+ u16 max_inline;
+ u8 min_inline_mode;
+ u16 edge;
+ struct device *pdev;
+ struct mlx5e_tstamp *tstamp;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5e_channel *channel;
+ int txq_ix;
+ u32 rate_limit;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_xdpsq {
+ /* data path */
+
+ /* dirtied @rx completion */
+ u16 cc;
+ u16 pc;
+
+ struct mlx5e_cq cq;
+
+ /* write@xmit, read@completion */
+ struct {
+ struct mlx5e_dma_info *di;
+ bool doorbell;
+ } db;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+ struct device *pdev;
+ __be32 mkey_be;
+ u8 min_inline_mode;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_icosq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ u16 prev_cc;
-typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+ struct mlx5e_cq cq;
+
+ /* write@xmit, read@completion */
+ struct {
+ struct mlx5e_sq_wqe_info *ico_wqe;
+ } db;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+ u16 edge;
+ struct device *pdev;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+static inline bool
+mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
+{
+ return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
+}
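
A worked example of the room check, assuming a 256-entry cyclic WQ (sz_m1 == 0xff) and free-running u16 counters:

/*
 * pc = 300, cc = 100  ->  200 WQEBBs in flight
 * (cc - pc) & 0xff = (u16)(100 - 300) & 0xff = 65336 & 0xff = 56
 *
 * i.e. 256 - 200 = 56 free slots. When cc == pc the mask yields 0
 * even though the queue is empty, hence the explicit (cc == pc) test.
 */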
struct mlx5e_dma_info {
struct page *page;
dma_addr_t addr;
};
+struct mlx5e_umr_dma_info {
+ __be64 *mtt;
+ dma_addr_t mtt_addr;
+ struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+ struct mlx5e_umr_wqe wqe;
+};
+
+struct mlx5e_mpw_info {
+ struct mlx5e_umr_dma_info umr;
+ u16 consumed_strides;
+ u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+};
+
struct mlx5e_rx_am_stats {
int ppms; /* packets per msec */
int epms; /* events per msec */
@@ -347,6 +492,11 @@ struct mlx5e_page_cache {
struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq*, struct mlx5e_rx_wqe*, u16);
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
@@ -381,7 +531,10 @@ struct mlx5e_rq {
u16 rx_headroom;
struct mlx5e_rx_am am; /* Adaptive Moderation */
+
+ /* XDP */
struct bpf_prog *xdp_prog;
+ struct mlx5e_xdpsq xdpsq;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -390,118 +543,10 @@ struct mlx5e_rq {
u32 mpwqe_num_strides;
u32 rqn;
struct mlx5e_channel *channel;
- struct mlx5e_priv *priv;
+ struct mlx5_core_dev *mdev;
struct mlx5_core_mkey umr_mkey;
} ____cacheline_aligned_in_smp;
-struct mlx5e_umr_dma_info {
- __be64 *mtt;
- dma_addr_t mtt_addr;
- struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
- struct mlx5e_umr_wqe wqe;
-};
-
-struct mlx5e_mpw_info {
- struct mlx5e_umr_dma_info umr;
- u16 consumed_strides;
- u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
-struct mlx5e_tx_wqe_info {
- u32 num_bytes;
- u8 num_wqebbs;
- u8 num_dma;
-};
-
-enum mlx5e_dma_map_type {
- MLX5E_DMA_MAP_SINGLE,
- MLX5E_DMA_MAP_PAGE
-};
-
-struct mlx5e_sq_dma {
- dma_addr_t addr;
- u32 size;
- enum mlx5e_dma_map_type type;
-};
-
-enum {
- MLX5E_SQ_STATE_ENABLED,
- MLX5E_SQ_STATE_BF_ENABLE,
-};
-
-struct mlx5e_sq_wqe_info {
- u8 opcode;
- u8 num_wqebbs;
-};
-
-enum mlx5e_sq_type {
- MLX5E_SQ_TXQ,
- MLX5E_SQ_ICO,
- MLX5E_SQ_XDP
-};
-
-struct mlx5e_sq {
- /* data path */
-
- /* dirtied @completion */
- u16 cc;
- u32 dma_fifo_cc;
-
- /* dirtied @xmit */
- u16 pc ____cacheline_aligned_in_smp;
- u32 dma_fifo_pc;
- u16 bf_offset;
- u16 prev_cc;
- u8 bf_budget;
- struct mlx5e_sq_stats stats;
-
- struct mlx5e_cq cq;
-
- /* pointers to per tx element info: write@xmit, read@completion */
- union {
- struct {
- struct sk_buff **skb;
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
- } txq;
- struct mlx5e_sq_wqe_info *ico_wqe;
- struct {
- struct mlx5e_sq_wqe_info *wqe_info;
- struct mlx5e_dma_info *di;
- bool doorbell;
- } xdp;
- } db;
-
- /* read only */
- struct mlx5_wq_cyc wq;
- u32 dma_fifo_mask;
- void __iomem *uar_map;
- struct netdev_queue *txq;
- u32 sqn;
- u16 bf_buf_size;
- u16 max_inline;
- u8 min_inline_mode;
- u16 edge;
- struct device *pdev;
- struct mlx5e_tstamp *tstamp;
- __be32 mkey_be;
- unsigned long state;
-
- /* control path */
- struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_sq_bfreg bfreg;
- struct mlx5e_channel *channel;
- int tc;
- u32 rate_limit;
- u8 type;
-} ____cacheline_aligned_in_smp;
-
-static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
-{
- return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
- (sq->cc == sq->pc));
-}
-
enum channel_flags {
MLX5E_CHANNEL_NAPI_SCHED = 1,
};
@@ -509,9 +554,8 @@ enum channel_flags {
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
- struct mlx5e_sq xdp_sq;
- struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
- struct mlx5e_sq icosq; /* internal control operations */
+ struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_icosq icosq; /* internal control operations */
bool xdp;
struct napi_struct napi;
struct device *pdev;
@@ -522,10 +566,18 @@ struct mlx5e_channel {
/* control */
struct mlx5e_priv *priv;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tstamp *tstamp;
int ix;
int cpu;
};
+struct mlx5e_channels {
+ struct mlx5e_channel **c;
+ unsigned int num;
+ struct mlx5e_params params;
+};
+
enum mlx5e_traffic_types {
MLX5E_TT_IPV4_TCP,
MLX5E_TT_IPV6_TCP,
@@ -675,34 +727,17 @@ enum {
MLX5E_NIC_PRIO
};
-struct mlx5e_profile {
- void (*init)(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile, void *ppriv);
- void (*cleanup)(struct mlx5e_priv *priv);
- int (*init_rx)(struct mlx5e_priv *priv);
- void (*cleanup_rx)(struct mlx5e_priv *priv);
- int (*init_tx)(struct mlx5e_priv *priv);
- void (*cleanup_tx)(struct mlx5e_priv *priv);
- void (*enable)(struct mlx5e_priv *priv);
- void (*disable)(struct mlx5e_priv *priv);
- void (*update_stats)(struct mlx5e_priv *priv);
- int (*max_nch)(struct mlx5_core_dev *mdev);
- int max_tc;
-};
-
struct mlx5e_priv {
/* priv data path fields - start */
- struct mlx5e_sq **txq_to_sq_map;
- int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
- struct bpf_prog *xdp_prog;
+ struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
+ int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
/* priv data path fields - end */
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;
- struct mlx5e_channel **channel;
+ struct mlx5e_channels channels;
u32 tisn[MLX5E_MAX_NUM_TC];
struct mlx5e_rqt indir_rqt;
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
@@ -712,7 +747,6 @@ struct mlx5e_priv {
struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
- struct mlx5e_params params;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
@@ -732,9 +766,28 @@ struct mlx5e_priv {
void *ppriv;
};
+struct mlx5e_profile {
+ void (*init)(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile, void *ppriv);
+ void (*cleanup)(struct mlx5e_priv *priv);
+ int (*init_rx)(struct mlx5e_priv *priv);
+ void (*cleanup_rx)(struct mlx5e_priv *priv);
+ int (*init_tx)(struct mlx5e_priv *priv);
+ void (*cleanup_tx)(struct mlx5e_priv *priv);
+ void (*enable)(struct mlx5e_priv *priv);
+ void (*disable)(struct mlx5e_priv *priv);
+ void (*update_stats)(struct mlx5e_priv *priv);
+ int (*max_nch)(struct mlx5_core_dev *mdev);
+ struct {
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
+ } rx_handlers;
+ int max_tc;
+};
+
void mlx5e_build_ptys2ethtool_map(void);
-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -744,7 +797,9 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
@@ -792,7 +847,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
-void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
@@ -801,14 +856,40 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+struct mlx5e_redirect_rqt_param {
+ bool is_rss;
+ union {
+ u32 rqn; /* Direct RQN (Non-RSS) */
+ struct {
+ u8 hfunc;
+ struct mlx5e_channels *channels;
+ } rss; /* RSS data */
+ };
+};
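
A hypothetical fill-in of the new parameter block for both modes (drop_rqn and priv are assumed context; values purely illustrative):

struct mlx5e_redirect_rqt_param direct_rrp = {
	.is_rss = false,
	.rqn    = drop_rqn,	/* steer everything to a single RQ */
};

struct mlx5e_redirect_rqt_param rss_rrp = {
	.is_rss = true,
	.rss    = {
		.hfunc    = ETH_RSS_HASH_TOP,	/* Toeplitz */
		.channels = &priv->channels,
	},
};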
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
- enum mlx5e_traffic_types tt);
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
+ struct mlx5e_redirect_rqt_param rrp);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
+ enum mlx5e_traffic_types tt,
+ void *tirc);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
+
+int mlx5e_open_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *chs);
+void mlx5e_close_channels(struct mlx5e_channels *chs);
+
+/* Function pointer to be used to modify HW settings while
+ * switching channels
+ */
+typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_hw_modify hw_modify);
+void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
+void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
+
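The open/switch/close trio enables a make-before-break pattern. A sketch under stated assumptions (caller holds priv->state_lock as elsewhere in this driver; num_channels is assumed to be a field of mlx5e_params; the function name is hypothetical):

static int set_num_channels(struct mlx5e_priv *priv, unsigned int count)
{
	struct mlx5e_channels new_chs = {};
	int err;

	new_chs.params = priv->channels.params;
	new_chs.params.num_channels = count;	/* assumed field */

	err = mlx5e_open_channels(priv, &new_chs);
	if (err)
		return err;	/* old channels left untouched on failure */

	/* Swap in the new set, then tear down the old one */
	mlx5e_switch_priv_channels(priv, &new_chs, NULL);
	return 0;
}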
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
u32 *indirection_rqt, int len,
int num_channels);
@@ -816,30 +897,43 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type);
+void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u8 rq_type);
-static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
+static inline
+struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
- u16 ofst = sq->bf_offset;
+ u16 pi = *pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
+ (*pc)++;
+
+ return wqe;
+}
+
+static inline
+void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
+ void __iomem *uar_map,
+ struct mlx5_wqe_ctrl_seg *ctrl)
+{
+ ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
- *sq->wq.db = cpu_to_be32(sq->pc);
+ *wq->db = cpu_to_be32(pc);
/* ensure doorbell record is visible to device before ringing the
* doorbell
*/
wmb();
- if (bf_sz)
- __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
- else
- mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
- /* flush the write-combining mapped buffer */
- wmb();
- sq->bf_offset ^= sq->bf_buf_size;
+ mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}
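
Putting the two helpers together, a minimal sketch of ringing a NOP through a send queue (error handling and locking omitted; wrapper name illustrative):

static void sq_ring_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc,
			void __iomem *uar_map)
{
	struct mlx5e_tx_wqe *wqe = mlx5e_post_nop(wq, sqn, pc);

	/* mlx5e_notify_hw() supplies the ordering: dma_wmb() makes the
	 * WQE visible before the doorbell record, wmb() makes the record
	 * visible before the MMIO doorbell write.
	 */
	mlx5e_notify_hw(wq, *pc, uar_map, &wqe->ctrl);
}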
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
@@ -895,44 +989,43 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
- bool enable_uc_lb);
-
-struct mlx5_eswitch_rep;
-int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
-void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
+
+/* common netdev helpers */
+int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
+
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
-void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
+ u32 underlay_qpn, u32 *tisn);
+void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
+
int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
-struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
-void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
-int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
-void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
- void *sp);
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+/* mlx5e generic netdev management API */
+struct net_device*
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+ void *ppriv);
+int mlx5e_attach_netdev(struct mlx5e_priv *priv);
+void mlx5e_detach_netdev(struct mlx5e_priv *priv);
+void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u16 max_channels);
-bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
-bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 68419a01db36..c8a005326e30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -174,13 +174,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
- struct mlx5_flow_destination dest;
struct mlx5e_tir *tir = priv->indir_tir;
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
@@ -325,10 +321,16 @@ static int arfs_create_table(struct mlx5e_priv *priv,
{
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
- ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0);
+ ft->num_groups = 0;
+
+ ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
+ ft_attr.level = MLX5E_ARFS_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -469,15 +471,11 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 37e66eef6fb5..e706a87fc8b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -90,6 +90,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
+ int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
@@ -111,7 +112,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
+ mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -129,7 +130,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* Disable CQE compression */
netdev_warn(dev, "Disabling cqe compression");
- mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err) {
+ netdev_err(dev, "Failed disabling cqe compression err=%d\n", err);
+ mutex_unlock(&priv->state_lock);
+ return err;
+ }
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index bd898d8deda0..f1f17f7a3cd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -107,10 +107,18 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_dealloc_transport_domain;
}
+ err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
+ if (err) {
+ mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
+ goto err_destroy_mkey;
+ }
+
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
return 0;
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, &res->mkey);
err_dealloc_transport_domain:
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
err_dealloc_pd:
@@ -122,23 +130,26 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
{
struct mlx5e_resources *res = &mdev->mlx5e_res;
+ mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, &res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
}
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
- bool enable_uc_lb)
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
- void *in;
+ int err = -ENOMEM;
+ u32 tirn = 0;
int inlen;
- int err = 0;
+ void *in;
+
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
- return -ENOMEM;
+ goto out;
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -147,13 +158,16 @@ int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
- err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+ tirn = tir->tirn;
+ err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
if (err)
goto out;
}
out:
kvfree(in);
+ if (err)
+ netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a004a5a1a4c2..ce7b09d72ff6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -42,8 +42,9 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d",
- fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+ "%d.%d.%04d (%.16s)",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
+ mdev->board_id);
strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
sizeof(drvinfo->bus_info));
}
@@ -152,12 +153,9 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
}
#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) \
- (NUM_RQ_STATS * priv->params.num_channels * \
- test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
#define MLX5E_NUM_SQ_STATS(priv) \
- (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
- test_bit(MLX5E_STATE_OPENED, &priv->state))
+ (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
#define MLX5E_NUM_PFC_COUNTERS(priv) \
((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
NUM_PPORT_PER_PRIO_PFC_COUNTERS)
@@ -262,17 +260,17 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
return;
/* per channel counters */
- for (i = 0; i < priv->params.num_channels; i++)
+ for (i = 0; i < priv->channels.num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
rq_stats_desc[j].format, i);
- for (tc = 0; tc < priv->params.num_tc; tc++)
- for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < priv->channels.num; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
sq_stats_desc[j].format,
- priv->channeltc_to_txq_map[i][tc]);
+ priv->channel_tc2txq[i][tc]);
}
static void mlx5e_get_strings(struct net_device *dev,
@@ -303,6 +301,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *channels;
struct mlx5_priv *mlx5_priv;
int i, j, tc, prio, idx = 0;
unsigned long pfc_combined;
@@ -313,6 +312,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_update_stats(priv);
+ channels = &priv->channels;
mutex_unlock(&priv->state_lock);
for (i = 0; i < NUM_SW_COUNTERS; i++)
@@ -382,16 +382,16 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
return;
/* per channel counters */
- for (i = 0; i < priv->params.num_channels; i++)
+ for (i = 0; i < channels->num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
rq_stats_desc, j);
- for (tc = 0; tc < priv->params.num_tc; tc++)
- for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < channels->num; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+ data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
sq_stats_desc, j);
}
@@ -406,8 +406,8 @@ static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return num_wqe;
- stride_size = 1 << priv->params.mpwqe_log_stride_sz;
- num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
wqe_size = stride_size * num_strides;
packets_per_wqe = wqe_size /
@@ -427,8 +427,8 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return num_packets;
- stride_size = 1 << priv->params.mpwqe_log_stride_sz;
- num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
wqe_size = stride_size * num_strides;
num_packets = (1 << order_base_2(num_packets));
@@ -443,26 +443,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int rq_wq_type = priv->params.rq_wq_type;
+ int rq_wq_type = priv->channels.params.rq_wq_type;
param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_max_log_rq_size(rq_wq_type));
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << priv->params.log_rq_size);
- param->tx_pending = 1 << priv->params.log_sq_size;
+ 1 << priv->channels.params.log_rq_size);
+ param->tx_pending = 1 << priv->channels.params.log_sq_size;
}
static int mlx5e_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- bool was_opened;
- int rq_wq_type = priv->params.rq_wq_type;
+ int rq_wq_type = priv->channels.params.rq_wq_type;
+ struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
u32 min_rq_size;
u32 max_rq_size;
- u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
u32 num_mtts;
@@ -500,7 +499,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
}
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
- if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!MLX5E_VALID_NUM_MTTS(num_mtts)) {
netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
__func__, param->rx_pending);
@@ -522,26 +521,29 @@ static int mlx5e_set_ringparam(struct net_device *dev,
log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
- if (log_rq_size == priv->params.log_rq_size &&
- log_sq_size == priv->params.log_sq_size &&
- min_rx_wqes == priv->params.min_rx_wqes)
+ if (log_rq_size == priv->channels.params.log_rq_size &&
+ log_sq_size == priv->channels.params.log_sq_size)
return 0;
mutex_lock(&priv->state_lock);
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(dev);
+ new_channels.params = priv->channels.params;
+ new_channels.params.log_rq_size = log_rq_size;
+ new_channels.params.log_sq_size = log_sq_size;
- priv->params.log_rq_size = log_rq_size;
- priv->params.log_sq_size = log_sq_size;
- priv->params.min_rx_wqes = min_rx_wqes;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto unlock;
+ }
+
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto unlock;
- if (was_opened)
- err = mlx5e_open_locked(dev);
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+unlock:
mutex_unlock(&priv->state_lock);
return err;
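mlx5e_set_ringparam() is the first call site converted to the fail-safe flow this series introduces: stage a copy of the parameters, open a complete new set of channels while the old one still serves traffic, and only then switch over (mlx5e_switch_priv_channels() is assumed, from this series, to swap in the new set and close the old one). A user-space analogue of the idea, illustrative names only:

struct cfg   { int ring_size; };
struct state { struct cfg cfg; int running; };

static int  open_with(const struct cfg *c) { (void)c; return 0; } /* may fail */
static void close_old(struct state *s)     { (void)s; }

int reconfigure(struct state *s, const struct cfg *newcfg)
{
    if (!s->running) {              /* closed: just record the params */
        s->cfg = *newcfg;
        return 0;
    }

    int err = open_with(newcfg);    /* old instance still serving */
    if (err)
        return err;                 /* failure leaves service untouched */

    close_old(s);                   /* switch complete, retire the old set */
    s->cfg = *newcfg;
    return 0;
}

If open_with() fails, the running configuration is never touched, which is the point of the pattern.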
@@ -553,7 +555,7 @@ static void mlx5e_get_channels(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
ch->max_combined = priv->profile->max_nch(priv->mdev);
- ch->combined_count = priv->params.num_channels;
+ ch->combined_count = priv->channels.params.num_channels;
}
static int mlx5e_set_channels(struct net_device *dev,
@@ -561,8 +563,8 @@ static int mlx5e_set_channels(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
unsigned int count = ch->combined_count;
+ struct mlx5e_channels new_channels = {};
bool arfs_enabled;
- bool was_opened;
int err = 0;
if (!count) {
@@ -571,27 +573,32 @@ static int mlx5e_set_channels(struct net_device *dev,
return -EINVAL;
}
- if (priv->params.num_channels == count)
+ if (priv->channels.params.num_channels == count)
return 0;
mutex_lock(&priv->state_lock);
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(dev);
+ new_channels.params = priv->channels.params;
+ new_channels.params.num_channels = count;
+ mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
+ /* Create fresh channels with new parameters */
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto out;
arfs_enabled = dev->features & NETIF_F_NTUPLE;
if (arfs_enabled)
mlx5e_arfs_disable(priv);
- priv->params.num_channels = count;
- mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
-
- if (was_opened)
- err = mlx5e_open_locked(dev);
- if (err)
- goto out;
+ /* Switch to new channels, set new parameters and close old ones */
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
if (arfs_enabled) {
err = mlx5e_arfs_enable(priv);
@@ -614,49 +621,24 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
- coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
- coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
- coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec;
- coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
+ coal->rx_coalesce_usecs = priv->channels.params.rx_cq_moderation.usec;
+ coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
+ coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
+ coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
return 0;
}
-static int mlx5e_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *coal)
+static void
+mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channel *c;
- bool restart =
- !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
- bool was_opened;
- int err = 0;
int tc;
int i;
- if (!MLX5_CAP_GEN(mdev, cq_moderation))
- return -EOPNOTSUPP;
-
- mutex_lock(&priv->state_lock);
-
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened && restart) {
- mlx5e_close_locked(netdev);
- priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
- }
-
- priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
- priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
- priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
- priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
-
- if (!was_opened || restart)
- goto out;
-
- for (i = 0; i < priv->params.num_channels; ++i) {
- c = priv->channel[i];
+ for (i = 0; i < priv->channels.num; ++i) {
+ struct mlx5e_channel *c = priv->channels.c[i];
for (tc = 0; tc < c->num_tc; tc++) {
mlx5_core_modify_cq_moderation(mdev,
@@ -669,11 +651,50 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
coal->rx_coalesce_usecs,
coal->rx_max_coalesced_frames);
}
+}
-out:
- if (was_opened && restart)
- err = mlx5e_open_locked(netdev);
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
+ int err = 0;
+ bool reset;
+
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->state_lock);
+ new_channels.params = priv->channels.params;
+
+ new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
+ new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
+ new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
+ new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
+ new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+ /* the netdev is open: apply in place or reset channels */
+
+ reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+ if (!reset) {
+ mlx5e_set_priv_channels_coalesce(priv, coal);
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
+ /* open fresh channels with the new coalescing parameters */
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto out;
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+out:
mutex_unlock(&priv->state_lock);
return err;
}
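mlx5e_set_coalesce() now distinguishes a live update, where only the usec/frame budgets change and mlx5_core_modify_cq_moderation() is applied to each running CQ, from a full channel reset when the adaptive (rx_am) mode flips. A compact analogue of that decision, illustrative names only:

struct coal { int usecs, frames, adaptive; };

static void update_live(const struct coal *c)    { (void)c; }
static int  reset_channels(const struct coal *c) { (void)c; return 0; }

int set_coalesce(struct coal *cur, const struct coal *req)
{
    if (req->adaptive == cur->adaptive) {
        update_live(req);           /* tweak the running CQs in place */
        *cur = *req;
        return 0;
    }
    int err = reset_channels(req);  /* mode flip: rebuild the channels */
    if (!err)
        *cur = *req;
    return err;
}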
@@ -968,7 +989,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return sizeof(priv->params.toeplitz_hash_key);
+ return sizeof(priv->channels.params.toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
@@ -982,15 +1003,15 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
struct mlx5e_priv *priv = netdev_priv(netdev);
if (indir)
- memcpy(indir, priv->params.indirection_rqt,
- sizeof(priv->params.indirection_rqt));
+ memcpy(indir, priv->channels.params.indirection_rqt,
+ sizeof(priv->channels.params.indirection_rqt));
if (key)
- memcpy(key, priv->params.toeplitz_hash_key,
- sizeof(priv->params.toeplitz_hash_key));
+ memcpy(key, priv->channels.params.toeplitz_hash_key,
+ sizeof(priv->channels.params.toeplitz_hash_key));
if (hfunc)
- *hfunc = priv->params.rss_hfunc;
+ *hfunc = priv->channels.params.rss_hfunc;
return 0;
}
@@ -1006,7 +1027,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
memset(tirc, 0, ctxlen);
- mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
}
}
@@ -1030,25 +1051,37 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
- if (indir) {
- u32 rqtn = priv->indir_rqt.rqtn;
-
- memcpy(priv->params.indirection_rqt, indir,
- sizeof(priv->params.indirection_rqt));
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
- }
-
if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
- hfunc != priv->params.rss_hfunc) {
- priv->params.rss_hfunc = hfunc;
+ hfunc != priv->channels.params.rss_hfunc) {
+ priv->channels.params.rss_hfunc = hfunc;
hash_changed = true;
}
+ if (indir) {
+ memcpy(priv->channels.params.indirection_rqt, indir,
+ sizeof(priv->channels.params.indirection_rqt));
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ u32 rqtn = priv->indir_rqt.rqtn;
+ struct mlx5e_redirect_rqt_param rrp = {
+ .is_rss = true,
+ {
+ .rss = {
+ .hfunc = priv->channels.params.rss_hfunc,
+ .channels = &priv->channels,
+ },
+ },
+ };
+
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
+ }
+ }
+
if (key) {
- memcpy(priv->params.toeplitz_hash_key, key,
- sizeof(priv->params.toeplitz_hash_key));
+ memcpy(priv->channels.params.toeplitz_hash_key, key,
+ sizeof(priv->channels.params.toeplitz_hash_key));
hash_changed = hash_changed ||
- priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+ priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
}
if (hash_changed)
@@ -1069,7 +1102,7 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = priv->params.num_channels;
+ info->data = priv->channels.params.num_channels;
break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = priv->fs.ethtool.tot_num_rules;
@@ -1097,7 +1130,7 @@ static int mlx5e_get_tunable(struct net_device *dev,
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
- *(u32 *)data = priv->params.tx_max_inline;
+ *(u32 *)data = priv->channels.params.tx_max_inline;
break;
default:
err = -EINVAL;
@@ -1113,9 +1146,11 @@ static int mlx5e_set_tunable(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- bool was_opened;
- u32 val;
+ struct mlx5e_channels new_channels = {};
int err = 0;
+ u32 val;
+
+ mutex_lock(&priv->state_lock);
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
@@ -1125,24 +1160,26 @@ static int mlx5e_set_tunable(struct net_device *dev,
break;
}
- mutex_lock(&priv->state_lock);
+ new_channels.params = priv->channels.params;
+ new_channels.params.tx_max_inline = val;
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(dev);
-
- priv->params.tx_max_inline = val;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ break;
+ }
- if (was_opened)
- err = mlx5e_open_locked(dev);
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ break;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
- mutex_unlock(&priv->state_lock);
break;
default:
err = -EINVAL;
break;
}
+ mutex_unlock(&priv->state_lock);
return err;
}
@@ -1442,15 +1479,15 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
bool rx_mode_changed;
u8 rx_cq_period_mode;
int err = 0;
- bool reset;
rx_cq_period_mode = enable ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
+ rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
@@ -1459,16 +1496,51 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
if (!rx_mode_changed)
return 0;
- reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (reset)
- mlx5e_close_locked(netdev);
+ new_channels.params = priv->channels.params;
+ mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
- mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ return 0;
+ }
- if (reset)
- err = mlx5e_open_locked(netdev);
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ return err;
- return err;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ return 0;
+}
+
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+{
+ bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
+ struct mlx5e_channels new_channels = {};
+ int err = 0;
+
+ if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+ return new_val ? -EOPNOTSUPP : 0;
+
+ if (curr_val == new_val)
+ return 0;
+
+ new_channels.params = priv->channels.params;
+ MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
+
+ mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
+ new_channels.params.rq_wq_type);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ return 0;
+ }
+
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ return err;
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ return 0;
}
static int set_pflag_rx_cqe_compress(struct net_device *netdev,
@@ -1486,8 +1558,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
}
mlx5e_modify_rx_cqe_compression_locked(priv, enable);
- priv->params.rx_cqe_compress_def = enable;
- mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
+ priv->channels.params.rx_cqe_compress_def = enable;
return 0;
}
@@ -1499,7 +1570,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
bool enable = !!(wanted_flags & flag);
- u32 changes = wanted_flags ^ priv->params.pflags;
+ u32 changes = wanted_flags ^ priv->channels.params.pflags;
int err;
if (!(changes & flag))
@@ -1512,7 +1583,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
return err;
}
- MLX5E_SET_PFLAG(priv, flag, enable);
+ MLX5E_SET_PFLAG(&priv->channels.params, flag, enable);
return 0;
}
@@ -1541,7 +1612,7 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return priv->params.pflags;
+ return priv->channels.params.pflags;
}
static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index f2762e45c8ae..576d6787b484 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -159,14 +159,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_handle **rule_p;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -659,11 +655,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
u16 etype,
u8 proto)
{
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
+ MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -800,7 +792,7 @@ err:
return err;
}
-static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
@@ -808,14 +800,19 @@ static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
mlx5e_destroy_flow_table(&ttc->ft);
}
-static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn)
{
struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
- ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0);
+ ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
+ ft_attr.level = MLX5E_TTC_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.underlay_qpn = underlay_qpn;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -848,13 +845,9 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
- struct mlx5_flow_act flow_act = {
- .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- .encap_id = 0,
- };
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
u8 *mc_dmac;
@@ -985,12 +978,16 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
struct mlx5e_l2_table *l2_table = &priv->fs.l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0);
+ ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
+ ft_attr.level = MLX5E_L2_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1088,11 +1085,16 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
- MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0);
+
+ ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
+ ft_attr.level = MLX5E_VLAN_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1145,7 +1147,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_ttc_table(priv);
+ err = mlx5e_create_ttc_table(priv, 0);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
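Three call sites in en_fs.c replace the open-coded mlx5_flow_act initializer with MLX5_DECLARE_FLOW_ACT(), and mlx5_create_flow_table() grows an attribute struct so underlay_qpn can be passed without widening the signature again. The macro's exact expansion is not quoted here; a generic sketch of the declare-with-defaults technique it presumably uses, with illustrative names:

struct flow_act { int action, tag, encap_id; };

#define DECLARE_FLOW_ACT(name) \
    struct flow_act name = { .action = 1 /* forward */, .tag = 0, .encap_id = 0 }

void add_rule(void)
{
    DECLARE_FLOW_ACT(flow_act);  /* one line replaces the repeated 5-line init */
    (void)flow_act;
}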
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 26fc77e80f7b..85bf4a389295 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -390,7 +390,7 @@ static int validate_flow(struct mlx5e_priv *priv,
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- if (fs->ring_cookie >= priv->params.num_channels &&
+ if (fs->ring_cookie >= priv->channels.params.num_channels &&
fs->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 15cc7b469d2e..a61b71b6fff3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,28 +31,24 @@
*/
#include <net/tc_act/tc_gact.h>
-#include <linux/crash_dump.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
+#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
-#include "eswitch.h"
+#include "en_rep.h"
#include "vxlan.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
- bool am_enabled;
};
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
- u16 max_inline;
- u8 min_inline_mode;
- enum mlx5e_sq_type type;
};
struct mlx5e_cq_param {
@@ -79,49 +75,47 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, reg_umr_sq);
}
-void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u8 rq_type)
{
- priv->params.rq_wq_type = rq_type;
- priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
- switch (priv->params.rq_wq_type) {
+ params->rq_wq_type = rq_type;
+ params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = is_kdump_kernel() ?
+ params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- priv->params.mpwqe_log_stride_sz =
- MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
- MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
- MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
- priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- priv->params.mpwqe_log_stride_sz;
+ params->mpwqe_log_stride_sz =
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
+ MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
+ MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+ params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ params->mpwqe_log_stride_sz;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = is_kdump_kernel() ?
+ params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
/* Extra room needed for build_skb */
- priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
+ params->lro_wqe_sz -= MLX5_RX_HEADROOM +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
- mlx5_core_info(priv->mdev,
- "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(priv->params.log_rq_size),
- BIT(priv->params.mpwqe_log_stride_sz),
- MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
+ mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(params->log_rq_size),
+ BIT(params->mpwqe_log_stride_sz),
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
-static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
- !priv->xdp_prog ?
+ u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
+ !params->xdp_prog ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
- mlx5e_set_rq_type_params(priv, rq_type);
+ mlx5e_set_rq_type_params(mdev, params, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
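mlx5e_set_rq_type_params() now takes (mdev, params) instead of priv, so the RQ-type derivation can run on a scratch mlx5e_params before anything is committed; this is what the ethtool paths above rely on. A generic, compilable sketch of the staging idea, names illustrative:

struct params { int rq_type; int log_rq_size; };

static void set_rq_type(struct params *p, int type)
{
    p->rq_type = type;                 /* derive dependent fields here */
}

int stage_change(struct params *live)
{
    struct params candidate = *live;   /* scratch copy, live untouched */

    set_rq_type(&candidate, 1);
    /* validate / open with candidate; commit only on success */
    *live = candidate;
    return 0;
}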
@@ -181,8 +175,10 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
int i, j;
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->params.num_channels; i++) {
- rq_stats = &priv->channel[i]->rq.stats;
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ rq_stats = &c->rq.stats;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
@@ -204,8 +200,8 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_cache_empty += rq_stats->cache_empty;
s->rx_cache_busy += rq_stats->cache_busy;
- for (j = 0; j < priv->params.num_tc; j++) {
- sq_stats = &priv->channel[i]->sq[j].stats;
+ for (j = 0; j < priv->channels.params.num_tc; j++) {
+ sq_stats = &c->sq[j].stats;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
@@ -402,8 +398,10 @@ static inline int mlx5e_get_wqe_mtt_sz(void)
MLX5_UMR_MTT_ALIGNMENT);
}
-static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
- struct mlx5e_umr_wqe *wqe, u16 ix)
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_icosq *sq,
+ struct mlx5e_umr_wqe *wqe,
+ u16 ix)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
@@ -493,11 +491,10 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
kfree(rq->mpwqe.info);
}
-static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
+static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
u64 npages, u8 page_shift,
struct mlx5_core_mkey *umr_mkey)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
u32 *in;
@@ -531,21 +528,20 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
return err;
}
-static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
+static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
- struct mlx5e_priv *priv = rq->priv;
- u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));
+ u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
- return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
+ return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
-static int mlx5e_create_rq(struct mlx5e_channel *c,
- struct mlx5e_rq_param *param,
- struct mlx5e_rq *rq)
+static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq *rq)
{
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
- void *rqc = param->rqc;
+ struct mlx5_core_dev *mdev = c->mdev;
+ void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 byte_count;
u32 frag_sz;
@@ -554,9 +550,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
int err;
int i;
- param->wq.db_numa_node = cpu_to_node(c->cpu);
+ rqp->wq.db_numa_node = cpu_to_node(c->cpu);
- err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
return err;
@@ -565,15 +561,15 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- rq->wq_type = priv->params.rq_wq_type;
+ rq->wq_type = params->rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
- rq->tstamp = &priv->tstamp;
+ rq->tstamp = c->tstamp;
rq->channel = c;
rq->ix = c->ix;
- rq->priv = c->priv;
+ rq->mdev = mdev;
- rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
+ rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
err = PTR_ERR(rq->xdp_prog);
rq->xdp_prog = NULL;
@@ -588,24 +584,26 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->rx_headroom = MLX5_RX_HEADROOM;
}
- switch (priv->params.rq_wq_type) {
+ switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- if (mlx5e_is_vf_vport_rep(priv)) {
- err = -EINVAL;
- goto err_rq_wq_destroy;
- }
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
- rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
+ if (!rq->handle_rx_cqe) {
+ err = -EINVAL;
+ netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
+ goto err_rq_wq_destroy;
+ }
+
+ rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
+ rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);
rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
byte_count = rq->buff.wqe_sz;
- err = mlx5e_create_rq_umr_mkey(rq);
+ err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_wq_destroy;
rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
@@ -621,18 +619,20 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
err = -ENOMEM;
goto err_rq_wq_destroy;
}
-
- if (mlx5e_is_vf_vport_rep(priv))
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
- else
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
-
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
- rq->buff.wqe_sz = (priv->params.lro_en) ?
- priv->params.lro_wqe_sz :
- MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
+ if (!rq->handle_rx_cqe) {
+ kfree(rq->dma_info);
+ err = -EINVAL;
+ netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
+ goto err_rq_wq_destroy;
+ }
+
+ rq->buff.wqe_sz = params->lro_en ?
+ params->lro_wqe_sz :
+ MLX5E_SW2HW_MTU(c->netdev->mtu);
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
@@ -656,8 +656,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = priv->params.rx_cq_period_mode;
-
+ rq->am.mode = params->rx_cq_period_mode;
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -674,7 +673,7 @@ err_rq_wq_destroy:
return err;
}
-static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
int i;
@@ -684,7 +683,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
- mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
kfree(rq->dma_info);
@@ -699,10 +698,10 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
-static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+static int mlx5e_create_rq(struct mlx5e_rq *rq,
+ struct mlx5e_rq_param *param)
{
- struct mlx5e_priv *priv = rq->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
@@ -723,7 +722,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
- MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -742,8 +740,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
int next_state)
{
struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *mdev = c->mdev;
void *in;
void *rqc;
@@ -767,7 +764,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
return err;
}
-static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
@@ -787,6 +784,35 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
+ MLX5_SET(rqc, rqc, scatter_fcs, enable);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5_core_dev *mdev = c->mdev;
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
MLX5_SET(rqc, rqc, vsd, vsd);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
@@ -798,25 +824,28 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
return err;
}
-static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
- mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
+ mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
+
struct mlx5_wq_ll *wq = &rq->wq;
+ u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
while (time_before(jiffies, exp_time)) {
- if (wq->cur_sz >= priv->params.min_rx_wqes)
+ if (wq->cur_sz >= min_wqes)
return 0;
msleep(20);
}
+ netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+ rq->rqn, wq->cur_sz, min_wqes);
return -ETIMEDOUT;
}
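mlx5e_wait_for_min_rx_wqes() now computes the threshold from the WQ itself and warns on timeout; the loop is the usual bounded-poll shape. A user-space analogue with a monotonic clock in place of jiffies; condition() is a placeholder:

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static bool condition(void) { return true; }

static long now_ms(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

int wait_for(long timeout_ms)
{
    long deadline = now_ms() + timeout_ms;

    while (now_ms() < deadline) {
        if (condition())
            return 0;
        usleep(20 * 1000);      /* mirrors the msleep(20) above */
    }
    return -1;                  /* -ETIMEDOUT in the kernel version */
}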
@@ -842,83 +871,128 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
{
- struct mlx5e_sq *sq = &c->icosq;
- u16 pi = sq->pc & sq->wq.sz_m1;
int err;
- err = mlx5e_create_rq(c, param, rq);
+ err = mlx5e_alloc_rq(c, params, param, rq);
if (err)
return err;
- err = mlx5e_enable_rq(rq, param);
+ err = mlx5e_create_rq(rq, param);
if (err)
- goto err_destroy_rq;
+ goto err_free_rq;
- set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
- goto err_disable_rq;
+ goto err_destroy_rq;
- if (param->am_enabled)
+ if (params->rx_am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- sq->db.ico_wqe[pi].num_wqebbs = 1;
- mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
-
return 0;
-err_disable_rq:
- clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- mlx5e_disable_rq(rq);
err_destroy_rq:
mlx5e_destroy_rq(rq);
+err_free_rq:
+ mlx5e_free_rq(rq);
return err;
}
-static void mlx5e_close_rq(struct mlx5e_rq *rq)
+static void mlx5e_activate_rq(struct mlx5e_rq *rq)
+{
+ struct mlx5e_icosq *sq = &rq->channel->icosq;
+ u16 pi = sq->pc & sq->wq.sz_m1;
+ struct mlx5e_tx_wqe *nopwqe;
+
+ set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
+ nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+}
+
+static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
- cancel_work_sync(&rq->am.work);
+}
- mlx5e_disable_rq(rq);
- mlx5e_free_rx_descs(rq);
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+ cancel_work_sync(&rq->am.work);
mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+ mlx5e_free_rq(rq);
}
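The renames in this region settle into a consistent staged lifecycle for each object, which the new switch path drives independently of open/close. An ordering sketch using the RQ names from the diff; this is a summary, not driver code:

/*
 *  open:  mlx5e_alloc_rq()      host memory, params snapshot
 *         mlx5e_create_rq()     firmware object, moved RST -> RDY
 *  up:    mlx5e_activate_rq()   set ENABLED, post first NOP doorbell
 *  down:  mlx5e_deactivate_rq() clear ENABLED, napi_synchronize()
 *  close: mlx5e_destroy_rq()    firmware teardown
 *         mlx5e_free_rq()       descriptors, host memory
 */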
-static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
+static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kfree(sq->db.xdp.di);
- kfree(sq->db.xdp.wqe_info);
+ kfree(sq->db.di);
}
-static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
+ sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
GFP_KERNEL, numa);
- sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
- if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
- mlx5e_free_sq_xdp_db(sq);
+ if (!sq->db.di) {
+ mlx5e_free_xdpsq_db(sq);
return -ENOMEM;
}
return 0;
}
-static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
+static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_xdpsq *sq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
+ int err;
+
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->min_inline_mode = params->tx_min_inline_mode;
+
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+
+ err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
+ if (err)
+ goto err_sq_wq_destroy;
+
+ return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
+{
+ mlx5e_free_xdpsq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
kfree(sq->db.ico_wqe);
}
-static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
@@ -930,155 +1004,128 @@ static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
return 0;
}
-static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
+static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_icosq *sq)
{
- kfree(sq->db.txq.wqe_info);
- kfree(sq->db.txq.dma_fifo);
- kfree(sq->db.txq.skb);
-}
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
+ int err;
-static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
-{
- int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->uar_map = mdev->mlx5e_res.bfreg.map;
- sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
- GFP_KERNEL, numa);
- sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
- GFP_KERNEL, numa);
- sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
- GFP_KERNEL, numa);
- if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
- mlx5e_free_sq_txq_db(sq);
- return -ENOMEM;
- }
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- sq->dma_fifo_mask = df_sz - 1;
+ err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
+ if (err)
+ goto err_sq_wq_destroy;
+
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+ return err;
}
-static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
- switch (sq->type) {
- case MLX5E_SQ_TXQ:
- mlx5e_free_sq_txq_db(sq);
- break;
- case MLX5E_SQ_ICO:
- mlx5e_free_sq_ico_db(sq);
- break;
- case MLX5E_SQ_XDP:
- mlx5e_free_sq_xdp_db(sq);
- break;
- }
+ mlx5e_free_icosq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
}
-static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
- switch (sq->type) {
- case MLX5E_SQ_TXQ:
- return mlx5e_alloc_sq_txq_db(sq, numa);
- case MLX5E_SQ_ICO:
- return mlx5e_alloc_sq_ico_db(sq, numa);
- case MLX5E_SQ_XDP:
- return mlx5e_alloc_sq_xdp_db(sq, numa);
- }
-
- return 0;
+ kfree(sq->db.wqe_info);
+ kfree(sq->db.dma_fifo);
}
-static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
+static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
- switch (sq_type) {
- case MLX5E_SQ_ICO:
- return MLX5E_ICOSQ_MAX_WQEBBS;
- case MLX5E_SQ_XDP:
- return MLX5E_XDP_TX_WQEBBS;
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
+ GFP_KERNEL, numa);
+ sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
+ GFP_KERNEL, numa);
+ if (!sq->db.dma_fifo || !sq->db.wqe_info) {
+ mlx5e_free_txqsq_db(sq);
+ return -ENOMEM;
}
- return MLX5_SEND_WQE_MAX_WQEBBS;
+
+ sq->dma_fifo_mask = df_sz - 1;
+
+ return 0;
}
-static int mlx5e_create_sq(struct mlx5e_channel *c,
- int tc,
- struct mlx5e_sq_param *param,
- struct mlx5e_sq *sq)
+static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
+ int txq_ix,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq)
{
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- void *sqc = param->sqc;
- void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
int err;
- sq->type = param->type;
sq->pdev = c->pdev;
- sq->tstamp = &priv->tstamp;
+ sq->tstamp = c->tstamp;
sq->mkey_be = c->mkey_be;
sq->channel = c;
- sq->tc = tc;
-
- err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
- if (err)
- return err;
+ sq->txq_ix = txq_ix;
+ sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->max_inline = params->tx_max_inline;
+ sq->min_inline_mode = params->tx_min_inline_mode;
- sq->uar_map = sq->bfreg.map;
param->wq.db_numa_node = cpu_to_node(c->cpu);
-
- err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
- &sq->wq_ctrl);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
- goto err_unmap_free_uar;
-
- sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- if (sq->bfreg.wc)
- set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
-
- sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
- sq->max_inline = param->max_inline;
- sq->min_inline_mode = param->min_inline_mode;
+ return err;
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+ err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
- if (sq->type == MLX5E_SQ_TXQ) {
- int txq_ix;
-
- txq_ix = c->ix + tc * priv->params.num_channels;
- sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
- priv->txq_to_sq_map[txq_ix] = sq;
- }
-
- sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
- sq->bf_budget = MLX5E_SQ_BF_BUDGET;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
return 0;
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
-err_unmap_free_uar:
- mlx5_free_bfreg(mdev, &sq->bfreg);
-
return err;
}
-static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5e_priv *priv = c->priv;
-
- mlx5e_free_sq_db(sq);
+ mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
- mlx5_free_bfreg(priv->mdev, &sq->bfreg);
}
-static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
-{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+struct mlx5e_create_sq_param {
+ struct mlx5_wq_ctrl *wq_ctrl;
+ u32 cqn;
+ u32 tisn;
+ u8 tis_lst_sz;
+ u8 min_inline_mode;
+};
+static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_create_sq_param *csp,
+ u32 *sqn)
+{
void *in;
void *sqc;
void *wq;
@@ -1086,7 +1133,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
int err;
inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
- sizeof(u64) * sq->wq_ctrl.buf.npages;
+ sizeof(u64) * csp->wq_ctrl->buf.npages;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
@@ -1095,40 +1142,40 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
wq = MLX5_ADDR_OF(sqc, sqc, wq);
memcpy(sqc, param->sqc, sizeof(param->sqc));
-
- MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
- 0 : priv->tisn[sq->tc]);
- MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
+ MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
+ MLX5_SET(sqc, sqc, cqn, csp->cqn);
if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
- MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
+ MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
- MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
- MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
- MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+ MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
- mlx5_fill_page_array(&sq->wq_ctrl.buf,
- (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+ mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
- err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+ err = mlx5_core_create_sq(mdev, in, inlen, sqn);
kvfree(in);
return err;
}
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
- int next_state, bool update_rl, int rl_index)
-{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+struct mlx5e_modify_sq_param {
+ int curr_state;
+ int next_state;
+ bool rl_update;
+ int rl_index;
+};
+static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
+ struct mlx5e_modify_sq_param *p)
+{
void *in;
void *sqc;
int inlen;
@@ -1141,68 +1188,94 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
- MLX5_SET(modify_sq_in, in, sq_state, curr_state);
- MLX5_SET(sqc, sqc, state, next_state);
- if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
+ MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
+ MLX5_SET(sqc, sqc, state, p->next_state);
+ if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
- MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
}
- err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+ err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
- struct mlx5e_channel *c = sq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- mlx5_core_destroy_sq(mdev, sq->sqn);
- if (sq->rate_limit)
- mlx5_rl_remove_rate(mdev, sq->rate_limit);
+ mlx5_core_destroy_sq(mdev, sqn);
}
-static int mlx5e_open_sq(struct mlx5e_channel *c,
- int tc,
- struct mlx5e_sq_param *param,
- struct mlx5e_sq *sq)
+static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_create_sq_param *csp,
+ u32 *sqn)
{
+ struct mlx5e_modify_sq_param msp = {0};
int err;
- err = mlx5e_create_sq(c, tc, param, sq);
+ err = mlx5e_create_sq(mdev, param, csp, sqn);
if (err)
return err;
- err = mlx5e_enable_sq(sq, param);
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+ err = mlx5e_modify_sq(mdev, *sqn, &msp);
if (err)
- goto err_destroy_sq;
+ mlx5e_destroy_sq(mdev, *sqn);
- set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
- false, 0);
+ return err;
+}
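SQ creation is split from the RST-to-RDY transition, with mlx5e_create_sq_rdy() giving callers all-or-nothing semantics: if the modify fails, the SQ is destroyed before the error is returned. A generic sketch of that create-then-arm shape, placeholder hw_* helpers:

static int  hw_create(unsigned int *id)  { *id = 1; return 0; }
static int  hw_set_ready(unsigned int id) { (void)id; return 0; }
static void hw_destroy(unsigned int id)   { (void)id; }

int create_ready(unsigned int *id)
{
    int err = hw_create(id);        /* object starts in reset */
    if (err)
        return err;
    err = hw_set_ready(*id);        /* arm it */
    if (err)
        hw_destroy(*id);            /* caller never sees a half-made object */
    return err;
}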
+
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+ struct mlx5e_txqsq *sq, u32 rate);
+
+static int mlx5e_open_txqsq(struct mlx5e_channel *c,
+ u32 tisn,
+ int txq_ix,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_create_sq_param csp = {};
+ u32 tx_rate;
+ int err;
+
+ err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
if (err)
- goto err_disable_sq;
+ return err;
- if (sq->txq) {
- netdev_tx_reset_queue(sq->txq);
- netif_tx_start_queue(sq->txq);
- }
+ csp.tisn = tisn;
+ csp.tis_lst_sz = 1;
+ csp.cqn = sq->cq.mcq.cqn;
+ csp.wq_ctrl = &sq->wq_ctrl;
+ csp.min_inline_mode = sq->min_inline_mode;
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ if (err)
+ goto err_free_txqsq;
+
+ tx_rate = c->priv->tx_rates[sq->txq_ix];
+ if (tx_rate)
+ mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
return 0;
-err_disable_sq:
+err_free_txqsq:
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- mlx5e_disable_sq(sq);
-err_destroy_sq:
- mlx5e_destroy_sq(sq);
+ mlx5e_free_txqsq(sq);
return err;
}
+static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
+{
+ sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+}
+
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
__netif_tx_lock_bh(txq);
@@ -1210,43 +1283,153 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
__netif_tx_unlock_bh(txq);
}
-static void mlx5e_close_sq(struct mlx5e_sq *sq)
+static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
+ struct mlx5e_channel *c = sq->channel;
+
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
/* prevent netif_tx_wake_queue */
- napi_synchronize(&sq->channel->napi);
+ napi_synchronize(&c->napi);
- if (sq->txq) {
- netif_tx_disable_queue(sq->txq);
+ netif_tx_disable_queue(sq->txq);
- /* last doorbell out, godspeed .. */
- if (mlx5e_sq_has_room_for(sq, 1)) {
- sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
- mlx5e_send_nop(sq, true);
- }
+ /* last doorbell out, godspeed .. */
+ if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
+ struct mlx5e_tx_wqe *nop;
+
+ sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
+ nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
}
+}
+
+static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5_core_dev *mdev = c->mdev;
- mlx5e_disable_sq(sq);
- mlx5e_free_sq_descs(sq);
- mlx5e_destroy_sq(sq);
+ mlx5e_destroy_sq(mdev, sq->sqn);
+ if (sq->rate_limit)
+ mlx5_rl_remove_rate(mdev, sq->rate_limit);
+ mlx5e_free_txqsq_descs(sq);
+ mlx5e_free_txqsq(sq);
}
-static int mlx5e_create_cq(struct mlx5e_channel *c,
- struct mlx5e_cq_param *param,
- struct mlx5e_cq *cq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_icosq *sq)
+{
+ struct mlx5e_create_sq_param csp = {};
+ int err;
+
+ err = mlx5e_alloc_icosq(c, param, sq);
+ if (err)
+ return err;
+
+ csp.cqn = sq->cq.mcq.cqn;
+ csp.wq_ctrl = &sq->wq_ctrl;
+ csp.min_inline_mode = params->tx_min_inline_mode;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ if (err)
+ goto err_free_icosq;
+
+ return 0;
+
+err_free_icosq:
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ mlx5e_free_icosq(sq);
+
+ return err;
+}
+
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ napi_synchronize(&c->napi);
+
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
+ mlx5e_free_icosq(sq);
+}
+
+static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_xdpsq *sq)
+{
+ unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+ struct mlx5e_create_sq_param csp = {};
+ unsigned int inline_hdr_sz = 0;
+ int err;
+ int i;
+
+ err = mlx5e_alloc_xdpsq(c, params, param, sq);
+ if (err)
+ return err;
+
+ csp.tis_lst_sz = 1;
+ csp.tisn = c->priv->tisn[0]; /* tc = 0 */
+ csp.cqn = sq->cq.mcq.cqn;
+ csp.wq_ctrl = &sq->wq_ctrl;
+ csp.min_inline_mode = sq->min_inline_mode;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ if (err)
+ goto err_free_xdpsq;
+
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+ ds_cnt++;
+ }
+
+ /* Pre-initialize fixed WQE fields */
+ for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
+ dseg->lkey = sq->mkey_be;
+ }
+
+ return 0;
+
+err_free_xdpsq:
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ mlx5e_free_xdpsq(sq);
+
+ return err;
+}
+
+static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ napi_synchronize(&c->napi);
+
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
+ mlx5e_free_xdpsq_descs(sq);
+ mlx5e_free_xdpsq(sq);
+}
+
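Pre-building the fixed WQE fields in the loop above means the XDP transmit fast path only fills in per-packet data. A worked example of the qpn_ds packing with assumed values, sqn = 0x123 and ds_cnt = 2 (one control plus one data segment, no inline header):

/*   (0x123 << 8) | 2  ==  0x00012302
 *
 * The SQ number occupies the upper 24 bits and the data-segment count
 * the low byte; both are constant for the SQ's lifetime, so they are
 * written once here rather than on every transmit.
 */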
+static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
{
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
unsigned int irqn;
int err;
u32 i;
- param->wq.buf_numa_node = cpu_to_node(c->cpu);
- param->wq.db_numa_node = cpu_to_node(c->cpu);
- param->eq_ix = c->ix;
-
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
@@ -1254,8 +1437,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
- cq->napi = &c->napi;
-
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
@@ -1272,21 +1453,38 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
cqe->op_own = 0xf1;
}
- cq->channel = c;
- cq->priv = priv;
+ cq->mdev = mdev;
return 0;
}
-static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+static int mlx5e_alloc_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
+{
+ struct mlx5_core_dev *mdev = c->priv->mdev;
+ int err;
+
+ param->wq.buf_numa_node = cpu_to_node(c->cpu);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+ param->eq_ix = c->ix;
+
+ err = mlx5e_alloc_cq_common(mdev, param, cq);
+
+ cq->napi = &c->napi;
+ cq->channel = c;
+
+ return err;
+}
+
+static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
mlx5_cqwq_destroy(&cq->wq_ctrl);
}
-static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
- struct mlx5e_priv *priv = cq->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *mdev = cq->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
void *in;
@@ -1330,47 +1528,41 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
return 0;
}
-static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
- struct mlx5e_priv *priv = cq->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- mlx5_core_destroy_cq(mdev, &cq->mcq);
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_moder moder,
struct mlx5e_cq_param *param,
- struct mlx5e_cq *cq,
- struct mlx5e_cq_moder moderation)
+ struct mlx5e_cq *cq)
{
+ struct mlx5_core_dev *mdev = c->mdev;
int err;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
- err = mlx5e_create_cq(c, param, cq);
+ err = mlx5e_alloc_cq(c, param, cq);
if (err)
return err;
- err = mlx5e_enable_cq(cq, param);
+ err = mlx5e_create_cq(cq, param);
if (err)
- goto err_destroy_cq;
+ goto err_free_cq;
if (MLX5_CAP_GEN(mdev, cq_moderation))
- mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation.usec,
- moderation.pkts);
+ mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
return 0;
-err_destroy_cq:
- mlx5e_destroy_cq(cq);
+err_free_cq:
+ mlx5e_free_cq(cq);
return err;
}
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
- mlx5e_disable_cq(cq);
mlx5e_destroy_cq(cq);
+ mlx5e_free_cq(cq);
}
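A convention worth spelling out, since it repeats for RQs, SQs, and CQs throughout this patch: each object's lifecycle is now split into a host-memory stage and a device-object stage, with open/close composing the two. Roughly, summarizing the hunks above rather than introducing new API:

/* alloc_*()   - host resources: WQ buffer, doorbell, SW bookkeeping
 * create_*()  - device object via firmware command (e.g. CREATE_CQ)
 * destroy_*() - firmware teardown of that device object
 * free_*()    - release of the host resources
 *
 * open_*()  = alloc + create (+ modify to ready, for RQs and SQs)
 * close_*() = destroy + free
 */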
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
@@ -1379,15 +1571,15 @@ static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
- struct mlx5e_priv *priv = c->priv;
int err;
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
- err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
- priv->params.tx_cq_moderation);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation,
+ &cparam->tx_cq, &c->sq[tc].cq);
if (err)
goto err_close_tx_cqs;
}
@@ -1410,13 +1602,17 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
int err;
int tc;
- for (tc = 0; tc < c->num_tc; tc++) {
- err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+ for (tc = 0; tc < params->num_tc; tc++) {
+ int txq_ix = c->ix + tc * params->num_channels;
+
+ err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
+ params, &cparam->sq, &c->sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -1425,7 +1621,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
err_close_sqs:
for (tc--; tc >= 0; tc--)
- mlx5e_close_sq(&c->sq[tc]);
+ mlx5e_close_txqsq(&c->sq[tc]);
return err;
}
@@ -1435,23 +1631,15 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
int tc;
for (tc = 0; tc < c->num_tc; tc++)
- mlx5e_close_sq(&c->sq[tc]);
-}
-
-static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
-{
- int i;
-
- for (i = 0; i < priv->profile->max_tc; i++)
- priv->channeltc_to_txq_map[ix][i] =
- ix + i * priv->params.num_channels;
+ mlx5e_close_txqsq(&c->sq[tc]);
}
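The txq_ix = c->ix + tc * params->num_channels layout in mlx5e_open_sqs() above is channel-major within each traffic class. A worked example with assumed sizes, num_channels = 4 and num_tc = 2:

/*   txq 0..3  ->  tc 0, channels 0..3
 *   txq 4..7  ->  tc 1, channels 0..3
 *
 * e.g. channel 1 owns txq 1 (tc 0) and txq 5 (tc 1), matching the
 * contiguous per-TC queue ranges that netdev_set_tc_queue() sets up.
 */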
static int mlx5e_set_sq_maxrate(struct net_device *dev,
- struct mlx5e_sq *sq, u32 rate)
+ struct mlx5e_txqsq *sq, u32 rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_modify_sq_param msp = {0};
u16 rl_index = 0;
int err;
@@ -1474,8 +1662,11 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,
}
}
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_RDY, true, rl_index);
+ msp.curr_state = MLX5_SQC_STATE_RDY;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+ msp.rl_index = rl_index;
+ msp.rl_update = true;
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
if (err) {
netdev_err(dev, "Failed configuring rate %u: %d\n",
rate, err);
@@ -1493,7 +1684,7 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
+ struct mlx5e_txqsq *sq = priv->txq2sq[index];
int err = 0;
if (!mlx5_rl_is_supported(mdev)) {
@@ -1520,114 +1711,87 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
-static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
-{
- return is_kdump_kernel() ?
- MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mdev->priv.eq_table.num_comp_vectors,
- MLX5E_MAX_NUM_CHANNELS);
-}
-
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
- struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
+ struct mlx5e_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
- struct mlx5e_cq_moder rx_cq_profile;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
- struct mlx5e_sq *sq;
int err;
- int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
c->priv = priv;
+ c->mdev = priv->mdev;
+ c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
- c->num_tc = priv->params.num_tc;
- c->xdp = !!priv->xdp_prog;
-
- if (priv->params.rx_am_enabled)
- rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
- else
- rx_cq_profile = priv->params.rx_cq_moderation;
-
- mlx5e_build_channeltc_to_txq_map(priv, ix);
+ c->num_tc = params->num_tc;
+ c->xdp = !!params->xdp_prog;
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
+ err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
if (err)
goto err_napi_del;
- err = mlx5e_open_tx_cqs(c, cparam);
+ err = mlx5e_open_tx_cqs(c, params, cparam);
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
- rx_cq_profile);
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
if (err)
goto err_close_tx_cqs;
/* XDP SQ CQ params are the same as the normal TXQ SQ CQ params */
- err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
- priv->params.tx_cq_moderation) : 0;
+ err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
+ &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
if (err)
goto err_close_rx_cq;
napi_enable(&c->napi);
- err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
+ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
if (err)
goto err_disable_napi;
- err = mlx5e_open_sqs(c, cparam);
+ err = mlx5e_open_sqs(c, params, cparam);
if (err)
goto err_close_icosq;
- for (i = 0; i < priv->params.num_tc; i++) {
- u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
-
- if (priv->tx_rates[txq_ix]) {
- sq = priv->txq_to_sq_map[txq_ix];
- mlx5e_set_sq_maxrate(priv->netdev, sq,
- priv->tx_rates[txq_ix]);
- }
- }
-
- err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
+ err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
if (err)
goto err_close_sqs;
- err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
if (err)
goto err_close_xdp_sq;
- netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
*cp = c;
return 0;
err_close_xdp_sq:
if (c->xdp)
- mlx5e_close_sq(&c->xdp_sq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq);
err_close_sqs:
mlx5e_close_sqs(c);
err_close_icosq:
- mlx5e_close_sq(&c->icosq);
+ mlx5e_close_icosq(&c->icosq);
err_disable_napi:
napi_disable(&c->napi);
if (c->xdp)
- mlx5e_close_cq(&c->xdp_sq.cq);
+ mlx5e_close_cq(&c->rq.xdpsq.cq);
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
@@ -1645,16 +1809,35 @@ err_napi_del:
return err;
}
+static void mlx5e_activate_channel(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_activate_txqsq(&c->sq[tc]);
+ mlx5e_activate_rq(&c->rq);
+ netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
+}
+
+static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
+{
+ int tc;
+
+ mlx5e_deactivate_rq(&c->rq);
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_deactivate_txqsq(&c->sq[tc]);
+}
+
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
if (c->xdp)
- mlx5e_close_sq(&c->xdp_sq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq);
mlx5e_close_sqs(c);
- mlx5e_close_sq(&c->icosq);
+ mlx5e_close_icosq(&c->icosq);
napi_disable(&c->napi);
if (c->xdp)
- mlx5e_close_cq(&c->xdp_sq.cq);
+ mlx5e_close_cq(&c->rq.xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
@@ -1664,17 +1847,16 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- switch (priv->params.rq_wq_type) {
+ switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- MLX5_SET(wq, wq, log_wqe_num_of_strides,
- priv->params.mpwqe_log_num_strides - 9);
- MLX5_SET(wq, wq, log_wqe_stride_size,
- priv->params.mpwqe_log_stride_sz - 6);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
+ MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
@@ -1683,14 +1865,14 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
- MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
+ MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
+ MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
-
- param->am_enabled = priv->params.rx_am_enabled;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
@@ -1715,17 +1897,14 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
mlx5e_build_sq_param_common(priv, param);
- MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
-
- param->max_inline = priv->params.tx_max_inline;
- param->min_inline_mode = priv->params.tx_min_inline_mode;
- param->type = MLX5E_SQ_TXQ;
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1737,37 +1916,36 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
u8 log_cq_size;
- switch (priv->params.rq_wq_type) {
+ switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = priv->params.log_rq_size +
- priv->params.mpwqe_log_num_strides;
+ log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- log_cq_size = priv->params.log_rq_size;
+ log_cq_size = params->log_rq_size;
}
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
- if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
mlx5e_build_common_cq_param(priv, param);
-
- param->cq_period_mode = priv->params.rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+ MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
mlx5e_build_common_cq_param(priv, param);
@@ -1775,8 +1953,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
}
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_cq_param *param,
- u8 log_wq_size)
+ u8 log_wq_size,
+ struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
@@ -1788,8 +1966,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
- struct mlx5e_sq_param *param,
- u8 log_wq_size)
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -1798,162 +1976,119 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
-
- param->type = MLX5E_SQ_ICO;
}
static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
mlx5e_build_sq_param_common(priv, param);
- MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
-
- param->max_inline = priv->params.tx_max_inline;
- param->min_inline_mode = priv->params.tx_min_inline_mode;
- param->type = MLX5E_SQ_XDP;
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- mlx5e_build_rq_param(priv, &cparam->rq);
- mlx5e_build_sq_param(priv, &cparam->sq);
- mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
- mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
- mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
- mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
- mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
+ mlx5e_build_rq_param(priv, params, &cparam->rq);
+ mlx5e_build_sq_param(priv, params, &cparam->sq);
+ mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
+ mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
+ mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
+ mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
-static int mlx5e_open_channels(struct mlx5e_priv *priv)
+int mlx5e_open_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *chs)
{
struct mlx5e_channel_param *cparam;
- int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
- int j;
-
- priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
- GFP_KERNEL);
- priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
- sizeof(struct mlx5e_sq *), GFP_KERNEL);
+ chs->num = chs->params.num_channels;
+ chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+ if (!chs->c || !cparam)
+ goto err_free;
- if (!priv->channel || !priv->txq_to_sq_map || !cparam)
- goto err_free_txq_to_sq_map;
-
- mlx5e_build_channel_param(priv, cparam);
-
- for (i = 0; i < nch; i++) {
- err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
- if (err)
- goto err_close_channels;
- }
-
- for (j = 0; j < nch; j++) {
- err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+ mlx5e_build_channel_param(priv, &chs->params, cparam);
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
if (err)
goto err_close_channels;
}
- /* FIXME: This is a W/A for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
- */
- netif_tx_start_all_queues(priv->netdev);
-
kfree(cparam);
return 0;
err_close_channels:
for (i--; i >= 0; i--)
- mlx5e_close_channel(priv->channel[i]);
+ mlx5e_close_channel(chs->c[i]);
-err_free_txq_to_sq_map:
- kfree(priv->txq_to_sq_map);
- kfree(priv->channel);
+err_free:
+ kfree(chs->c);
kfree(cparam);
-
+ chs->num = 0;
return err;
}
-static void mlx5e_close_channels(struct mlx5e_priv *priv)
+static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
int i;
- /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
- */
- netif_tx_stop_all_queues(priv->netdev);
- netif_tx_disable(priv->netdev);
-
- for (i = 0; i < priv->params.num_channels; i++)
- mlx5e_close_channel(priv->channel[i]);
-
- kfree(priv->txq_to_sq_map);
- kfree(priv->channel);
+ for (i = 0; i < chs->num; i++)
+ mlx5e_activate_channel(chs->c[i]);
}
-static int mlx5e_rx_hash_fn(int hfunc)
+static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
- return (hfunc == ETH_RSS_HASH_TOP) ?
- MLX5_RX_HASH_FN_TOEPLITZ :
- MLX5_RX_HASH_FN_INVERTED_XOR8;
-}
-
-static int mlx5e_bits_invert(unsigned long a, int size)
-{
- int inv = 0;
+ int err = 0;
int i;
- for (i = 0; i < size; i++)
- inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
+ if (err)
+ break;
+ }
- return inv;
+ return err;
}
-static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
+static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
int i;
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
- int ix = i;
- u32 rqn;
-
- if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
-
- ix = priv->params.indirection_rqt[ix];
- rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[ix]->rq.rqn :
- priv->drop_rq.rqn;
- MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
- }
+ for (i = 0; i < chs->num; i++)
+ mlx5e_deactivate_channel(chs->c[i]);
}
-static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
- int ix)
+void mlx5e_close_channels(struct mlx5e_channels *chs)
{
- u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[ix]->rq.rqn :
- priv->drop_rq.rqn;
+ int i;
- MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
+ for (i = 0; i < chs->num; i++)
+ mlx5e_close_channel(chs->c[i]);
+
+ kfree(chs->c);
+ chs->num = 0;
}
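With open/activate/deactivate/close all defined, a channel set now has a four-stage lifecycle. A condensed sketch of how the stages compose; the ordering follows mlx5e_open_locked()/mlx5e_close_locked() further down (which reach the activate/deactivate steps via the *_priv_channels() wrappers), with error handling elided:

	err = mlx5e_open_channels(priv, &priv->channels);  /* alloc + create */
	if (err)
		return err;
	mlx5e_activate_channels(&priv->channels);          /* start RX/TX */
	/* ... traffic runs ... */
	mlx5e_deactivate_channels(&priv->channels);        /* quiesce */
	mlx5e_close_channels(&priv->channels);             /* destroy + free */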
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
- int ix, struct mlx5e_rqt *rqt)
+static int
+mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *rqtc;
int inlen;
int err;
u32 *in;
+ int i;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
@@ -1965,10 +2100,8 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- if (sz > 1) /* RSS */
- mlx5e_fill_indir_rqt_rqns(priv, rqtc);
- else
- mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
+ for (i = 0; i < sz; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
if (!err)
@@ -1984,11 +2117,15 @@ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
-static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
+int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
struct mlx5e_rqt *rqt = &priv->indir_rqt;
+ int err;
- return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
+ err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
+ if (err)
+ mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
+ return err;
}
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
@@ -1999,7 +2136,7 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
rqt = &priv->direct_tir[ix].rqt;
- err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
+ err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
if (err)
goto err_destroy_rqts;
}
@@ -2007,13 +2144,64 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
return 0;
err_destroy_rqts:
+ mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
for (ix--; ix >= 0; ix--)
mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
return err;
}
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+}
+
+static int mlx5e_rx_hash_fn(int hfunc)
+{
+ return (hfunc == ETH_RSS_HASH_TOP) ?
+ MLX5_RX_HASH_FN_TOEPLITZ :
+ MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+ int inv = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+ return inv;
+}
+
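mlx5e_bits_invert() reverses the low size bits of the index. A quick check with assumed inputs, size = 3 (an 8-entry table):

/*   a = 6 = 0b110   ->   inv = 0b011 = 3
 *
 * Bit-reversal spreads consecutive indirection-table entries apart,
 * which helps even out the distribution under the XOR hash function.
 */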
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
+ struct mlx5e_redirect_rqt_param rrp, void *rqtc)
+{
+ int i;
+
+ for (i = 0; i < sz; i++) {
+ u32 rqn;
+
+ if (rrp.is_rss) {
+ int ix = i;
+
+ if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, ilog2(sz));
+
+ ix = priv->channels.params.indirection_rqt[ix];
+ rqn = rrp.rss.channels->c[ix]->rq.rqn;
+ } else {
+ rqn = rrp.rqn;
+ }
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+ }
+}
+
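mlx5e_fill_rqt_rqns() and the helpers below are all driven by one parameter block carrying either a single RQN or an RSS spread. Its assumed shape (the definition lives in en.h, outside this diff):

struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn;		/* !is_rss: one RQ for every entry */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss;			/* is_rss: spread over channels */
	};
};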
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
+ struct mlx5e_redirect_rqt_param rrp)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *rqtc;
@@ -2029,41 +2217,86 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- if (sz > 1) /* RSS */
- mlx5e_fill_indir_rqt_rqns(priv, rqtc);
- else
- mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
-
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
-
+ mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
kvfree(in);
-
return err;
}
-static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_redirect_rqt_param rrp)
+{
+ if (!rrp.is_rss)
+ return rrp.rqn;
+
+ if (ix >= rrp.rss.channels->num)
+ return priv->drop_rq.rqn;
+
+ return rrp.rss.channels->c[ix]->rq.rqn;
+}
+
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
+ struct mlx5e_redirect_rqt_param rrp)
{
u32 rqtn;
int ix;
if (priv->indir_rqt.enabled) {
+ /* RSS RQ table */
rqtn = priv->indir_rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
}
- for (ix = 0; ix < priv->params.num_channels; ix++) {
+ for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ struct mlx5e_redirect_rqt_param direct_rrp = {
+ .is_rss = false,
+ {
+ .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
+ },
+ };
+
+ /* Direct RQ tables */
if (!priv->direct_tir[ix].rqt.enabled)
continue;
+
rqtn = priv->direct_tir[ix].rqt.rqtn;
- mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+ mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}
}
-static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *chs)
+{
+ struct mlx5e_redirect_rqt_param rrp = {
+ .is_rss = true,
+ {
+ .rss = {
+ .channels = chs,
+ .hfunc = chs->params.rss_hfunc,
+ }
+ },
+ };
+
+ mlx5e_redirect_rqts(priv, rrp);
+}
+
+static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
+{
+ struct mlx5e_redirect_rqt_param drop_rrp = {
+ .is_rss = false,
+ {
+ .rqn = priv->drop_rq.rqn,
+ },
+ };
+
+ mlx5e_redirect_rqts(priv, drop_rrp);
+}
+
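Together these two helpers form the steering half of the safe-reconfiguration scheme: RQ tables point at live channels while the data path is up, and at the always-present drop RQ around any teardown, so hardware never steers into a destroyed RQ. A condensed sketch of the teardown ordering this enables, mirroring mlx5e_deactivate_priv_channels() and mlx5e_close_locked() below:

	mlx5e_redirect_rqts_to_drop(priv);      /* RX lands in the drop RQ */
	mlx5e_deactivate_channels(&priv->channels);
	mlx5e_close_channels(&priv->channels);  /* safe: no RQT references */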
+static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
- if (!priv->params.lro_en)
+ if (!params->lro_en)
return;
#define ROUGH_MAX_L2_L3_HDR_SZ 256
@@ -2072,13 +2305,13 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (priv->params.lro_wqe_sz -
- ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
+ (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
- enum mlx5e_traffic_types tt)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
+ enum mlx5e_traffic_types tt,
+ void *tirc)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
@@ -2094,16 +2327,15 @@ void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
- MLX5_SET(tirc, tirc, rx_hash_fn,
- mlx5e_rx_hash_fn(priv->params.rss_hfunc));
- if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+ MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
+ if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
void *rss_key = MLX5_ADDR_OF(tirc, tirc,
rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc,
rx_hash_toeplitz_key);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ memcpy(rss_key, params->toeplitz_hash_key, len);
}
switch (tt) {
@@ -2208,7 +2440,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- mlx5e_build_tir_ctx_lro(tirc, priv);
+ mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
@@ -2258,9 +2490,9 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct net_device *netdev = priv->netdev;
u16 mtu;
int err;
@@ -2280,8 +2512,8 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int nch = priv->params.num_channels;
- int ntc = priv->params.num_tc;
+ int nch = priv->channels.params.num_channels;
+ int ntc = priv->channels.params.num_tc;
int tc;
netdev_reset_tc(netdev);
@@ -2298,53 +2530,116 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
+static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+ int i, tc;
+
+ for (i = 0; i < priv->channels.num; i++)
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ c = priv->channels.c[i];
+ for (tc = 0; tc < c->num_tc; tc++) {
+ sq = &c->sq[tc];
+ priv->txq2sq[sq->txq_ix] = sq;
+ }
+ }
+}
+
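The txq2sq map built above is what keeps the transmit fast path cheap. A sketch of the lookup it enables; the real code is assumed to live in mlx5e_xmit() in en_tx.c, outside this diff:

	struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];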
+static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev)
+{
+ return (MLX5_CAP_GEN(mdev, vport_group_manager) &&
+ MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH);
+}
+
+void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
+{
+ int num_txqs = priv->channels.num * priv->channels.params.num_tc;
+ struct net_device *netdev = priv->netdev;
+
+ mlx5e_netdev_set_tcs(netdev);
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, priv->channels.num);
+
+ mlx5e_build_channels_tx_maps(priv);
+ mlx5e_activate_channels(&priv->channels);
+ netif_tx_start_all_queues(priv->netdev);
+
+ if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
+ mlx5e_add_sqs_fwd_rules(priv);
+
+ mlx5e_wait_channels_min_rx_wqes(&priv->channels);
+ mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
+}
+
+void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
+{
+ mlx5e_redirect_rqts_to_drop(priv);
+
+ if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
+ /* FIXME: This is a W/A only for the tx timeout watchdog false alarm
+ * raised when polling for inactive tx queues.
+ */
+ netif_tx_stop_all_queues(priv->netdev);
+ netif_tx_disable(priv->netdev);
+ mlx5e_deactivate_channels(&priv->channels);
+}
+
+void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_hw_modify hw_modify)
+{
+ struct net_device *netdev = priv->netdev;
+ int new_num_txqs;
+
+ new_num_txqs = new_chs->num * new_chs->params.num_tc;
+
+ netif_carrier_off(netdev);
+
+ if (new_num_txqs < netdev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(netdev, new_num_txqs);
+
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
+
+ priv->channels = *new_chs;
+
+ /* New channels are ready to roll, modify HW settings if needed */
+ if (hw_modify)
+ hw_modify(priv);
+
+ mlx5e_refresh_tirs(priv, false);
+ mlx5e_activate_priv_channels(priv);
+
+ mlx5e_update_carrier(priv);
+}
+
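mlx5e_switch_priv_channels() is the centerpiece of this refactor: callers open a complete new channel set before the old one is torn down, so a failed reconfiguration leaves the device running on its old channels rather than down. The caller pattern, in the same shape as mlx5e_setup_tc(), set_feature_lro(), and mlx5e_change_mtu() below (the num_tc change is only an example):

	struct mlx5e_channels new_channels = {};

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = 4;         /* example parameter change */

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		return err;                     /* old channels still active */

	mlx5e_switch_priv_channels(priv, &new_channels, NULL);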
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
- int num_txqs;
int err;
set_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_netdev_set_tcs(netdev);
-
- num_txqs = priv->params.num_channels * priv->params.num_tc;
- netif_set_real_num_tx_queues(netdev, num_txqs);
- netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
-
- err = mlx5e_open_channels(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
- __func__, err);
+ err = mlx5e_open_channels(priv, &priv->channels);
+ if (err)
goto err_clear_state_opened_flag;
- }
-
- err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
- __func__, err);
- goto err_close_channels;
- }
- mlx5e_redirect_rqts(priv);
+ mlx5e_refresh_tirs(priv, false);
+ mlx5e_activate_priv_channels(priv);
mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv);
-#ifdef CONFIG_RFS_ACCEL
- priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
-#endif
+
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
- if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
- err = mlx5e_add_sqs_fwd_rules(priv);
- if (err)
- goto err_close_channels;
- }
return 0;
-err_close_channels:
- mlx5e_close_channels(priv);
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
return err;
@@ -2365,7 +2660,6 @@ int mlx5e_open(struct net_device *netdev)
int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
/* May already be CLOSED in case a previous configuration operation
* (e.g. RX/TX queue size change) that involves close&open failed.
@@ -2375,13 +2669,10 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
- mlx5e_remove_sqs_fwd_rules(priv);
-
mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
- mlx5e_redirect_rqts(priv);
- mlx5e_close_channels(priv);
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
return 0;
}
@@ -2401,11 +2692,10 @@ int mlx5e_close(struct net_device *netdev)
return err;
}
-static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
- struct mlx5e_rq *rq,
- struct mlx5e_rq_param *param)
+static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq *rq,
+ struct mlx5e_rq_param *param)
{
- struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
int err;
@@ -2417,111 +2707,85 @@ static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
if (err)
return err;
- rq->priv = priv;
+ rq->mdev = mdev;
return 0;
}
-static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
- struct mlx5e_cq *cq,
- struct mlx5e_cq_param *param)
+static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
+ struct mlx5e_cq *cq,
+ struct mlx5e_cq_param *param)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_core_cq *mcq = &cq->mcq;
- int eqn_not_used;
- unsigned int irqn;
- int err;
-
- err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
- &cq->wq_ctrl);
- if (err)
- return err;
-
- mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
- mcq->cqe_sz = 64;
- mcq->set_ci_db = cq->wq_ctrl.db.db;
- mcq->arm_db = cq->wq_ctrl.db.db + 1;
- *mcq->set_ci_db = 0;
- *mcq->arm_db = 0;
- mcq->vector = param->eq_ix;
- mcq->comp = mlx5e_completion_event;
- mcq->event = mlx5e_cq_error_event;
- mcq->irqn = irqn;
-
- cq->priv = priv;
-
- return 0;
+ return mlx5e_alloc_cq_common(mdev, param, cq);
}
-static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq *drop_rq)
{
- struct mlx5e_cq_param cq_param;
- struct mlx5e_rq_param rq_param;
- struct mlx5e_rq *rq = &priv->drop_rq;
- struct mlx5e_cq *cq = &priv->drop_rq.cq;
+ struct mlx5e_cq_param cq_param = {};
+ struct mlx5e_rq_param rq_param = {};
+ struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- memset(&cq_param, 0, sizeof(cq_param));
- memset(&rq_param, 0, sizeof(rq_param));
mlx5e_build_drop_rq_param(&rq_param);
- err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+ err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
if (err)
return err;
- err = mlx5e_enable_cq(cq, &cq_param);
+ err = mlx5e_create_cq(cq, &cq_param);
if (err)
- goto err_destroy_cq;
+ goto err_free_cq;
- err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+ err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
if (err)
- goto err_disable_cq;
+ goto err_destroy_cq;
- err = mlx5e_enable_rq(rq, &rq_param);
+ err = mlx5e_create_rq(drop_rq, &rq_param);
if (err)
- goto err_destroy_rq;
+ goto err_free_rq;
return 0;
-err_destroy_rq:
- mlx5e_destroy_rq(&priv->drop_rq);
-
-err_disable_cq:
- mlx5e_disable_cq(&priv->drop_rq.cq);
+err_free_rq:
+ mlx5e_free_rq(drop_rq);
err_destroy_cq:
- mlx5e_destroy_cq(&priv->drop_rq.cq);
+ mlx5e_destroy_cq(cq);
+
+err_free_cq:
+ mlx5e_free_cq(cq);
return err;
}
-static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
- mlx5e_disable_rq(&priv->drop_rq);
- mlx5e_destroy_rq(&priv->drop_rq);
- mlx5e_disable_cq(&priv->drop_rq.cq);
- mlx5e_destroy_cq(&priv->drop_rq.cq);
+ mlx5e_destroy_rq(drop_rq);
+ mlx5e_free_rq(drop_rq);
+ mlx5e_destroy_cq(&drop_rq->cq);
+ mlx5e_free_cq(&drop_rq->cq);
}
-static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
+ u32 underlay_qpn, u32 *tisn)
{
- struct mlx5_core_dev *mdev = priv->mdev;
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, tc << 1);
+ MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
- return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+ return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}
-static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
- mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5_core_destroy_tis(mdev, tisn);
}
int mlx5e_create_tises(struct mlx5e_priv *priv)
@@ -2530,7 +2794,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv)
int tc;
for (tc = 0; tc < priv->profile->max_tc; tc++) {
- err = mlx5e_create_tis(priv, tc);
+ err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
if (err)
goto err_close_tises;
}
@@ -2539,7 +2803,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv)
err_close_tises:
for (tc--; tc >= 0; tc--)
- mlx5e_destroy_tis(priv, tc);
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
return err;
}
@@ -2549,34 +2813,34 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
int tc;
for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv, tc);
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
-static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
- enum mlx5e_traffic_types tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types tt,
+ u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
- mlx5e_build_tir_ctx_lro(tirc, priv);
+ mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
}
-static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
- u32 rqtn)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
- mlx5e_build_tir_ctx_lro(tirc, priv);
+ mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, rqtn);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
-static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
struct mlx5e_tir *tir;
void *tirc;
@@ -2594,7 +2858,7 @@ static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
memset(in, 0, inlen);
tir = &priv->indir_tir[tt];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+ mlx5e_build_indir_tir_ctx(priv, tt, tirc);
err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
goto err_destroy_tirs;
@@ -2605,6 +2869,7 @@ static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
return 0;
err_destroy_tirs:
+ mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
for (tt--; tt >= 0; tt--)
mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
@@ -2632,8 +2897,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
memset(in, 0, inlen);
tir = &priv->direct_tir[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_direct_tir_ctx(priv, tirc,
- priv->direct_tir[ix].rqt.rqtn);
+ mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
goto err_destroy_ch_tirs;
@@ -2644,6 +2908,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
return 0;
err_destroy_ch_tirs:
+ mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
for (ix--; ix >= 0; ix--)
mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
@@ -2652,7 +2917,7 @@ err_destroy_ch_tirs:
return err;
}
-static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
int i;
@@ -2669,16 +2934,27 @@ void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
+static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
int err = 0;
int i;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return 0;
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
+ if (err)
+ return err;
+ }
- for (i = 0; i < priv->params.num_channels; i++) {
- err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
+ return 0;
+}
+
+static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
if (err)
return err;
}
@@ -2689,7 +2965,7 @@ int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool was_opened;
+ struct mlx5e_channels new_channels = {};
int err = 0;
if (tc && tc != MLX5E_MAX_NUM_TC)
@@ -2697,17 +2973,21 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
mutex_lock(&priv->state_lock);
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
+ new_channels.params = priv->channels.params;
+ new_channels.params.num_tc = tc ? tc : 1;
- priv->params.num_tc = tc ? tc : 1;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto out;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+out:
mutex_unlock(&priv->state_lock);
-
return err;
}
@@ -2737,7 +3017,9 @@ mqprio:
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return mlx5e_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
}
static void
@@ -2822,26 +3104,31 @@ typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- int err;
+ struct mlx5e_channels new_channels = {};
+ int err = 0;
+ bool reset;
mutex_lock(&priv->state_lock);
- if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
- mlx5e_close_locked(priv->netdev);
+ reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
+ reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
- priv->params.lro_en = enable;
- err = mlx5e_modify_tirs_lro(priv);
- if (err) {
- netdev_err(netdev, "lro modify failed, %d\n", err);
- priv->params.lro_en = !enable;
+ new_channels.params = priv->channels.params;
+ new_channels.params.lro_en = enable;
+
+ if (!reset) {
+ priv->channels.params = new_channels.params;
+ err = mlx5e_modify_tirs_lro(priv);
+ goto out;
}
- if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
- mlx5e_open_locked(priv->netdev);
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ goto out;
+ mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
+out:
mutex_unlock(&priv->state_lock);
-
return err;
}
@@ -2878,23 +3165,44 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
-static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
mutex_lock(&priv->state_lock);
- priv->params.vlan_strip_disable = !enable;
- err = mlx5e_modify_rqs_vsd(priv, !enable);
+ priv->channels.params.scatter_fcs_en = enable;
+ err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
if (err)
- priv->params.vlan_strip_disable = enable;
+ priv->channels.params.scatter_fcs_en = !enable;
mutex_unlock(&priv->state_lock);
return err;
}
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ priv->channels.params.vlan_strip_disable = !enable;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
+ if (err)
+ priv->channels.params.vlan_strip_disable = enable;
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
@@ -2947,6 +3255,8 @@ static int mlx5e_set_features(struct net_device *netdev,
set_feature_tc_num_filters);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
+ set_feature_rx_fcs);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
@@ -2960,28 +3270,38 @@ static int mlx5e_set_features(struct net_device *netdev,
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool was_opened;
+ struct mlx5e_channels new_channels = {};
+ int curr_mtu;
int err = 0;
bool reset;
mutex_lock(&priv->state_lock);
- reset = !priv->params.lro_en &&
- (priv->params.rq_wq_type !=
+ reset = !priv->channels.params.lro_en &&
+ (priv->channels.params.rq_wq_type !=
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened && reset)
- mlx5e_close_locked(netdev);
+ reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
+ curr_mtu = netdev->mtu;
netdev->mtu = new_mtu;
- mlx5e_set_dev_port_mtu(netdev);
- if (was_opened && reset)
- err = mlx5e_open_locked(netdev);
+ if (!reset) {
+ mlx5e_set_dev_port_mtu(priv);
+ goto out;
+ }
- mutex_unlock(&priv->state_lock);
+ new_channels.params = priv->channels.params;
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err) {
+ netdev->mtu = curr_mtu;
+ goto out;
+ }
+
+ mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
+out:
+ mutex_unlock(&priv->state_lock);
return err;
}
@@ -3186,8 +3506,8 @@ static void mlx5e_tx_timeout(struct net_device *dev)
netdev_err(dev, "TX timeout detected\n");
- for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
- struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+ for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+ struct mlx5e_txqsq *sq = priv->txq2sq[i];
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
@@ -3219,7 +3539,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
/* no need for full reset when exchanging programs */
- reset = (!priv->xdp_prog || !prog);
+ reset = (!priv->channels.params.xdp_prog || !prog);
if (was_opened && reset)
mlx5e_close_locked(netdev);
@@ -3227,7 +3547,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* num_channels is invariant here, so we can take the
* batched reference right upfront.
*/
- prog = bpf_prog_add(prog, priv->params.num_channels);
+ prog = bpf_prog_add(prog, priv->channels.num);
if (IS_ERR(prog)) {
err = PTR_ERR(prog);
goto unlock;
@@ -3237,12 +3557,12 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* exchange programs; the extra prog reference we got from the caller
* is kept as long as we don't fail from this point onwards.
*/
- old_prog = xchg(&priv->xdp_prog, prog);
+ old_prog = xchg(&priv->channels.params.xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
if (reset) /* change RQ type according to priv->channels.params.xdp_prog */
- mlx5e_set_rq_priv_params(priv);
+ mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
if (was_opened && reset)
mlx5e_open_locked(netdev);
@@ -3253,8 +3573,8 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* when exchanging programs w/o a reset, we update the ref counts on
* behalf of the channels' RQs here.
*/
- for (i = 0; i < priv->params.num_channels; i++) {
- struct mlx5e_channel *c = priv->channel[i];
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
napi_synchronize(&c->napi);
@@ -3280,7 +3600,7 @@ static bool mlx5e_xdp_attached(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return !!priv->xdp_prog;
+ return !!priv->channels.params.xdp_prog;
}
static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -3303,10 +3623,12 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static void mlx5e_netpoll(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *chs = &priv->channels;
+
int i;
- for (i = 0; i < priv->params.num_channels; i++)
- napi_schedule(&priv->channel[i]->napi);
+ for (i = 0; i < chs->num; i++)
+ napi_schedule(&chs->c[i]->napi);
}
#endif
@@ -3463,6 +3785,12 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
(pci_bw < 40000) && (pci_bw < link_speed));
}
+static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
+{
+ return !(link_speed && pci_bw &&
+ (pci_bw <= 16000) && (pci_bw < link_speed));
+}
+
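hw_lro_heuristic() keeps HW LRO on unless the PCIe link is both measured and the clear bottleneck. A worked example with assumed numbers, link_speed = 100000 (100GbE) and pci_bw = 15000 (roughly PCIe x8 Gen3):

/*   !(100000 && 15000 && 15000 <= 16000 && 15000 < 100000)
 *   == !true == false                ->  HW LRO disabled (PCIe-bound)
 *
 * If either value is unknown (0) the inner condition is false, the
 * function returns true, and LRO stays enabled by default.
 */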
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
params->rx_cq_period_mode = cq_period_mode;
@@ -3475,6 +3803,13 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
params->rx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+
+ if (params->rx_am_enabled)
+ params->rx_cq_moderation =
+ mlx5e_am_get_def_profile(params->rx_cq_period_mode);
+
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -3489,75 +3824,81 @@ u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
-static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u16 max_channels)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 cq_period_mode = 0;
u32 link_speed = 0;
u32 pci_bw = 0;
- u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->params.num_channels = profile->max_nch(mdev);
- priv->profile = profile;
- priv->ppriv = ppriv;
+ params->num_channels = max_channels;
+ params->num_tc = 1;
- priv->params.lro_timeout =
- mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+ mlx5e_get_max_linkspeed(mdev, &link_speed);
+ mlx5e_get_pci_bw(mdev, &pci_bw);
+ mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
- priv->params.log_sq_size = is_kdump_kernel() ?
+ /* SQ */
+ params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
- priv->params.rx_cqe_compress_def = false;
+ params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
- MLX5_CAP_GEN(mdev, vport_group_manager)) {
- mlx5e_get_max_linkspeed(mdev, &link_speed);
- mlx5e_get_pci_bw(mdev, &pci_bw);
- mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
- link_speed, pci_bw);
- priv->params.rx_cqe_compress_def =
- cqe_compress_heuristic(link_speed, pci_bw);
- }
-
- MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS,
- priv->params.rx_cqe_compress_def);
-
- mlx5e_set_rq_priv_params(priv);
- if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- priv->params.lro_en = true;
-
- priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
-
- priv->params.tx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- priv->params.tx_cq_moderation.pkts =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
- if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
+ MLX5_CAP_GEN(mdev, vport_group_manager))
+ params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
+
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+
+ /* RQ */
+ mlx5e_set_rq_params(mdev, params);
+
+ /* HW LRO */
+ /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
+ params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
+ /* CQ moderation params */
+ cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+
+ params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+
+ /* TX inline */
+ params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+ if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+ params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;
- priv->params.num_tc = 1;
- priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
+ /* RSS */
+ params->rss_hfunc = ETH_RSS_HASH_XOR;
+ netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
+ mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, max_channels);
+}
- netdev_rss_key_fill(priv->params.toeplitz_hash_key,
- sizeof(priv->params.toeplitz_hash_key));
+static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
- /* Initialize pflags */
- MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+ mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
mutex_init(&priv->state_lock);
@@ -3642,13 +3983,19 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (fcs_supported)
netdev->hw_features |= NETIF_F_RXALL;
+ if (MLX5_CAP_ETH(mdev, scatter_fcs))
+ netdev->hw_features |= NETIF_F_RXFCS;
+
netdev->features = netdev->hw_features;
- if (!priv->params.lro_en)
+ if (!priv->channels.params.lro_en)
netdev->features &= ~NETIF_F_LRO;
if (fcs_enabled)
netdev->features &= ~NETIF_F_RXALL;
+ if (!priv->channels.params.scatter_fcs_en)
+ netdev->features &= ~NETIF_F_RXFCS;
+
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
if (FT_CAP(flow_modify_en) &&
FT_CAP(modify_root) &&
@@ -3708,39 +4055,30 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_vxlan_cleanup(priv);
- if (priv->xdp_prog)
- bpf_prog_put(priv->xdp_prog);
+ if (priv->channels.params.xdp_prog)
+ bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- int i;
- err = mlx5e_create_indirect_rqts(priv);
- if (err) {
- mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
+ err = mlx5e_create_indirect_rqt(priv);
+ if (err)
return err;
- }
err = mlx5e_create_direct_rqts(priv);
- if (err) {
- mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ if (err)
goto err_destroy_indirect_rqts;
- }
err = mlx5e_create_indirect_tirs(priv);
- if (err) {
- mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
+ if (err)
goto err_destroy_direct_rqts;
- }
err = mlx5e_create_direct_tirs(priv);
- if (err) {
- mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ if (err)
goto err_destroy_indirect_tirs;
- }
err = mlx5e_create_flow_steering(priv);
if (err) {
@@ -3761,8 +4099,7 @@ err_destroy_direct_tirs:
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
- for (i = 0; i < priv->profile->max_nch(mdev); i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
return err;
@@ -3770,14 +4107,11 @@ err_destroy_indirect_rqts:
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
- int i;
-
mlx5e_tc_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_direct_rqts(priv);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}
@@ -3801,21 +4135,22 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep rep;
+ u16 max_mtu;
+
+ mlx5e_init_l2_addr(priv);
+
+ /* MTU range: 68 - hw-specific max */
+ netdev->min_mtu = ETH_MIN_MTU;
+ mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+ mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
- if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
- mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
- rep.load = mlx5e_nic_rep_load;
- rep.unload = mlx5e_nic_rep_unload;
- rep.vport = FDB_UPLINK_VPORT;
- rep.netdev = netdev;
- mlx5_eswitch_register_vport_rep(esw, 0, &rep);
- }
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5e_register_vport_reps(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
@@ -3828,16 +4163,29 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
}
queue_work(priv->wq, &priv->set_rx_mode_work);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_open(netdev);
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ rtnl_lock();
+ if (netif_running(priv->netdev))
+ mlx5e_close(priv->netdev);
+ netif_device_detach(priv->netdev);
+ rtnl_unlock();
queue_work(priv->wq, &priv->set_rx_mode_work);
+
if (MLX5_CAP_GEN(mdev, vport_group_manager))
- mlx5_eswitch_unregister_vport_rep(esw, 0);
+ mlx5e_unregister_vport_reps(priv);
+
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
}
@@ -3853,9 +4201,13 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.disable = mlx5e_nic_disable,
.update_stats = mlx5e_update_stats,
.max_nch = mlx5e_get_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
};
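
The profile now also carries the per-CQE receive handlers
(rx_handlers.handle_rx_cqe / handle_rx_cqe_mpwqe), so the NIC, the
representors and, further down, the IPoIB path can share one RX datapath and
dispatch through the profile. A hedged userspace sketch of that dispatch, with
made-up names:

#include <stdio.h>

struct cqe { int bytes; };

struct profile {
	/* NULL means this CQE flavor is not supported by the profile */
	void (*handle_rx_cqe)(const struct cqe *cqe);
};

static void nic_rx(const struct cqe *cqe)
{
	printf("nic rx, %d bytes\n", cqe->bytes);
}

static const struct profile nic_profile = { .handle_rx_cqe = nic_rx };

int main(void)
{
	struct cqe cqe = { .bytes = 64 };

	if (nic_profile.handle_rx_cqe)	/* dispatch via the profile */
		nic_profile.handle_rx_cqe(&cqe);
	return 0;
}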
+/* mlx5e generic netdev management API (move to en_common.c) */
+
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile,
void *ppriv)
@@ -3872,6 +4224,10 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL;
}
+#ifdef CONFIG_RFS_ACCEL
+ netdev->rx_cpu_rmap = mdev->rmap;
+#endif
+
profile->init(mdev, netdev, profile, ppriv);
netif_carrier_off(netdev);
@@ -3891,14 +4247,12 @@ err_cleanup_nic:
return NULL;
}
-int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
const struct mlx5e_profile *profile;
- struct mlx5e_priv *priv;
- u16 max_mtu;
int err;
- priv = netdev_priv(netdev);
profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
@@ -3906,7 +4260,7 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
if (err)
goto out;
- err = mlx5e_open_drop_rq(priv);
+ err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
goto err_cleanup_tx;
@@ -3918,28 +4272,13 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
mlx5e_create_q_counter(priv);
- mlx5e_init_l2_addr(priv);
-
- /* MTU range: 68 - hw-specific max */
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
-
- mlx5e_set_dev_port_mtu(netdev);
-
if (profile->enable)
profile->enable(priv);
- rtnl_lock();
- if (netif_running(netdev))
- mlx5e_open(netdev);
- netif_device_attach(netdev);
- rtnl_unlock();
-
return 0;
err_close_drop_rq:
- mlx5e_close_drop_rq(priv);
+ mlx5e_close_drop_rq(&priv->drop_rq);
err_cleanup_tx:
profile->cleanup_tx(priv);
@@ -3948,66 +4287,34 @@ out:
return err;
}
-static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
-{
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- int vport;
- u8 mac[ETH_ALEN];
-
- if (!MLX5_CAP_GEN(mdev, vport_group_manager))
- return;
-
- mlx5_query_nic_vport_mac_address(mdev, 0, mac);
-
- for (vport = 1; vport < total_vfs; vport++) {
- struct mlx5_eswitch_rep rep;
-
- rep.load = mlx5e_vport_rep_load;
- rep.unload = mlx5e_vport_rep_unload;
- rep.vport = vport;
- ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, vport, &rep);
- }
-}
-
-static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
-{
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- int vport;
-
- if (!MLX5_CAP_GEN(mdev, vport_group_manager))
- return;
-
- for (vport = 1; vport < total_vfs; vport++)
- mlx5_eswitch_unregister_vport_rep(esw, vport);
-}
-
-void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- rtnl_lock();
- if (netif_running(netdev))
- mlx5e_close(netdev);
- netif_device_detach(netdev);
- rtnl_unlock();
-
if (profile->disable)
profile->disable(priv);
flush_workqueue(priv->wq);
mlx5e_destroy_q_counter(priv);
profile->cleanup_rx(priv);
- mlx5e_close_drop_rq(priv);
+ mlx5e_close_drop_rq(&priv->drop_rq);
profile->cleanup_tx(priv);
cancel_delayed_work_sync(&priv->update_stats_work);
}
+void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
+{
+ const struct mlx5e_profile *profile = priv->profile;
+ struct net_device *netdev = priv->netdev;
+
+ destroy_workqueue(priv->wq);
+ if (profile->cleanup)
+ profile->cleanup(priv);
+ free_netdev(netdev);
+}
+
/* The scope of mlx5e_attach and mlx5e_detach should be limited to
 * creating/destroying hardware contexts and connecting them to the
 * current netdev.
*/
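
With mlx5e_destroy_netdev() and mlx5e_detach_netdev() losing their mdev
argument and the open/attach moved into the profile enable/disable callbacks,
the lifecycle pairs up symmetrically: create/destroy own the software state,
attach/detach own the hardware contexts. A compact userspace sketch of the
resulting ordering (stand-ins for the real calls, error paths abbreviated):

#include <stdio.h>

static int create_sw(void) { puts("create netdev + sw state"); return 0; }
static int attach_hw(void) { puts("attach: create hw contexts"); return 0; }
static void detach_hw(void) { puts("detach: destroy hw contexts"); }
static void destroy_sw(void) { puts("destroy netdev + sw state"); }

int main(void)
{
	if (create_sw())
		return 1;
	if (attach_hw()) {	/* attach/detach touch hw contexts only */
		destroy_sw();
		return 1;
	}
	/* ...device in use; suspend/resume may bounce detach/attach... */
	detach_hw();
	destroy_sw();
	return 0;
}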
@@ -4024,13 +4331,12 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
if (err)
return err;
- err = mlx5e_attach_netdev(mdev, netdev);
+ err = mlx5e_attach_netdev(priv);
if (err) {
mlx5e_destroy_mdev_resources(mdev);
return err;
}
- mlx5e_register_vport_rep(mdev);
return 0;
}
@@ -4042,8 +4348,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
if (!netif_device_present(netdev))
return;
- mlx5e_unregister_vport_rep(mdev);
- mlx5e_detach_netdev(mdev, netdev);
+ mlx5e_detach_netdev(priv);
mlx5e_destroy_mdev_resources(mdev);
}
@@ -4051,7 +4356,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- void *ppriv = NULL;
+ struct mlx5e_rep_priv *rpriv = NULL;
void *priv;
int vport;
int err;
@@ -4061,10 +4366,17 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
if (err)
return NULL;
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
- ppriv = &esw->offloads.vport_reps[0];
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+ if (!rpriv) {
+ mlx5_core_warn(mdev,
+ "Not creating net device, Failed to alloc rep priv data\n");
+ return NULL;
+ }
+ rpriv->rep = &esw->offloads.vport_reps[0];
+ }
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
goto err_unregister_reps;
@@ -4090,33 +4402,25 @@ err_detach:
mlx5e_detach(mdev, priv);
err_destroy_netdev:
- mlx5e_destroy_netdev(mdev, priv);
+ mlx5e_destroy_netdev(priv);
err_unregister_reps:
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
+ kfree(rpriv);
return NULL;
}
-void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
-{
- const struct mlx5e_profile *profile = priv->profile;
- struct net_device *netdev = priv->netdev;
-
- destroy_workqueue(priv->wq);
- if (profile->cleanup)
- profile->cleanup(priv);
- free_netdev(netdev);
-}
-
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
struct mlx5e_priv *priv = vpriv;
+ void *ppriv = priv->ppriv;
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
- mlx5e_destroy_netdev(mdev, priv);
+ mlx5e_destroy_netdev(priv);
+ kfree(ppriv);
}
static void *mlx5e_get_netdev(void *vpriv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f621373bd7a5..79462c0368a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -34,10 +34,14 @@
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
+#include <net/netevent.h>
+#include <net/arp.h>
#include "eswitch.h"
#include "en.h"
+#include "en_rep.h"
#include "en_tc.h"
+#include "fs_core.h"
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -75,7 +79,8 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct rtnl_link_stats64 *vport_stats;
struct ifla_vf_stats vf_stats;
int err;
@@ -102,14 +107,16 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
int i, j;
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->params.num_channels; i++) {
- rq_stats = &priv->channel[i]->rq.stats;
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ rq_stats = &c->rq.stats;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
- for (j = 0; j < priv->params.num_tc; j++) {
- sq_stats = &priv->channel[i]->sq[j].stats;
+ for (j = 0; j < priv->channels.params.num_tc; j++) {
+ sq_stats = &c->sq[j].stats;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
@@ -163,7 +170,8 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (esw->mode == SRIOV_NONE)
@@ -182,66 +190,426 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
}
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
-
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5e_channel *c;
- int n, tc, err, num_sqs = 0;
+ int n, tc, num_sqs = 0;
+ int err = -ENOMEM;
u16 *sqs;
- sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
+ sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
if (!sqs)
- return -ENOMEM;
+ goto out;
- for (n = 0; n < priv->params.num_channels; n++) {
- c = priv->channel[n];
+ for (n = 0; n < priv->channels.num; n++) {
+ c = priv->channels.c[n];
for (tc = 0; tc < c->num_tc; tc++)
sqs[num_sqs++] = c->sq[tc].sqn;
}
err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
-
kfree(sqs);
+
+out:
+ if (err)
+ netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
return err;
}
-int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
- struct net_device *netdev = rep->netdev;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ mlx5_eswitch_sqs2vport_stop(esw, rep);
+}
+
+static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
+ DELAY_PROBE_TIME);
+#else
+ unsigned long ipv6_interval = ~0UL;
+#endif
+ unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
+ DELAY_PROBE_TIME);
+ struct net_device *netdev = rpriv->rep->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- return mlx5e_add_sqs_fwd_rules(priv);
- return 0;
+ rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
+ mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- mlx5_eswitch_sqs2vport_stop(esw, rep);
+ mlx5_fc_queue_stats_work(priv->mdev,
+ &neigh_update->neigh_stats_work,
+ neigh_update->min_interval);
}
-void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
- struct net_device *netdev = rep->netdev;
+ struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
+ neigh_update.neigh_stats_work.work);
+ struct net_device *netdev = rpriv->rep->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_neigh_hash_entry *nhe;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_remove_sqs_fwd_rules(priv);
+ rtnl_lock();
+ if (!list_empty(&rpriv->neigh_update.neigh_list))
+ mlx5e_rep_queue_neigh_stats_work(priv);
- /* clean (and re-init) existing uplink offloaded TC rules */
- mlx5e_tc_cleanup(priv);
- mlx5e_tc_init(priv);
+ list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
+ mlx5e_tc_update_neigh_used_value(nhe);
+
+ rtnl_unlock();
+}
+
+static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
+{
+ refcount_inc(&nhe->refcnt);
+}
+
+static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
+{
+ if (refcount_dec_and_test(&nhe->refcnt))
+ kfree(nhe);
+}
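
The hold/release pair above is the usual last-reference-frees idiom:
refcount_inc() pins the entry while work is queued on it, and
refcount_dec_and_test() frees it on the final drop. A userspace sketch of the
same idiom with C11 atomics (names are mine, not the driver's):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { atomic_int refcnt; };

static void entry_hold(struct entry *e)
{
	atomic_fetch_add(&e->refcnt, 1);
}

static void entry_release(struct entry *e)
{
	/* fetch_sub returns the old value; 1 means we were the last user */
	if (atomic_fetch_sub(&e->refcnt, 1) == 1) {
		puts("last reference dropped, freeing");
		free(e);
	}
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return 1;
	atomic_init(&e->refcnt, 1);	/* creation reference */
	entry_hold(e);			/* e.g. taken before queuing work */
	entry_release(e);		/* the work finished */
	entry_release(e);		/* creation reference dropped; frees */
	return 0;
}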
+
+static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ bool neigh_connected,
+ unsigned char ha[ETH_ALEN])
+{
+ struct ethhdr *eth = (struct ethhdr *)e->encap_header;
+
+ ASSERT_RTNL();
+
+ if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
+ !ether_addr_equal(e->h_dest, ha))
+ mlx5e_tc_encap_flows_del(priv, e);
+
+ if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ ether_addr_copy(e->h_dest, ha);
+ ether_addr_copy(eth->h_dest, ha);
+
+ mlx5e_tc_encap_flows_add(priv, e);
+ }
+}
+
+static void mlx5e_rep_neigh_update(struct work_struct *work)
+{
+ struct mlx5e_neigh_hash_entry *nhe =
+ container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
+ struct neighbour *n = nhe->n;
+ struct mlx5e_encap_entry *e;
+ unsigned char ha[ETH_ALEN];
+ struct mlx5e_priv *priv;
+ bool neigh_connected;
+ bool encap_connected;
+ u8 nud_state, dead;
+
+ rtnl_lock();
+
+ /* If these parameters are changed after we release the lock,
+ * we'll receive another event letting us know about it.
+ * We use this lock to avoid inconsistency between the neigh validity
+ * and its hw address.
+ */
+ read_lock_bh(&n->lock);
+ memcpy(ha, n->ha, ETH_ALEN);
+ nud_state = n->nud_state;
+ dead = n->dead;
+ read_unlock_bh(&n->lock);
+
+ neigh_connected = (nud_state & NUD_VALID) && !dead;
+
+ list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+ priv = netdev_priv(e->out_dev);
+
+ if (encap_connected != neigh_connected ||
+ !ether_addr_equal(e->h_dest, ha))
+ mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ }
+ mlx5e_rep_neigh_entry_release(nhe);
+ rtnl_unlock();
+ neigh_release(n);
+}
+
+static struct mlx5e_neigh_hash_entry *
+mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
+ struct mlx5e_neigh *m_neigh);
+
+static int mlx5e_rep_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
+ neigh_update.netevent_nb);
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct net_device *netdev = rpriv->rep->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
+ struct mlx5e_neigh m_neigh = {};
+ struct neigh_parms *p;
+ struct neighbour *n;
+ bool found = false;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ n = ptr;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+#else
+ if (n->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ m_neigh.dev = n->dev;
+ m_neigh.family = n->ops->family;
+ memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+
+ /* We are in atomic context and can't take RTNL mutex, so use
+ * spin_lock_bh to look up the neigh table. bh is used since
+ * netevent can be called from a softirq context.
+ */
+ spin_lock_bh(&neigh_update->encap_lock);
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
+ if (!nhe) {
+ spin_unlock_bh(&neigh_update->encap_lock);
+ return NOTIFY_DONE;
+ }
+
+ /* This assignment is valid as long as the neigh reference
+ * is taken.
+ */
+ nhe->n = n;
+
+ /* Take a reference to ensure the neighbour and mlx5 encap
+ * entry won't be destructed until we drop the reference in
+ * delayed work.
+ */
+ neigh_hold(n);
+ mlx5e_rep_neigh_entry_hold(nhe);
+
+ if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
+ mlx5e_rep_neigh_entry_release(nhe);
+ neigh_release(n);
+ }
+ spin_unlock_bh(&neigh_update->encap_lock);
+ break;
+
+ case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+ p = ptr;
+
+ /* We check that the device is present since we don't care about
+ * changes in the default table; we only care about per-device
+ * changes to the delay probe time parameter.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
+#else
+ if (!p->dev || p->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ /* We are in atomic context and can't take RTNL mutex,
+ * so use spin_lock_bh to walk the neigh list and look for
+ * the relevant device. bh is used since netevent can be
+ * called from a softirq context.
+ */
+ spin_lock_bh(&neigh_update->encap_lock);
+ list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
+ if (p->dev == nhe->m_neigh.dev) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&neigh_update->encap_lock);
+ if (!found)
+ return NOTIFY_DONE;
+
+ neigh_update->min_interval = min_t(unsigned long,
+ NEIGH_VAR(p, DELAY_PROBE_TIME),
+ neigh_update->min_interval);
+ mlx5_fc_update_sampling_interval(priv->mdev,
+ neigh_update->min_interval);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static const struct rhashtable_params mlx5e_neigh_ht_params = {
+ .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
+ .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
+ .key_len = sizeof(struct mlx5e_neigh),
+ .automatic_shrinking = true,
+};
+
+static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ int err;
+
+ err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&neigh_update->neigh_list);
+ spin_lock_init(&neigh_update->encap_lock);
+ INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
+ mlx5e_rep_neigh_stats_work);
+ mlx5e_rep_neigh_update_init_interval(rpriv);
+
+ rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
+ err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
+ if (err)
+ goto out_err;
+ return 0;
+
+out_err:
+ rhashtable_destroy(&neigh_update->neigh_ht);
+ return err;
+}
+
+static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+
+ unregister_netevent_notifier(&neigh_update->netevent_nb);
+
+ flush_workqueue(priv->wq); /* flush neigh update work items */
+
+ cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
+
+ rhashtable_destroy(&neigh_update->neigh_ht);
+}
+
+static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ int err;
+
+ err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
+ &nhe->rhash_node,
+ mlx5e_neigh_ht_params);
+ if (err)
+ return err;
+
+ list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
+
+ return err;
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ spin_lock_bh(&rpriv->neigh_update.encap_lock);
+
+ list_del(&nhe->neigh_list);
+
+ rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
+ &nhe->rhash_node,
+ mlx5e_neigh_ht_params);
+ spin_unlock_bh(&rpriv->neigh_update.encap_lock);
+}
+
+/* This function must only be called under RTNL lock or under the
+ * representor's encap_lock in case RTNL mutex can't be held.
+ */
+static struct mlx5e_neigh_hash_entry *
+mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
+ struct mlx5e_neigh *m_neigh)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+
+ return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
+ mlx5e_neigh_ht_params);
+}
+
+static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh_hash_entry **nhe)
+{
+ int err;
+
+ *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
+ if (!*nhe)
+ return -ENOMEM;
+
+ memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
+ INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
+ INIT_LIST_HEAD(&(*nhe)->encap_list);
+ refcount_set(&(*nhe)->refcnt, 1);
+
+ err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ kfree(*nhe);
+ return err;
+}
+
+static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ /* The neigh hash entry must be removed from the hash table regardless
+ * of the reference count value, so it won't be found by the next
+ * neigh notification call. The neigh hash entry reference count is
+ * incremented only during creation and neigh notification calls, and
+ * it protects the nhe struct from being freed.
+ */
+ mlx5e_rep_neigh_entry_remove(priv, nhe);
+ mlx5e_rep_neigh_entry_release(nhe);
+}
+
+int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_neigh_hash_entry *nhe;
+ int err;
+
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ if (!nhe) {
+ err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
+ if (err)
+ return err;
+ }
+ list_add(&e->encap_list, &nhe->encap_list);
+ return 0;
+}
+
+void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_neigh_hash_entry *nhe;
+
+ list_del(&e->encap_list);
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+
+ if (list_empty(&nhe->encap_list))
+ mlx5e_rep_neigh_entry_destroy(priv, nhe);
}
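
attach/detach above implement a get-or-create entry shared by several encaps:
the first attach creates the neigh hash entry, later attaches only join its
encap_list, and the last detach destroys it. A minimal userspace sketch of the
counting logic, where a plain user count stands in for the encap_list
emptiness check:

#include <stdio.h>
#include <stdlib.h>

struct shared { int users; };

static struct shared *cached;	/* stands in for the rhashtable lookup */

static struct shared *attach(void)
{
	if (!cached) {			/* lookup miss: create the entry */
		cached = calloc(1, sizeof(*cached));
		if (!cached)
			return NULL;
	}
	cached->users++;		/* join the shared entry */
	return cached;
}

static void detach(void)
{
	if (--cached->users == 0) {	/* "list" went empty: destroy */
		free(cached);
		cached = NULL;
	}
}

int main(void)
{
	attach();			/* creates */
	attach();			/* reuses */
	detach();
	detach();			/* frees */
	printf("cached=%p\n", (void *)cached);
	return 0;
}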
static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
@@ -259,7 +627,8 @@ static int mlx5e_rep_open(struct net_device *dev)
static int mlx5e_rep_close(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
@@ -271,7 +640,8 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
int ret;
ret = snprintf(buf, len, "%d", rep->vport - 1);
@@ -314,18 +684,25 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep;
- if (rep && rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS)
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return false;
+
+ rep = rpriv->rep;
+ if (esw->mode == SRIOV_OFFLOADS &&
+ rep && rep->vport == FDB_UPLINK_VPORT)
return true;
return false;
}
-bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
if (rep && rep->vport != FDB_UPLINK_VPORT)
return true;
@@ -397,42 +774,23 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_get_offload_stats = mlx5e_get_offload_stats,
};
-static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->params.log_sq_size =
- MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
- priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
-
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
-
- priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
-
- priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- priv->params.num_tc = 1;
-
- priv->params.lro_wqe_sz =
- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
-
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->params.num_channels = profile->max_nch(mdev);
- priv->profile = profile;
- priv->ppriv = ppriv;
+ params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
+ params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- mutex_init(&priv->state_lock);
+ params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
- INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+ params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ params->num_tc = 1;
+ params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
}
static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -458,30 +816,39 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile,
void *ppriv)
{
- mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ mutex_init(&priv->state_lock);
+
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ priv->channels.params.num_channels = profile->max_nch(mdev);
+ mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_flow_handle *flow_rule;
int err;
- int i;
+
+ mlx5e_init_l2_addr(priv);
err = mlx5e_create_direct_rqts(priv);
- if (err) {
- mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ if (err)
return err;
- }
err = mlx5e_create_direct_tirs(priv);
- if (err) {
- mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ if (err)
goto err_destroy_direct_rqts;
- }
flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
rep->vport,
@@ -503,21 +870,19 @@ err_del_flow_rule:
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
- for (i = 0; i < priv->params.num_channels; i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_direct_rqts(priv);
return err;
}
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch_rep *rep = priv->ppriv;
- int i;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
mlx5e_tc_cleanup(priv);
mlx5_del_flow_rules(rep->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
- for (i = 0; i < priv->params.num_channels; i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_direct_rqts(priv);
}
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
@@ -546,56 +911,181 @@ static struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_stats,
.max_nch = mlx5e_get_rep_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
+ .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
.max_tc = 1,
};
-int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+/* e-Switch vport representors */
+
+static int
+mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = netdev_priv(rep->netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ int err;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_add_sqs_fwd_rules(priv);
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_rep_neigh_init(rpriv);
+ if (err)
+ goto err_remove_sqs;
+
+ return 0;
+
+err_remove_sqs:
+ mlx5e_remove_sqs_fwd_rules(priv);
+ return err;
+}
+
+static void
+mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5e_priv *priv = netdev_priv(rep->netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
+ /* clean (and re-init) existing uplink offloaded TC rules */
+ mlx5e_tc_cleanup(priv);
+ mlx5e_tc_init(priv);
+
+ mlx5e_rep_neigh_cleanup(rpriv);
+}
+
+static int
+mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
int err;
- netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+ rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+ if (!rpriv)
+ return -ENOMEM;
+
+ netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
+ kfree(rpriv);
return -EINVAL;
}
rep->netdev = netdev;
+ rpriv->rep = rep;
- err = mlx5e_attach_netdev(esw->dev, netdev);
+ err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
pr_warn("Failed to attach representor netdev for vport %d\n",
rep->vport);
goto err_destroy_netdev;
}
+ err = mlx5e_rep_neigh_init(rpriv);
+ if (err) {
+ pr_warn("Failed to initialized neighbours handling for vport %d\n",
+ rep->vport);
+ goto err_detach_netdev;
+ }
+
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_detach_netdev;
+ goto err_neigh_cleanup;
}
return 0;
+err_neigh_cleanup:
+ mlx5e_rep_neigh_cleanup(rpriv);
+
err_detach_netdev:
- mlx5e_detach_netdev(esw->dev, netdev);
+ mlx5e_detach_netdev(netdev_priv(netdev));
err_destroy_netdev:
- mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
-
+ mlx5e_destroy_netdev(netdev_priv(netdev));
+ kfree(rpriv);
return err;
}
-void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+static void
+mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
struct net_device *netdev = rep->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ void *ppriv = priv->ppriv;
+
+ unregister_netdev(rep->netdev);
+
+ mlx5e_rep_neigh_cleanup(rpriv);
+ mlx5e_detach_netdev(priv);
+ mlx5e_destroy_netdev(priv);
+ kfree(ppriv); /* mlx5e_rep_priv */
+}
+
+static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ int vport;
+ u8 mac[ETH_ALEN];
+
+ mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
+ for (vport = 1; vport < total_vfs; vport++) {
+ struct mlx5_eswitch_rep rep;
+
+ rep.load = mlx5e_vport_rep_load;
+ rep.unload = mlx5e_vport_rep_unload;
+ rep.vport = vport;
+ ether_addr_copy(rep.hw_id, mac);
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+ }
+}
+
+static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ int vport;
+
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
+}
+
+void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep rep;
+
+ mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
+ rep.load = mlx5e_nic_rep_load;
+ rep.unload = mlx5e_nic_rep_unload;
+ rep.vport = FDB_UPLINK_VPORT;
+ rep.netdev = priv->netdev;
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport */
+
+ mlx5e_rep_register_vf_vports(priv); /* VFs vports */
+}
+
+void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
- unregister_netdev(netdev);
- mlx5e_detach_netdev(esw->dev, netdev);
- mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
+ mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
+ mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
+ mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF */
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
new file mode 100644
index 000000000000..a0a1a7a1d6c0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5E_REP_H__
+#define __MLX5E_REP_H__
+
+#include <net/ip_tunnels.h>
+#include <linux/rhashtable.h>
+#include "eswitch.h"
+#include "en.h"
+
+struct mlx5e_neigh_update_table {
+ struct rhashtable neigh_ht;
+ /* Save the neigh hash entries in a list in addition to the hash table
+ * (neigh_ht), in order to iterate easily over the neigh entries.
+ * Used for stats query.
+ */
+ struct list_head neigh_list;
+ /* protect lookup/remove operations */
+ spinlock_t encap_lock;
+ struct notifier_block netevent_nb;
+ struct delayed_work neigh_stats_work;
+ unsigned long min_interval; /* jiffies */
+};
+
+struct mlx5e_rep_priv {
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5e_neigh_update_table neigh_update;
+};
+
+struct mlx5e_neigh {
+ struct net_device *dev;
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } dst_ip;
+ int family;
+};
+
+struct mlx5e_neigh_hash_entry {
+ struct rhash_head rhash_node;
+ struct mlx5e_neigh m_neigh;
+
+ /* Save the neigh hash entry in a list on the representor in
+ * addition to the hash table, in order to iterate easily over the
+ * neighbour entries. Used for stats query.
+ */
+ struct list_head neigh_list;
+
+ /* encap list sharing the same neigh */
+ struct list_head encap_list;
+
+ /* valid only when the neigh reference is taken during
+ * the neigh_update_work workqueue callback.
+ */
+ struct neighbour *n;
+ struct work_struct neigh_update_work;
+
+ /* The neigh hash entry can be deleted only when the refcount is zero.
+ * The refcount prevents the entry from being removed by TC while it
+ * is still in use by the neigh notification call.
+ */
+ refcount_t refcnt;
+
+ /* Save the last time offloaded traffic was reported passing over one
+ * of the neigh hash entry flows. Used to periodically update the
+ * neigh 'used' value and avoid neigh deletion by the kernel.
+ */
+ unsigned long reported_lastuse;
+};
+
+enum {
+ /* set when the encap entry is successfully offloaded into HW */
+ MLX5_ENCAP_ENTRY_VALID = BIT(0),
+};
+
+struct mlx5e_encap_entry {
+ /* neigh hash entry list of encaps sharing the same neigh */
+ struct list_head encap_list;
+ struct mlx5e_neigh m_neigh;
+ /* a node of the eswitch encap hash table which keeps all the encap
+ * entries
+ */
+ struct hlist_node encap_hlist;
+ struct list_head flows;
+ u32 encap_id;
+ struct ip_tunnel_info tun_info;
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+
+ struct net_device *out_dev;
+ int tunnel_type;
+ u8 flags;
+ char *encap_header;
+ int encap_size;
+};
+
+void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
+void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp);
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+
+void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
+
+#endif /* __MLX5E_REP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index bafcb349a50c..7b1566f0ae58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -39,6 +39,8 @@
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
+#include "en_rep.h"
+#include "ipoib.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -156,28 +158,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
}
-void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
-{
- bool was_opened;
-
- if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
- return;
-
- if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
- return;
-
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
-
- MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
- mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
-
- if (was_opened)
- mlx5e_open_locked(priv->netdev);
-
-}
-
#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
@@ -331,7 +311,7 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- struct mlx5e_sq *sq = &rq->channel->icosq;
+ struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *wqe;
u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
@@ -341,7 +321,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
sq->db.ico_wqe[pi].num_wqebbs = 1;
- mlx5e_send_nop(sq, false);
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
@@ -353,7 +333,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
sq->pc += num_wqebbs;
- mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
@@ -637,37 +617,36 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_tx_wqe *wqe;
- u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+ u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
struct mlx5e_dma_info *di,
const struct xdp_buff *xdp)
{
- struct mlx5e_sq *sq = &rq->channel->xdp_sq;
+ struct mlx5e_xdpsq *sq = &rq->xdpsq;
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = sq->pc & wq->sz_m1;
+ u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
- u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT;
ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
dma_addr_t dma_addr = di->addr + data_offset;
unsigned int dma_len = xdp->data_end - xdp->data;
+ prefetchw(wqe);
+
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
rq->stats.xdp_drop++;
@@ -675,48 +654,42 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
return false;
}
- if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
- if (sq->db.xdp.doorbell) {
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+ if (sq->db.doorbell) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
- sq->db.xdp.doorbell = false;
+ sq->db.doorbell = false;
}
rq->stats.xdp_tx_full++;
mlx5e_page_release(rq, di, true);
return false;
}
- dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
- memset(wqe, 0, sizeof(*wqe));
+ cseg->fm_ce_se = 0;
dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
+
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
-
- ds_cnt += MLX5E_XDP_IHS_DS_COUNT;
dseg++;
}
/* write the dma part */
dseg->addr = cpu_to_be64(dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
- dseg->lkey = sq->mkey_be;
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->db.xdp.di[pi] = *di;
- wi->opcode = MLX5_OPCODE_SEND;
- wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
- sq->pc += MLX5E_XDP_TX_WQEBBS;
+ sq->db.di[pi] = *di;
+ sq->pc++;
- sq->db.xdp.doorbell = true;
+ sq->db.doorbell = true;
rq->stats.xdp_tx++;
return true;
}
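
Note the sq->db.doorbell flag at the end of the XDP transmit: rather than
ringing the doorbell once per forwarded frame, the SQ only marks that
descriptors are pending, and mlx5e_poll_rx_cq() rings once at the end of the
poll (or earlier, above, when the ring fills). A hedged userspace sketch of
the batching idea, names illustrative:

#include <stdio.h>

struct sq { int pc; int doorbell; };

static void ring_doorbell(struct sq *sq)
{
	printf("doorbell at pc=%d\n", sq->pc);	/* one MMIO for the batch */
	sq->doorbell = 0;
}

static void post_frame(struct sq *sq)
{
	sq->pc++;		/* write the WQE... */
	sq->doorbell = 1;	/* ...but only mark the doorbell pending */
}

int main(void)
{
	struct sq sq = { 0, 0 };
	int i;

	for (i = 0; i < 8; i++)	/* e.g. one napi poll worth of XDP_TX */
		post_frame(&sq);
	if (sq.doorbell)	/* end of poll: ring once for all 8 */
		ring_doorbell(&sq);
	return 0;
}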
@@ -837,7 +810,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct net_device *netdev = rq->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5e_rx_wqe *wqe;
struct sk_buff *skb;
__be16 wqe_counter_be;
@@ -932,7 +906,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto mpwrq_cqe_out;
}
- prefetch(skb->data);
+ prefetchw(skb->data);
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
@@ -950,7 +924,7 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
+ struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
int work_done = 0;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
@@ -977,9 +951,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
}
- if (xdp_sq->db.xdp.doorbell) {
- mlx5e_xmit_xdp_doorbell(xdp_sq);
- xdp_sq->db.xdp.doorbell = false;
+ if (xdpsq->db.doorbell) {
+ mlx5e_xmit_xdp_doorbell(xdpsq);
+ xdpsq->db.doorbell = false;
}
mlx5_cqwq_update_db_record(&cq->wq);
@@ -989,3 +963,152 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
return work_done;
}
+
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_xdpsq *sq;
+ struct mlx5e_rq *rq;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_xdpsq, cq);
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return false;
+
+ rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ u16 wqe_counter;
+ bool last_wqe;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = sqcc & sq->wq.sz_m1;
+ di = &sq->db.di[ci];
+
+ sqcc++;
+ /* Recycle RX page */
+ mlx5e_page_release(rq, di, true);
+ } while (!last_wqe);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
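
The return value appears to follow the usual TX-poll convention: true when the
whole MLX5E_TX_CQ_POLL_BUDGET was consumed (more completions may remain, keep
napi scheduled), false when the CQ drained. A tiny sketch of that convention
with a toy counter in place of the CQ:

#include <stdbool.h>
#include <stdio.h>

#define BUDGET 4

static int pending = 6;	/* completions waiting on the toy "CQ" */

static bool poll_cq(void)
{
	int i;

	for (i = 0; i < BUDGET && pending > 0; i++)
		pending--;	/* consume one completion */

	return i == BUDGET;	/* budget used up => work may remain */
}

int main(void)
{
	while (poll_cq())	/* re-poll while the budget keeps filling */
		;
	printf("drained, pending=%d\n", pending);
	return 0;
}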
+
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ di = &sq->db.di[ci];
+ sq->cc++;
+
+ mlx5e_page_release(rq, di, false);
+ }
+}
+
+#ifdef CONFIG_MLX5_CORE_IPOIB
+
+#define MLX5_IB_GRH_DGID_OFFSET 24
+#define MLX5_IB_GRH_BYTES 40
+#define MLX5_IPOIB_ENCAP_LEN 4
+#define MLX5_GID_SIZE 16
+
+static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rq->netdev;
+ u8 *dgid;
+ u8 g;
+
+ g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
+ if ((!g) || dgid[0] != 0xff)
+ skb->pkt_type = PACKET_HOST;
+ else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+
+ /* TODO: IB/ipoib: Allow mcast packets from other VFs
+ * 68996a6e760e5c74654723eeb57bf65628ae87f4
+ */
+
+ skb_pull(skb, MLX5_IB_GRH_BYTES);
+
+ skb->protocol = *((__be16 *)(skb->data));
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+
+ skb_record_rx_queue(skb, rq->ix);
+
+ if (likely(netdev->features & NETIF_F_RXHASH))
+ mlx5e_skb_set_hash(cqe, skb);
+
+ skb_reset_mac_header(skb);
+ skb_pull(skb, MLX5_IPOIB_ENCAP_LEN);
+
+ skb->dev = netdev;
+
+ rq->stats.csum_complete++;
+ rq->stats.packets++;
+ rq->stats.bytes += cqe_bcnt;
+}
+
+void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_counter_be;
+ struct sk_buff *skb;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
+ goto wq_ll_pop;
+
+ mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+#endif /* CONFIG_MLX5_CORE_IPOIB */
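
On the IPoIB classification above: the g bit signals that a GRH is present,
and InfiniBand multicast GIDs start with 0xff, so dgid[0] separates unicast
from multicast, with the broadcast GID matched byte-for-byte against
netdev->broadcast. A hedged userspace sketch of that decision (the GID value
below is a placeholder, not a real broadcast GID):

#include <stdio.h>
#include <string.h>

enum pkt_type { PKT_HOST, PKT_BROADCAST, PKT_MULTICAST };

static enum pkt_type classify(int grh_present, const unsigned char *dgid,
			      const unsigned char *bcast_gid)
{
	if (!grh_present || dgid[0] != 0xff)	/* unicast (or no GRH) */
		return PKT_HOST;
	if (!memcmp(dgid, bcast_gid, 16))	/* exact broadcast GID */
		return PKT_BROADCAST;
	return PKT_MULTICAST;			/* any other ff:... GID */
}

int main(void)
{
	unsigned char bcast[16] = { 0xff, 0x12, 0x40, 0x1b };
	unsigned char dgid[16]  = { 0xff, 0x12, 0x40, 0x1b };

	printf("%d\n", classify(1, dgid, bcast));	/* 1 == PKT_BROADCAST */
	return 0;
}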
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index cbfac06b7ffd..02dd3a95ed8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -293,7 +293,7 @@ void mlx5e_rx_am_work(struct work_struct *work)
struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
- mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
+ mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
cur_profile.usec, cur_profile.pkts);
am->state = MLX5E_AM_START_MEASURE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5621dcfda4f1..5225f2226a67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -236,12 +236,9 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
{
int err = 0;
- err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true);
- if (err) {
- netdev_err(priv->netdev,
- "\tFailed to enable UC loopback err(%d)\n", err);
+ err = mlx5e_refresh_tirs(priv, true);
+ if (err)
return err;
- }
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
@@ -258,7 +255,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
dev_remove_pack(&lbtp->pt);
- mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
+ mlx5e_refresh_tirs(priv, false);
}
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 5436866798f4..11c27e4fadf6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -42,14 +42,25 @@
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/vxlan.h>
+#include <net/arp.h>
#include "en.h"
+#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
+struct mlx5_nic_flow_attr {
+ u32 action;
+ u32 flow_tag;
+ u32 mod_hdr_id;
+};
+
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
+ MLX5E_TC_FLOW_NIC = BIT(1),
+ MLX5E_TC_FLOW_OFFLOADED = BIT(2),
};
struct mlx5e_tc_flow {
@@ -58,7 +69,16 @@ struct mlx5e_tc_flow {
u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
- struct mlx5_esw_flow_attr *attr;
+ union {
+ struct mlx5_esw_flow_attr esw_attr[0];
+ struct mlx5_nic_flow_attr nic_attr[0];
+ };
+};
+
+struct mlx5e_tc_flow_parse_attr {
+ struct mlx5_flow_spec spec;
+ int num_mod_hdr_actions;
+ void *mod_hdr_actions;
};
enum {
@@ -71,24 +91,26 @@ enum {
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- u32 action, u32 flow_tag)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {
- .action = action,
- .flow_tag = flow_tag,
+ .action = attr->action,
+ .flow_tag = attr->flow_tag,
.encap_id = 0,
};
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
+ int err;
- if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.vlan.ft.t;
- } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return ERR_CAST(counter);
@@ -97,6 +119,19 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
dest.counter = counter;
}
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr->num_mod_hdr_actions,
+ parse_attr->mod_hdr_actions,
+ &attr->mod_hdr_id);
+ flow_act.modify_id = attr->mod_hdr_id;
+ kfree(parse_attr->mod_hdr_actions);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_create_mod_hdr_id;
+ }
+ }
+
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
@@ -114,8 +149,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
table_created = true;
}
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
+ parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
+ &flow_act, &dest, 1);
if (IS_ERR(rule))
goto err_add_rule;
@@ -128,6 +164,10 @@ err_add_rule:
priv->fs.tc.t = NULL;
}
err_create_ft:
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
return rule;
@@ -138,47 +178,195 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_fc *counter = NULL;
- if (!IS_ERR(flow->rule)) {
- counter = mlx5_flow_rule_counter(flow->rule);
- mlx5_del_flow_rules(flow->rule);
- mlx5_fc_destroy(priv->mdev, counter);
- }
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
+
+ if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ flow->nic_attr->mod_hdr_id);
}
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_flow_handle *rule;
int err;
err = mlx5_eswitch_add_vlan_action(esw, attr);
- if (err)
- return ERR_PTR(err);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_vlan;
+ }
- return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
-}
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr->num_mod_hdr_actions,
+ parse_attr->mod_hdr_actions,
+ &attr->mod_hdr_id);
+ kfree(parse_attr->mod_hdr_actions);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_mod_hdr;
+ }
+ }
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow);
+ rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+ if (IS_ERR(rule))
+ goto err_add_rule;
+
+ return rule;
+
+err_add_rule:
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+err_mod_hdr:
+ mlx5_eswitch_del_vlan_action(esw, attr);
+err_add_vlan:
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ mlx5e_detach_encap(priv, flow);
+ return rule;
+}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+ }
- mlx5_eswitch_del_vlan_action(esw, flow->attr);
+ mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);
- if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
mlx5e_detach_encap(priv, flow);
+ kvfree(flow->esw_attr->parse_attr);
+ }
+
+ if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5_modify_header_dealloc(priv->mdev,
+ attr->mod_hdr_id);
+}
+
+void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
+ e->encap_size, e->encap_header,
+ &e->encap_id);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
+ err);
+ return;
+ }
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(priv);
+
+ list_for_each_entry(flow, &e->flows, encap) {
+ flow->esw_attr->encap_id = e->encap_id;
+ flow->rule = mlx5e_tc_add_fdb_flow(priv,
+ flow->esw_attr->parse_attr,
+ flow);
+ if (IS_ERR(flow->rule)) {
+ err = PTR_ERR(flow->rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
+ err);
+ continue;
+ }
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ }
+}
+
+void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_tc_flow *flow;
+ struct mlx5_fc *counter;
+
+ list_for_each_entry(flow, &e->flows, encap) {
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+ }
+ }
+
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_encap_dealloc(priv->mdev, e->encap_id);
+ }
+}
+
+void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+ u64 bytes, packets, lastuse = 0;
+ struct mlx5e_tc_flow *flow;
+ struct mlx5e_encap_entry *e;
+ struct mlx5_fc *counter;
+ struct neigh_table *tbl;
+ bool neigh_used = false;
+ struct neighbour *n;
+
+ if (m_neigh->family == AF_INET)
+ tbl = &arp_tbl;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (m_neigh->family == AF_INET6)
+ tbl = ipv6_stub->nd_tbl;
+#endif
+ else
+ return;
+
+ list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
+ continue;
+ list_for_each_entry(flow, &e->flows, encap) {
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
+ neigh_used = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (neigh_used) {
+ nhe->reported_lastuse = jiffies;
+
+ /* find the relevant neigh according to the cached device and
+ * dst ip pair
+ */
+ n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
+ if (!n) {
+ WARN(1, "The neighbour already freed\n");
+ return;
+ }
+
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
}
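
mlx5e_tc_update_neigh_used_value() above decides whether any offloaded encap flow has passed traffic since the last report by comparing each cached counter's lastuse stamp against nhe->reported_lastuse with time_after(). A minimal standalone sketch of that comparison, showing the wraparound-safe signed subtraction the kernel macro relies on (names here are illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* sketch of the kernel's time_after(): true if a is after b,
 * correct across jiffies wraparound thanks to signed subtraction */
static bool time_after_sketch(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long reported_lastuse = (unsigned long)-10; /* near wrap */
	unsigned long lastuse = 5;                           /* wrapped past it */

	printf("neigh used: %d\n", time_after_sketch(lastuse, reported_lastuse));
	return 0;
}
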
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -188,22 +376,20 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
list_del(&flow->encap);
if (list_empty(next)) {
- struct mlx5_encap_entry *e;
+ struct mlx5e_encap_entry *e;
+
+ e = list_entry(next, struct mlx5e_encap_entry, flows);
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
- e = list_entry(next, struct mlx5_encap_entry, flows);
- if (e->n) {
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_encap_dealloc(priv->mdev, e->encap_id);
- neigh_release(e->n);
- }
+
hlist_del_rcu(&e->encap_hlist);
+ kfree(e->encap_header);
kfree(e);
}
}
-/* we get here also when setting rule to the FW failed, etc. It means that the
- * flow rule itself might not exist, but some offloading related to the actions
- * should be cleaned.
- */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
@@ -631,16 +817,18 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep;
u8 min_inline;
int err;
err = __parse_cls_flower(priv, spec, f, &min_inline);
- if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
- rep->vport != FDB_UPLINK_VPORT) {
- if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
- esw->offloads.inline_mode < min_inline) {
+ if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
+ rep = rpriv->rep;
+ if (rep->vport != FDB_UPLINK_VPORT &&
+ (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+ esw->offloads.inline_mode < min_inline)) {
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
min_inline, esw->offloads.inline_mode);
@@ -651,29 +839,313 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return err;
}
+struct pedit_headers {
+ struct ethhdr eth;
+ struct iphdr ip4;
+ struct ipv6hdr ip6;
+ struct tcphdr tcp;
+ struct udphdr udp;
+};
+
+static int pedit_header_offsets[] = {
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+ [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+};
+
+#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
+
+static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
+ struct pedit_headers *masks,
+ struct pedit_headers *vals)
+{
+ u32 *curr_pmask, *curr_pval;
+
+ if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
+ goto out_err;
+
+ curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
+ curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
+
+ if (*curr_pmask & mask) /* disallow acting twice on the same location */
+ goto out_err;
+
+ *curr_pmask |= mask;
+ *curr_pval |= (val & mask);
+
+ return 0;
+
+out_err:
+ return -EOPNOTSUPP;
+}
+
+struct mlx5_fields {
+ u8 field;
+ u8 size;
+ u32 offset;
+};
+
+static struct mlx5_fields fields[] = {
+ {MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_dest[4])},
+ {MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
+ {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
+
+ {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
+ {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
+
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
+ {MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
+ {MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},
+
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
+ {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},
+
+ {MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
+ {MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
+};
+
+/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
+ * max from the SW pedit action. On success, it says how many HW actions were
+ * actually parsed.
+ */
+static int offload_pedit_fields(struct pedit_headers *masks,
+ struct pedit_headers *vals,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ int i, action_size, nactions, max_actions, first, last;
+ void *s_masks_p, *a_masks_p, *vals_p;
+ u32 s_mask, a_mask, val;
+ struct mlx5_fields *f;
+ u8 cmd, field_bsize;
+ unsigned long mask;
+ void *action;
+
+ set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
+ add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
+ set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
+ add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
+
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action = parse_attr->mod_hdr_actions;
+ max_actions = parse_attr->num_mod_hdr_actions;
+ nactions = 0;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ f = &fields[i];
+ /* avoid seeing bits set from previous iterations */
+ s_mask = a_mask = mask = val = 0;
+
+ s_masks_p = (void *)set_masks + f->offset;
+ a_masks_p = (void *)add_masks + f->offset;
+
+ memcpy(&s_mask, s_masks_p, f->size);
+ memcpy(&a_mask, a_masks_p, f->size);
+
+ if (!s_mask && !a_mask) /* nothing to offload here */
+ continue;
+
+ if (s_mask && a_mask) {
+ printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
+ return -EOPNOTSUPP;
+ }
+
+ if (nactions == max_actions) {
+ printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
+ return -EOPNOTSUPP;
+ }
+
+ if (s_mask) {
+ cmd = MLX5_ACTION_TYPE_SET;
+ mask = s_mask;
+ vals_p = (void *)set_vals + f->offset;
+ /* clear to denote we consumed this field */
+ memset(s_masks_p, 0, f->size);
+ } else {
+ cmd = MLX5_ACTION_TYPE_ADD;
+ mask = a_mask;
+ vals_p = (void *)add_vals + f->offset;
+ /* clear to denote we consumed this field */
+ memset(a_masks_p, 0, f->size);
+ }
+
+ memcpy(&val, vals_p, f->size);
+
+ field_bsize = f->size * BITS_PER_BYTE;
+ first = find_first_bit(&mask, field_bsize);
+ last = find_last_bit(&mask, field_bsize);
+ if (first > 0 || last != (field_bsize - 1)) {
+ printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
+ mask);
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(set_action_in, action, action_type, cmd);
+ MLX5_SET(set_action_in, action, field, f->field);
+
+ if (cmd == MLX5_ACTION_TYPE_SET) {
+ MLX5_SET(set_action_in, action, offset, 0);
+ /* length is the number of bits to write; zero means 32 */
+ MLX5_SET(set_action_in, action, length, field_bsize);
+ }
+
+ if (field_bsize == 32)
+ MLX5_SET(set_action_in, action, data, ntohl(val));
+ else if (field_bsize == 16)
+ MLX5_SET(set_action_in, action, data, ntohs(val));
+ else if (field_bsize == 8)
+ MLX5_SET(set_action_in, action, data, val);
+
+ action += action_size;
+ nactions++;
+ }
+
+ parse_attr->num_mod_hdr_actions = nactions;
+ return 0;
+}
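
offload_pedit_fields() above only offloads full-field rewrites: the first set bit of the combined mask must be bit 0 and the last must be bit field_bsize - 1, which is exactly what the find_first_bit()/find_last_bit() pair checks before a partial rewrite is rejected with -EOPNOTSUPP. A standalone sketch of the same endpoint test (userspace approximation, not the driver code):

#include <stdbool.h>
#include <stdio.h>

/* sketch of the driver's endpoint check: the lowest set bit must be
 * bit 0 and the highest must be bit field_bsize - 1 */
static bool mask_covers_field(unsigned long mask, int field_bsize)
{
	int first = -1, last = -1;

	for (int b = 0; b < field_bsize; b++) {
		if (mask & (1UL << b)) {
			if (first < 0)
				first = b;
			last = b;
		}
	}
	return first == 0 && last == field_bsize - 1;
}

int main(void)
{
	printf("%d\n", mask_covers_field(0xffff, 16)); /* 1: full rewrite */
	printf("%d\n", mask_covers_field(0x00ff, 16)); /* 0: partial, rejected */
	return 0;
}
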
+
+static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
+ const struct tc_action *a, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int nkeys, action_size, max_actions;
+
+ nkeys = tcf_pedit_nkeys(a);
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+ max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
+ else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
+
+ /* can get as many as 16 HW actions from a single 32-bit pedit SW key */
+ max_actions = min(max_actions, nkeys * 16);
+
+ parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
+ if (!parse_attr->mod_hdr_actions)
+ return -ENOMEM;
+
+ parse_attr->num_mod_hdr_actions = max_actions;
+ return 0;
+}
+
+static const struct pedit_headers zero_masks = {};
+
+static int parse_tc_pedit_action(struct mlx5e_priv *priv,
+ const struct tc_action *a, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
+ int nkeys, i, err = -EOPNOTSUPP;
+ u32 mask, val, offset;
+ u8 cmd, htype;
+
+ nkeys = tcf_pedit_nkeys(a);
+
+ memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+ memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+
+ for (i = 0; i < nkeys; i++) {
+ htype = tcf_pedit_htype(a, i);
+ cmd = tcf_pedit_cmd(a, i);
+ err = -EOPNOTSUPP; /* can't be all optimistic */
+
+ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
+ printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
+ goto out_err;
+ }
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
+ printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
+ goto out_err;
+ }
+
+ mask = tcf_pedit_mask(a, i);
+ val = tcf_pedit_val(a, i);
+ offset = tcf_pedit_offset(a, i);
+
+ err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
+ if (err)
+ goto out_err;
+ }
+
+ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+ if (err)
+ goto out_err;
+
+ err = offload_pedit_fields(masks, vals, parse_attr);
+ if (err < 0)
+ goto out_dealloc_parsed_actions;
+
+ for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
+ cmd_masks = &masks[cmd];
+ if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+ printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
+ cmd);
+ print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
+ 16, 1, cmd_masks, sizeof(zero_masks), true);
+ err = -EOPNOTSUPP;
+ goto out_dealloc_parsed_actions;
+ }
+ }
+
+ return 0;
+
+out_dealloc_parsed_actions:
+ kfree(parse_attr->mod_hdr_actions);
+out_err:
+ return err;
+}
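
For context, the keys this parser walks come from extended pedit rules. Something like the following illustrative iproute2 invocation (device names assumed, exact syntax may vary by iproute2 version) would exercise the set-TTL path together with a redirect:

tc filter add dev enp1s0f0 protocol ip parent ffff: \
	flower ip_proto tcp \
	action pedit ex munge ip ttl set 63 pipe \
	action mirred egress redirect dev enp1s0f0_0
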
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *flow_tag)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
LIST_HEAD(actions);
+ int err;
if (tc_no_actions(exts))
return -EINVAL;
- *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- *action = 0;
+ attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ attr->action = 0;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
- if (*action)
+ if (attr->action)
return -EINVAL;
if (is_tcf_gact_shot(a)) {
- *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.flow_counter))
- *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ continue;
+ }
+
+ if (is_tcf_pedit(a)) {
+ err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
@@ -686,8 +1158,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
- *flow_tag = mark;
- *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flow_tag = mark;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
@@ -853,16 +1325,17 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev,
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct mlx5_encap_entry *e,
- struct net_device **out_dev)
+ struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
char *encap_header;
int ttl, err;
+ u8 nud_state;
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -887,25 +1360,36 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
fl4.daddr = tun_key->u.ipv4.dst;
fl4.saddr = tun_key->u.ipv4.src;
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
+ err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
&fl4, &n, &ttl);
if (err)
goto out;
- if (!(n->nud_state & NUD_VALID)) {
- pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
- err = -EOPNOTSUPP;
+ /* used by mlx5e_detach_encap to look up the corresponding
+ * neigh hash table entry when a user deletes a rule
+ */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+ /* It's important to add the neigh to the hash table before checking
+ * its validity state, so that if a notification arrives when the
+ * neigh changes its validity state, we can find the relevant neigh
+ * in the hash.
+ */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
goto out;
- }
- e->n = n;
- e->out_dev = *out_dev;
-
- neigh_ha_snapshot(e->h_dest, n, *out_dev);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
- gen_vxlan_header_ipv4(*out_dev, encap_header,
+ gen_vxlan_header_ipv4(out_dev, encap_header,
ipv4_encap_size, e->h_dest, ttl,
fl4.daddr,
fl4.saddr, tun_key->tp_dst,
@@ -913,31 +1397,49 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto destroy_neigh_entry;
+ }
+ e->encap_size = ipv4_encap_size;
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ return -EAGAIN;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
ipv4_encap_size, encap_header, &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
- if (err && n)
- neigh_release(n);
kfree(encap_header);
+ if (n)
+ neigh_release(n);
return err;
}
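
The ordering in the function above is deliberate: the encap entry is attached to the representor's neigh hash before nud_state is sampled, so a concurrent neigh notification can always find the entry. A compact control-flow sketch (comments only, mirroring the code above rather than adding to it):

/*
 * route lookup             -> neighbour n
 * record m_neigh key       -> device + dst IP, used on rule delete
 * attach entry to hash     -> must precede the NUD check (see above)
 * snapshot under n->lock   -> nud_state and MAC (n->ha)
 * build tunnel header      -> with the snapshotted MAC
 * !(nud_state & NUD_VALID) -> neigh_event_send(); return -EAGAIN
 *                             (flow stays cached, offloaded on neigh event)
 * else                     -> mlx5_encap_alloc(); mark ENTRY_VALID
 */
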
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct mlx5_encap_entry *e,
- struct net_device **out_dev)
-
+ struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
char *encap_header;
int err, ttl = 0;
+ u8 nud_state;
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -963,25 +1465,36 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
+ err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
&fl6, &n, &ttl);
if (err)
goto out;
- if (!(n->nud_state & NUD_VALID)) {
- pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
- err = -EOPNOTSUPP;
+ /* used by mlx5e_detach_encap to look up the corresponding
+ * neigh hash table entry when a user deletes a rule
+ */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+ /* It's important to add the neigh to the hash table before checking
+ * its validity state, so that if a notification arrives when the
+ * neigh changes its validity state, we can find the relevant neigh
+ * in the hash.
+ */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
goto out;
- }
-
- e->n = n;
- e->out_dev = *out_dev;
- neigh_ha_snapshot(e->h_dest, n, *out_dev);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
- gen_vxlan_header_ipv6(*out_dev, encap_header,
+ gen_vxlan_header_ipv6(out_dev, encap_header,
ipv6_encap_size, e->h_dest, ttl,
&fl6.daddr,
&fl6.saddr, tun_key->tp_dst,
@@ -989,31 +1502,51 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
break;
default:
err = -EOPNOTSUPP;
- goto out;
+ goto destroy_neigh_entry;
+ }
+
+ e->encap_size = ipv6_encap_size;
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ return -EAGAIN;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
ipv6_encap_size, encap_header, &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
- if (err && n)
- neigh_release(n);
kfree(encap_header);
+ if (n)
+ neigh_release(n);
return err;
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
- struct mlx5_esw_flow_attr *attr)
+ struct net_device **encap_dev,
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
unsigned short family = ip_tunnel_info_af(tun_info);
+ struct mlx5e_priv *up_priv = netdev_priv(up_dev);
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
- struct mlx5_encap_entry *e;
- struct net_device *out_dev;
- int tunnel_type, err = -EOPNOTSUPP;
+ struct mlx5e_encap_entry *e;
+ int tunnel_type, err = 0;
uintptr_t hash_key;
bool found = false;
@@ -1048,10 +1581,8 @@ vxlan_encap_offload_err:
}
}
- if (found) {
- attr->encap = e;
- return 0;
- }
+ if (found)
+ goto attach_flow;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
@@ -1062,16 +1593,21 @@ vxlan_encap_offload_err:
INIT_LIST_HEAD(&e->flows);
if (family == AF_INET)
- err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+ err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
- err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);
+ err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
- if (err)
+ if (err && err != -EAGAIN)
goto out_err;
- attr->encap = e;
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+attach_flow:
+ list_add(&flow->encap, &e->flows);
+ *encap_dev = e->out_dev;
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID)
+ attr->encap_id = e->encap_id;
+
return err;
out_err:
@@ -1080,20 +1616,22 @@ out_err:
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct ip_tunnel_info *info = NULL;
const struct tc_action *a;
LIST_HEAD(actions);
bool encap = false;
- int err;
+ int err = 0;
if (tc_no_actions(exts))
return -EINVAL;
memset(attr, 0, sizeof(*attr));
- attr->in_rep = priv->ppriv;
+ attr->in_rep = rpriv->rep;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
@@ -1103,9 +1641,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue;
}
+ if (is_tcf_pedit(a)) {
+ err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ continue;
+ }
+
if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out_dev;
+ struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_priv *out_priv;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1115,18 +1663,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(out_dev);
- attr->out_rep = out_priv->ppriv;
+ rpriv = out_priv->ppriv;
+ attr->out_rep = rpriv->rep;
} else if (encap) {
err = mlx5e_attach_encap(priv, info,
- out_dev, attr);
- if (err)
+ out_dev, &encap_dev, flow);
+ if (err && err != -EAGAIN)
return err;
- list_add(&flow->encap, &attr->encap->flows);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- out_priv = netdev_priv(attr->encap->out_dev);
- attr->out_rep = out_priv->ppriv;
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ attr->out_rep = rpriv->rep;
+ attr->parse_attr = parse_attr;
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
@@ -1166,28 +1716,30 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
- return 0;
+ return err;
}
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
- int err, attr_size = 0;
- u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
- struct mlx5_flow_spec *spec;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int attr_size, err = 0;
u8 flow_flags = 0;
if (esw && esw->mode == SRIOV_OFFLOADS) {
flow_flags = MLX5E_TC_FLOW_ESWITCH;
attr_size = sizeof(struct mlx5_esw_flow_attr);
+ } else {
+ flow_flags = MLX5E_TC_FLOW_NIC;
+ attr_size = sizeof(struct mlx5_nic_flow_attr);
}
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec || !flow) {
+ parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
+ if (!parse_attr || !flow) {
err = -ENOMEM;
goto err_free;
}
@@ -1195,42 +1747,54 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
flow->cookie = f->cookie;
flow->flags = flow_flags;
- err = parse_cls_flower(priv, flow, spec, f);
+ err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
if (err < 0)
goto err_free;
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
- err = parse_tc_fdb_actions(priv, f->exts, flow);
+ err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
- goto err_free;
- flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
+ goto err_handle_encap_flow;
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
} else {
- err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
+ err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
+ flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
}
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
- goto err_del_rule;
+ goto err_free;
}
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
goto err_del_rule;
- goto out;
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
+ !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+ kvfree(parse_attr);
+ return err;
err_del_rule:
mlx5e_tc_del_flow(priv, flow);
+err_handle_encap_flow:
+ if (err == -EAGAIN) {
+ err = rhashtable_insert_fast(&tc->ht, &flow->node,
+ tc->ht_params);
+ if (err)
+ mlx5e_tc_del_flow(priv, flow);
+ else
+ return 0;
+ }
+
err_free:
+ kvfree(parse_attr);
kfree(flow);
-out:
- kvfree(spec);
return err;
}
@@ -1249,7 +1813,6 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
mlx5e_tc_del_flow(priv, flow);
-
kfree(flow);
return 0;
@@ -1272,6 +1835,9 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
if (!flow)
return -EINVAL;
+ if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
+ return 0;
+
counter = mlx5_flow_rule_counter(flow->rule);
if (!counter)
return 0;
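
Taken together, the -EAGAIN path above means a flow whose tunnel neighbour is unresolved is still inserted into the hashtable, just without MLX5E_TC_FLOW_OFFLOADED; mlx5e_tc_encap_flows_add() sets the flag once the neighbour becomes valid, and mlx5e_stats_flower() returns early until then. A standalone sketch of that flag lifecycle (flag values copied from the enum above, everything else illustrative):

#include <stdio.h>

/* sketch (not driver code): lifecycle of the OFFLOADED flag for an
 * encap flow whose neighbour is not yet resolved */
enum { FLOW_ESWITCH = 1 << 0, FLOW_OFFLOADED = 1 << 2 };

struct flow { unsigned int flags; };

static void neigh_resolved(struct flow *f)
{
	/* mlx5e_tc_encap_flows_add(): rule installed, stats become valid */
	f->flags |= FLOW_OFFLOADED;
}

int main(void)
{
	struct flow f = { .flags = FLOW_ESWITCH }; /* cached via -EAGAIN path */

	printf("query stats early: %s\n",
	       (f.flags & FLOW_OFFLOADED) ? "read counter" : "return 0");
	neigh_resolved(&f);
	printf("after neigh event: %s\n",
	       (f.flags & FLOW_OFFLOADED) ? "read counter" : "return 0");
	return 0;
}
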
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 34bf903fc886..ecbe30d808ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -46,6 +46,15 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f);
+struct mlx5e_encap_entry;
+void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+
+struct mlx5e_neigh_hash_entry;
+void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
+
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{
return atomic_read(&priv->fs.tc.ht.nelems);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 57f5e2d7ebd1..ab3bb026ff9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -33,34 +33,12 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
+#include "ipoib.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-
- memset(cseg, 0, sizeof(*cseg));
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
-
- sq->pc++;
- sq->stats.nop++;
-
- if (notify_hw) {
- cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
- }
-}
-
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
struct mlx5e_sq_dma *dma)
{
@@ -76,25 +54,25 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
}
}
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
dma_addr_t addr,
u32 size,
enum mlx5e_dma_map_type map_type)
{
u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
- sq->db.txq.dma_fifo[i].addr = addr;
- sq->db.txq.dma_fifo[i].size = size;
- sq->db.txq.dma_fifo[i].type = map_type;
+ sq->db.dma_fifo[i].addr = addr;
+ sq->db.dma_fifo[i].size = size;
+ sq->db.dma_fifo[i].type = map_type;
sq->dma_fifo_pc++;
}
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
- return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
+ return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
int i;
@@ -111,6 +89,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
+ u16 num_channels;
int up = 0;
if (!netdev_get_num_tc(dev))
@@ -122,11 +101,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
*/
- if (channel_ix >= priv->params.num_channels)
- channel_ix = reciprocal_scale(channel_ix,
- priv->params.num_channels);
+ num_channels = priv->channels.params.num_channels;
+ if (channel_ix >= num_channels)
+ channel_ix = reciprocal_scale(channel_ix, num_channels);
- return priv->channeltc_to_txq_map[channel_ix][up];
+ return priv->channel_tc2txq[channel_ix][up];
}
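
reciprocal_scale() folds an out-of-range index into [0, num_channels) with a multiply-shift instead of a modulo; a standalone sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as the kernel's reciprocal_scale():
 * multiply-shift maps val into [0, ep_ro) without a division */
static uint32_t reciprocal_scale_sketch(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t num_channels = 8;

	for (uint32_t ix = 0; ix < 4; ix++)
		printf("%u -> %u\n", 0x40000000u * ix,
		       reciprocal_scale_sketch(0x40000000u * ix, num_channels));
	return 0;
}
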
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
@@ -175,25 +154,6 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
}
}
-static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
- struct sk_buff *skb, bool bf)
-{
- /* Some NIC TX decisions, e.g loopback, are based on the packet
- * headers and occur before the data gather.
- * Therefore these headers must be copied into the WQE
- */
- if (bf) {
- u16 ihs = skb_headlen(skb);
-
- if (skb_vlan_tag_present(skb))
- ihs += VLAN_HLEN;
-
- if (ihs <= sq->max_inline)
- return skb_headlen(skb);
- }
- return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
-}
-
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
unsigned int *skb_len,
unsigned int len)
@@ -218,31 +178,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+static inline void
+mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
-
- unsigned char *skb_data = skb->data;
- unsigned int skb_len = skb->len;
- u8 opcode = MLX5_OPCODE_SEND;
- dma_addr_t dma_addr = 0;
- unsigned int num_bytes;
- bool bf = false;
- u16 headlen;
- u16 ds_cnt;
- u16 ihs;
- int i;
-
- memset(wqe, 0, sizeof(*wqe));
-
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -254,74 +192,51 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
} else
sq->stats.csum_none++;
+}
- if (sq->cc != sq->prev_cc) {
- sq->prev_cc = sq->cc;
- sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
- }
-
- if (skb_is_gso(skb)) {
- eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- opcode = MLX5_OPCODE_LSO;
+static inline u16
+mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
+{
+ u16 ihs;
- if (skb->encapsulation) {
- ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
- sq->stats.tso_inner_packets++;
- sq->stats.tso_inner_bytes += skb->len - ihs;
- } else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
- sq->stats.tso_packets++;
- sq->stats.tso_bytes += skb->len - ihs;
- }
+ eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- sq->stats.packets += skb_shinfo(skb)->gso_segs;
- num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ if (skb->encapsulation) {
+ ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ sq->stats.tso_inner_packets++;
+ sq->stats.tso_inner_bytes += skb->len - ihs;
} else {
- bf = sq->bf_budget &&
- !skb->xmit_more &&
- !skb_shinfo(skb)->nr_frags;
- ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
- sq->stats.packets++;
- num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- }
-
- sq->stats.bytes += num_bytes;
- wi->num_bytes = num_bytes;
-
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (ihs) {
- if (skb_vlan_tag_present(skb)) {
- mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
- ihs += VLAN_HLEN;
- } else {
- memcpy(eseg->inline_hdr.start, skb_data, ihs);
- mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
- }
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
- ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
- } else if (skb_vlan_tag_present(skb)) {
- eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
- eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += skb->len - ihs;
}
- dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+ *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ return ihs;
+}
- wi->num_dma = 0;
+static inline int
+mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ unsigned char *skb_data, u16 headlen,
+ struct mlx5_wqe_data_seg *dseg)
+{
+ dma_addr_t dma_addr = 0;
+ u8 num_dma = 0;
+ int i;
- headlen = skb_len - skb->data_len;
if (headlen) {
dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
- goto dma_unmap_wqe_err;
+ return -ENOMEM;
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(headlen);
mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
- wi->num_dma++;
-
+ num_dma++;
dseg++;
}
@@ -330,59 +245,120 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
int fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
- DMA_TO_DEVICE);
+ DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
- goto dma_unmap_wqe_err;
+ return -ENOMEM;
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
dseg->byte_count = cpu_to_be32(fsz);
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
- wi->num_dma++;
-
+ num_dma++;
dseg++;
}
- ds_cnt += wi->num_dma;
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ return num_dma;
+}
- sq->db.txq.skb[pi] = skb;
+static inline void
+mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
+ struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi;
+ wi->num_bytes = num_bytes;
+ wi->num_dma = num_dma;
wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- sq->pc += wi->num_wqebbs;
+ wi->skb = skb;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- netdev_tx_sent_queue(sq->txq, wi->num_bytes);
+ netdev_tx_sent_queue(sq->txq, num_bytes);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+ sq->pc += wi->num_wqebbs;
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
netif_tx_stop_queue(sq->txq);
sq->stats.stopped++;
}
- sq->stats.xmit_more += skb->xmit_more;
- if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
- int bf_sz = 0;
+ if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
- if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
- bf_sz = wi->num_wqebbs << 3;
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->db.wqe_info[pi].skb = NULL;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ sq->stats.nop++;
+ }
+}
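
The edge-fill loop at the end of mlx5e_txwqe_complete() guarantees that a multi-WQEBB descriptor never wraps across the end of the ring: once the producer index passes sq->edge, nops pad the tail until the index wraps to slot 0. A standalone sketch of that index arithmetic (ring size and edge are illustrative, not the driver's values):

#include <stdint.h>
#include <stdio.h>

#define RING_SZ   16              /* slots, power of two */
#define MAX_WQEBB 4               /* largest descriptor, in slots */
#define EDGE      (RING_SZ - MAX_WQEBB)

int main(void)
{
	uint16_t pc = 14;             /* producer counter after posting */
	uint16_t pi;

	/* fill the edge with nops so the next big WQE starts at slot 0 */
	while ((pi = (pc & (RING_SZ - 1))) > EDGE) {
		printf("post nop at slot %u\n", pi);
		pc++;
	}
	printf("next WQE starts at slot %u\n", pc & (RING_SZ - 1));
	return 0;
}
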
- cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+
+ unsigned char *skb_data = skb->data;
+ unsigned int skb_len = skb->len;
+ u8 opcode = MLX5_OPCODE_SEND;
+ unsigned int num_bytes;
+ int num_dma;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ if (skb_is_gso(skb)) {
+ opcode = MLX5_OPCODE_LSO;
+ ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
+ sq->stats.packets += skb_shinfo(skb)->gso_segs;
+ } else {
+ ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ sq->stats.packets++;
}
+ sq->stats.bytes += num_bytes;
+ sq->stats.xmit_more += skb->xmit_more;
- /* fill sq edge with nops to avoid wqe wrap around */
- while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
- sq->db.txq.skb[pi] = NULL;
- mlx5e_send_nop(sq, false);
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ if (ihs) {
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
+ ihs += VLAN_HLEN;
+ } else {
+ memcpy(eseg->inline_hdr.start, skb_data, ihs);
+ mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+ }
+ eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
+ } else if (skb_vlan_tag_present(skb)) {
+ eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+ eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
}
- if (bf)
- sq->bf_budget--;
+ headlen = skb_len - skb->data_len;
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
+ (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ if (unlikely(num_dma < 0))
+ goto dma_unmap_wqe_err;
+
+ mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
+ num_bytes, num_dma, wi, cseg);
return NETDEV_TX_OK;
@@ -398,21 +374,21 @@ dma_unmap_wqe_err:
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+ struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
return mlx5e_sq_xmit(sq, skb);
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
- struct mlx5e_sq *sq;
+ struct mlx5e_txqsq *sq;
u32 dma_fifo_cc;
u32 nbytes;
u16 npkts;
u16 sqcc;
int i;
- sq = container_of(cq, struct mlx5e_sq, cq);
+ sq = container_of(cq, struct mlx5e_txqsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
@@ -450,8 +426,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
last_wqe = (sqcc == wqe_counter);
ci = sqcc & sq->wq.sz_m1;
- skb = sq->db.txq.skb[ci];
- wi = &sq->db.txq.wqe_info[ci];
+ wi = &sq->db.wqe_info[ci];
+ skb = wi->skb;
if (unlikely(!skb)) { /* nop */
sqcc++;
@@ -492,7 +468,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+ mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
netif_tx_wake_queue(sq->txq);
sq->stats.wake++;
}
@@ -500,7 +476,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
+void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -509,8 +485,8 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
- skb = sq->db.txq.skb[ci];
- wi = &sq->db.txq.wqe_info[ci];
+ wi = &sq->db.wqe_info[ci];
+ skb = wi->skb;
if (!skb) { /* nop */
sq->cc++;
@@ -529,36 +505,89 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
}
}
-static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
+#ifdef CONFIG_MLX5_CORE_IPOIB
+
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
+
+struct mlx5i_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_datagram_seg datagram;
+ struct mlx5_wqe_eth_pad pad;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+static inline void
+mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
+ struct mlx5_wqe_datagram_seg *dseg)
{
- struct mlx5e_sq_wqe_info *wi;
- struct mlx5e_dma_info *di;
- u16 ci;
+ memcpy(&dseg->av, av, sizeof(struct mlx5_av));
+ dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
+ dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
+}
- while (sq->cc != sq->pc) {
- ci = sq->cc & sq->wq.sz_m1;
- di = &sq->db.xdp.di[ci];
- wi = &sq->db.xdp.wqe_info[ci];
+netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
- if (wi->opcode == MLX5_OPCODE_NOP) {
- sq->cc++;
- continue;
- }
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- sq->cc += wi->num_wqebbs;
+ unsigned char *skb_data = skb->data;
+ unsigned int skb_len = skb->len;
+ u8 opcode = MLX5_OPCODE_SEND;
+ unsigned int num_bytes;
+ int num_dma;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
- mlx5e_page_release(&sq->channel->rq, di, false);
+ memset(wqe, 0, sizeof(*wqe));
+
+ mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ if (skb_is_gso(skb)) {
+ opcode = MLX5_OPCODE_LSO;
+ ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
+ } else {
+ ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
-}
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
-{
- switch (sq->type) {
- case MLX5E_SQ_TXQ:
- mlx5e_free_txq_sq_descs(sq);
- break;
- case MLX5E_SQ_XDP:
- mlx5e_free_xdp_sq_descs(sq);
- break;
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ if (ihs) {
+ memcpy(eseg->inline_hdr.start, skb_data, ihs);
+ mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+ eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
}
+
+ headlen = skb_len - skb->data_len;
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
+ (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ if (unlikely(num_dma < 0))
+ goto dma_unmap_wqe_err;
+
+ mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
+ num_bytes, num_dma, wi, cseg);
+
+ return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+ sq->stats.dropped++;
+ mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
+
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
}
+
+#endif
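
The new mlx5i_tx_wqe differs from the Ethernet WQE by the address-vector datagram segment plus a 16-byte pad before the eth segment; ds_cnt is still derived by dividing the WQE size by the 16-byte data-segment unit. A sketch of that arithmetic (segment sizes are assumptions for illustration, not copied from mlx5 headers):

#include <stdio.h>

/* segment sizes per the mlx5 WQE layout (values assumed for the sketch) */
#define SEG_CTRL     16
#define SEG_DATAGRAM 48   /* address vector */
#define SEG_PAD      16
#define SEG_ETH      16
#define WQE_DS       16   /* one data-segment unit */

int main(void)
{
	int wqe_sz = SEG_CTRL + SEG_DATAGRAM + SEG_PAD + SEG_ETH;

	/* base ds_cnt before inline headers and gather entries are added */
	printf("IPoIB base ds_cnt = %d\n", wqe_sz / WQE_DS);
	return 0;
}
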
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index e5c12a732aa1..5ca6714e3e02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -37,124 +37,69 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
struct mlx5_cqwq *wq = &cq->wq;
u32 ci = mlx5_cqwq_get_ci(wq);
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
- int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
- int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+ u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
if (cqe_ownership_bit != sw_ownership_val)
return NULL;
/* ensure cqe content is read after cqe ownership bit */
- rmb();
+ dma_rmb();
return cqe;
}
-static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
+ struct mlx5e_icosq *sq,
+ struct mlx5_cqe64 *cqe,
+ u16 *sqcc)
{
- struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
- struct mlx5_wq_cyc *wq;
- struct mlx5_cqe64 *cqe;
- u16 sqcc;
-
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+ struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
+ struct mlx5e_rq *rq = &sq->channel->rq;
+
+ prefetch(rq);
+ mlx5_cqwq_pop(&cq->wq);
+ *sqcc += icowi->num_wqebbs;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+ WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+ cqe->op_own);
return;
+ }
- cqe = mlx5e_get_cqe(cq);
- if (likely(!cqe))
+ if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
+ mlx5e_post_rx_mpwqe(rq);
return;
+ }
- wq = &sq->wq;
-
- /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
- * otherwise a cq overrun may occur
- */
- sqcc = sq->cc;
-
- do {
- u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
- struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
-
- mlx5_cqwq_pop(&cq->wq);
- sqcc += icowi->num_wqebbs;
-
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
- WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
- cqe->op_own);
- break;
- }
-
- switch (icowi->opcode) {
- case MLX5_OPCODE_NOP:
- break;
- case MLX5_OPCODE_UMR:
- mlx5e_post_rx_mpwqe(&sq->channel->rq);
- break;
- default:
- WARN_ONCE(true,
- "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- icowi->opcode);
- }
-
- } while ((cqe = mlx5e_get_cqe(cq)));
-
- mlx5_cqwq_update_db_record(&cq->wq);
-
- /* ensure cq space is freed before enabling more cqes */
- wmb();
-
- sq->cc = sqcc;
+ if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
+ WARN_ONCE(true,
+ "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+ icowi->opcode);
}
-static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
- struct mlx5e_sq *sq;
+ struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
+ struct mlx5_cqe64 *cqe;
u16 sqcc;
- int i;
-
- sq = container_of(cq, struct mlx5e_sq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return false;
+ return;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (likely(!cqe))
+ return;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
*/
sqcc = sq->cc;
- for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
- struct mlx5_cqe64 *cqe;
- u16 wqe_counter;
- bool last_wqe;
-
- cqe = mlx5e_get_cqe(cq);
- if (!cqe)
- break;
-
- mlx5_cqwq_pop(&cq->wq);
-
- wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
- do {
- struct mlx5e_sq_wqe_info *wi;
- struct mlx5e_dma_info *di;
- u16 ci;
-
- last_wqe = (sqcc == wqe_counter);
-
- ci = sqcc & sq->wq.sz_m1;
- di = &sq->db.xdp.di[ci];
- wi = &sq->db.xdp.wqe_info[ci];
-
- if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
- sqcc++;
- continue;
- }
-
- sqcc += wi->num_wqebbs;
- /* Recycle RX page */
- mlx5e_page_release(&sq->channel->rq, di, true);
- } while (!last_wqe);
- }
+ /* by design, there's only a single cqe */
+ mlx5e_poll_ico_single_cqe(cq, sq, cqe, &sqcc);
mlx5_cqwq_update_db_record(&cq->wq);
@@ -162,7 +107,6 @@ static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
wmb();
sq->cc = sqcc;
- return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
@@ -178,12 +122,12 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+ if (c->xdp)
+ busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
+
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
- if (c->xdp)
- busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
-
mlx5e_poll_ico_cq(&c->icosq.cq);
busy |= mlx5e_post_rx_wqes(&c->rq);
@@ -224,8 +168,7 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct net_device *netdev = priv->netdev;
+ struct net_device *netdev = c->netdev;
netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
__func__, mcq->cqn, event);
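
The mlx5e_get_cqe() hunk above tightens the ownership values to u8 and replaces rmb() with the cheaper dma_rmb(), which suffices for ordering reads from coherent DMA memory. The ownership handshake itself compares the CQE's owner bit with the parity of the consumer's wrap count; a standalone sketch of that check (ring size illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_SZ 8 /* entries, power of two */

/* the owner bit flips meaning on every full pass over the ring, so a
 * stale CQE from the previous lap is never mistaken for a new one */
static bool cqe_is_valid(uint8_t op_own, uint32_t cc)
{
	uint8_t hw_owner = op_own & 1;          /* bit written by HW */
	uint8_t sw_owner = (cc / CQ_SZ) & 1;    /* wrap-count parity */

	return hw_owner == sw_owner;
}

int main(void)
{
	/* first lap: HW writes owner=0; on the next lap SW expects 1 */
	printf("lap0 fresh: %d\n", cqe_is_valid(0, 0));     /* 1 */
	printf("lap1 stale: %d\n", cqe_is_valid(0, CQ_SZ)); /* 0 */
	return 0;
}
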
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index fcd5bc7e31db..2e34d95ea776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -53,13 +53,6 @@ struct esw_uc_addr {
u32 vport;
};
-/* E-Switch MC FDB table hash node */
-struct esw_mc_addr { /* SRIOV only */
- struct l2addr_node node;
- struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
- u32 refcnt;
-};
-
/* Vport UC/MC hash node */
struct vport_addr {
struct l2addr_node node;
@@ -337,6 +330,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
@@ -362,7 +356,9 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0);
+
+ ft_attr.max_fte = table_size;
+ fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
@@ -814,7 +810,7 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
bool promisc, bool mc_promisc)
{
- struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+ struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
struct mlx5_vport *vport = &esw->vports[vport_num];
if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
@@ -1685,7 +1681,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
esw->enabled_vports, esw->mode);
- mc_promisc = esw->mc_promisc;
+ mc_promisc = &esw->mc_promisc;
nvports = esw->enabled_vports;
for (i = 0; i < esw->total_vports; i++)
@@ -1729,7 +1725,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
int total_vports = MLX5_TOTAL_VPORTS(dev);
- struct esw_mc_addr *mc_promisc;
struct mlx5_eswitch *esw;
int vport_num;
int err;
@@ -1758,13 +1753,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
esw->l2_table.size = l2_table_size;
- mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
- if (!mc_promisc) {
- err = -ENOMEM;
- goto abort;
- }
- esw->mc_promisc = mc_promisc;
-
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
err = -ENOMEM;
@@ -1803,6 +1791,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->enabled_vports = 0;
esw->mode = SRIOV_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+ else
+ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
dev->priv.eswitch = esw;
return 0;
@@ -1827,7 +1820,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
- kfree(esw->mc_promisc);
kfree(esw->offloads.vport_reps);
kfree(esw->vports);
kfree(esw);
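
For reference, the attr-based creation pattern introduced here replaces the old
positional-argument call; a minimal sketch of a call site, assuming only the
struct mlx5_flow_table_attr fields exercised in this series (max_fte, prio,
level, flags, underlay_qpn):

        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_table *ft;

        ft_attr.max_fte = table_size;  /* rounded up to a power of two internally */
        ft_attr.prio    = 0;
        ft_attr.level   = 0;
        ft_attr.flags   = 0;           /* e.g. MLX5_FLOW_TABLE_TUNNEL_EN for encap/decap */

        ft = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(ft))
                return PTR_ERR(ft);
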
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ad329b1680b4..b746f62c8c79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -36,7 +36,6 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <net/devlink.h>
-#include <net/ip_tunnels.h>
#include <linux/mlx5/device.h>
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -210,6 +209,14 @@ struct mlx5_esw_offload {
DECLARE_HASHTABLE(encap_tbl, 8);
u8 inline_mode;
u64 num_flows;
+ u8 encap;
+};
+
+/* E-Switch MC FDB table hash node */
+struct esw_mc_addr { /* SRIOV only */
+ struct l2addr_node node;
+ struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
+ u32 refcnt;
};
struct mlx5_eswitch {
@@ -225,7 +232,7 @@ struct mlx5_eswitch {
* and async SRIOV admin state changes
*/
struct mutex state_lock;
- struct esw_mc_addr *mc_promisc;
+ struct esw_mc_addr mc_promisc;
struct {
bool enabled;
@@ -285,20 +292,8 @@ enum {
SET_VLAN_INSERT = BIT(1)
};
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
-
-struct mlx5_encap_entry {
- struct hlist_node encap_hlist;
- struct list_head flows;
- u32 encap_id;
- struct neighbour *n;
- struct ip_tunnel_info tun_info;
- unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
-
- struct net_device *out_dev;
- int tunnel_type;
-};
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x4000
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
@@ -307,7 +302,9 @@ struct mlx5_esw_flow_attr {
int action;
u16 vlan;
bool vlan_handled;
- struct mlx5_encap_entry *encap;
+ u32 encap_id;
+ u32 mod_hdr_id;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
};
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
@@ -321,6 +318,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
struct mlx5_eswitch_rep *rep);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d111cebca9f1..f991f669047e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -68,8 +68,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
- if (IS_ERR(counter))
- return ERR_CAST(counter);
+ if (IS_ERR(counter)) {
+ rule = ERR_CAST(counter);
+ goto err_counter_alloc;
+ }
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[i].counter = counter;
i++;
@@ -86,17 +88,25 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
- if (attr->encap)
- flow_act.encap_id = attr->encap->encap_id;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ flow_act.modify_id = attr->mod_hdr_id;
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ flow_act.encap_id = attr->encap_id;
rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
spec, &flow_act, dest, i);
if (IS_ERR(rule))
- mlx5_fc_destroy(esw->dev, counter);
+ goto err_add_rule;
else
esw->offloads.num_flows++;
return rule;
+
+err_add_rule:
+ mlx5_fc_destroy(esw->dev, counter);
+err_counter_alloc:
+ return rule;
}
void
@@ -106,12 +116,10 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
{
struct mlx5_fc *counter = NULL;
- if (!IS_ERR(rule)) {
- counter = mlx5_flow_rule_counter(rule);
- mlx5_del_flow_rules(rule);
- mlx5_fc_destroy(esw->dev, counter);
- esw->offloads.num_flows--;
- }
+ counter = mlx5_flow_rule_counter(rule);
+ mlx5_del_flow_rules(rule);
+ mlx5_fc_destroy(esw->dev, counter);
+ esw->offloads.num_flows--;
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
@@ -418,30 +426,21 @@ out:
return err;
}
-#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_GROUPS 4
-static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int table_size, ix, esw_size, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- struct mlx5_flow_group *g;
- u32 *flow_group_in;
- void *match_criteria;
+ int esw_size, err = 0;
u32 flags = 0;
- flow_group_in = mlx5_vzalloc(inlen);
- if (!flow_group_in)
- return -ENOMEM;
-
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
err = -EOPNOTSUPP;
- goto ns_err;
+ goto out;
}
esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
@@ -451,8 +450,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
@@ -462,12 +460,55 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
- goto fast_fdb_err;
+ goto out;
}
esw->fdb_table.fdb = fdb;
+out:
+ return err;
+}
+
+static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
+{
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+}
+
+#define MAX_PF_SQ 256
+
+static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb = NULL;
+ int table_size, ix, err = 0;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+
+ esw_debug(esw->dev, "Create offloads FDB Tables\n");
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ err = -EOPNOTSUPP;
+ goto ns_err;
+ }
+
+ err = esw_create_offloads_fast_fdb_table(esw);
+ if (err)
+ goto fast_fdb_err;
+
table_size = nvports + MAX_PF_SQ + 1;
- fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
+
+ ft_attr.max_fte = table_size;
+ ft_attr.prio = FDB_SLOW_PATH;
+
+ fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
@@ -532,25 +573,26 @@ ns_err:
return err;
}
-static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
if (!esw->fdb_table.fdb)
return;
- esw_debug(esw->dev, "Destroy offloads FDB Table\n");
+ esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
- mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ esw_destroy_offloads_fast_fdb_table(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *ft_offloads;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_table *ft_offloads;
+ struct mlx5_flow_namespace *ns;
int err = 0;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
@@ -559,7 +601,9 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
- ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
+ ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
+
+ ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
err = PTR_ERR(ft_offloads);
esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
@@ -700,7 +744,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_dev_list_unlock();
- err = esw_create_offloads_fdb_table(esw, nvports);
+ err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
goto create_fdb_err;
@@ -737,7 +781,7 @@ create_fg_err:
esw_destroy_offloads_table(esw);
create_ft_err:
- esw_destroy_offloads_fdb_table(esw);
+ esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
/* enable back PF RoCE */
@@ -783,7 +827,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
- esw_destroy_offloads_fdb_table(esw);
+ esw_destroy_offloads_fdb_tables(esw);
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
@@ -1012,6 +1056,66 @@ out:
return 0;
}
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
+ (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
+ return -EOPNOTSUPP;
+
+ if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_LEGACY) {
+ esw->offloads.encap = encap;
+ return 0;
+ }
+
+ if (esw->offloads.encap == encap)
+ return 0;
+
+ if (esw->offloads.num_flows > 0) {
+ esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+ return -EOPNOTSUPP;
+ }
+
+ esw_destroy_offloads_fast_fdb_table(esw);
+
+ esw->offloads.encap = encap;
+ err = esw_create_offloads_fast_fdb_table(esw);
+ if (err) {
+ esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+ esw->offloads.encap = !encap;
+ (void) esw_create_offloads_fast_fdb_table(esw);
+ }
+ return err;
+}
+
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ *encap = esw->offloads.encap;
+ return 0;
+}
+
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
struct mlx5_eswitch_rep *__rep)
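
A sketch of driving the new encap op directly (it is normally reached through
the devlink callbacks wired up in main.c below); the fast-path FDB can only be
re-created while no offloaded flows exist, so callers should expect
-EOPNOTSUPP otherwise:

        /* fails with -EOPNOTSUPP when SRIOV is off, the HCA lacks
         * encap/decap caps, or offloads.num_flows > 0
         */
        err = mlx5_devlink_eswitch_encap_mode_set(devlink,
                                                  DEVLINK_ESWITCH_ENCAP_MODE_BASIC);
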
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index b64a781c7e85..19e3d2fc2099 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -45,6 +45,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+ if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
+ ft->underlay_qpn == 0)
+ return 0;
+
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
@@ -54,6 +58,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
}
+ if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
+ ft->underlay_qpn != 0)
+ MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn);
+
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -249,6 +257,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
+ MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
@@ -515,3 +524,69 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 namespace, u8 num_actions,
+ void *modify_actions, u32 *modify_header_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
+ int max_actions, actions_size, inlen, err;
+ void *actions_in;
+ u8 table_type;
+ u32 *in;
+
+ switch (namespace) {
+ case MLX5_FLOW_NAMESPACE_FDB:
+ max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
+ table_type = FS_FT_FDB;
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
+ table_type = FS_FT_NIC_RX;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (num_actions > max_actions) {
+ mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
+ num_actions, max_actions);
+ return -EOPNOTSUPP;
+ }
+
+ actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
+ inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
+ MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
+
+ actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
+ memcpy(actions_in, modify_actions, actions_size);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+ *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
+ kfree(in);
+ return err;
+}
+
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(dealloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
+ modify_header_id);
+
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
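
The modify-header context takes an array of hardware set_action_in entries; a
minimal sketch of allocating and releasing one, assuming the set_action_in
layout and the MLX5_ACTION_TYPE_SET / MLX5_ACTION_IN_FIELD_OUT_IP_TTL
constants from mlx5_ifc.h (not part of this patch):

        u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
        u32 mod_hdr_id;
        int err;

        /* illustrative: rewrite the outer IP TTL to 64 */
        MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
        MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
        MLX5_SET(set_action_in, action, offset, 0);
        MLX5_SET(set_action_in, action, length, 8);
        MLX5_SET(set_action_in, action, data, 64);

        err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
                                       1, action, &mod_hdr_id);
        if (err)
                return err;
        /* ... use mod_hdr_id via flow_act.modify_id ... */
        mlx5_modify_header_dealloc(dev, mod_hdr_id);
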
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index ded27bb9a3b6..b8a176503d38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -476,6 +476,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
fte->index = index;
fte->action = flow_act->action;
fte->encap_id = flow_act->encap_id;
+ fte->modify_id = flow_act->modify_id;
return fte;
}
@@ -777,18 +778,16 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
}
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr,
enum fs_flow_table_op_mod op_mod,
- u16 vport, int prio,
- int max_fte, u32 level,
- u32 flags)
+ u16 vport)
{
+ struct mlx5_flow_root_namespace *root = find_root(&ns->node);
struct mlx5_flow_table *next_ft = NULL;
+ struct fs_prio *fs_prio = NULL;
struct mlx5_flow_table *ft;
- int err;
int log_table_sz;
- struct mlx5_flow_root_namespace *root =
- find_root(&ns->node);
- struct fs_prio *fs_prio = NULL;
+ int err;
if (!root) {
pr_err("mlx5: flow steering failed to find root of namespace\n");
@@ -796,29 +795,31 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
}
mutex_lock(&root->chain_lock);
- fs_prio = find_prio(ns, prio);
+ fs_prio = find_prio(ns, ft_attr->prio);
if (!fs_prio) {
err = -EINVAL;
goto unlock_root;
}
- if (level >= fs_prio->num_levels) {
+ if (ft_attr->level >= fs_prio->num_levels) {
err = -ENOSPC;
goto unlock_root;
}
/* The level is related to the
* priority level range.
*/
- level += fs_prio->start_level;
- ft = alloc_flow_table(level,
+ ft_attr->level += fs_prio->start_level;
+ ft = alloc_flow_table(ft_attr->level,
vport,
- max_fte ? roundup_pow_of_two(max_fte) : 0,
+ ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
root->table_type,
- op_mod, flags);
+ op_mod, ft_attr->flags);
if (!ft) {
err = -ENOMEM;
goto unlock_root;
}
+ ft->underlay_qpn = ft_attr->underlay_qpn;
+
tree_init_node(&ft->node, 1, del_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
@@ -848,44 +849,56 @@ unlock_root:
}
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio, int max_fte,
- u32 level,
- u32 flags)
+ struct mlx5_flow_table_attr *ft_attr)
{
- return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
- max_fte, level, flags);
+ return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio, int max_fte,
u32 level, u16 vport)
{
- return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
- max_fte, level, 0);
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.max_fte = max_fte;
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+
+ return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
-struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
- struct mlx5_flow_namespace *ns,
- int prio, u32 level)
+struct mlx5_flow_table*
+mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, u32 level)
{
- return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
- level, 0);
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+ return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
-struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags)
+struct mlx5_flow_table*
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries,
+ int max_num_groups,
+ u32 level,
+ u32 flags)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
if (max_num_groups > num_flow_table_entries)
return ERR_PTR(-EINVAL);
- ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags);
+ ft_attr.max_fte = num_flow_table_entries;
+ ft_attr.prio = prio;
+ ft_attr.level = level;
+ ft_attr.flags = flags;
+
+ ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return ft;
@@ -1827,12 +1840,18 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (WARN_ON(!ns))
return -EINVAL;
- ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
+
+ ft_attr.max_fte = ANCHOR_SIZE;
+ ft_attr.level = ANCHOR_LEVEL;
+ ft_attr.prio = ANCHOR_PRIO;
+
+ ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
@@ -1886,9 +1905,6 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return;
-
cleanup_root_ns(steering->root_ns);
cleanup_root_ns(steering->esw_egress_root_ns);
cleanup_root_ns(steering->esw_ingress_root_ns);
@@ -1991,9 +2007,6 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
struct mlx5_flow_steering *steering;
int err = 0;
- if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return 0;
-
err = mlx5_init_fc_stats(dev);
if (err)
return err;
@@ -2004,7 +2017,10 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
- if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev, nic_flow_table))) ||
+ ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
+ MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(steering);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 8e668c63f69e..81eafc7b9dd9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -118,6 +118,7 @@ struct mlx5_flow_table {
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
u32 flags;
+ u32 underlay_qpn;
};
struct mlx5_fc_cache {
@@ -152,6 +153,7 @@ struct fs_fte {
u32 index;
u32 action;
u32 encap_id;
+ u32 modify_id;
enum fs_fte_status status;
struct mlx5_fc *counter;
};
@@ -197,6 +199,11 @@ struct mlx5_flow_root_namespace {
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
+ struct delayed_work *dwork,
+ unsigned long delay);
+void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
+ unsigned long interval);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 7431f633de31..6507d8acc54d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -165,7 +165,8 @@ static void mlx5_fc_stats_work(struct work_struct *work)
list_splice_tail_init(&fc_stats->addlist, &tmplist);
if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
- queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
+ queue_delayed_work(fc_stats->wq, &fc_stats->work,
+ fc_stats->sampling_interval);
spin_unlock(&fc_stats->addlist_lock);
@@ -200,7 +201,7 @@ static void mlx5_fc_stats_work(struct work_struct *work)
node = mlx5_fc_stats_query(dev, counter, last->id);
}
- fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+ fc_stats->next_query = now + fc_stats->sampling_interval;
}
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
@@ -265,6 +266,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
if (!fc_stats->wq)
return -ENOMEM;
+ fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
return 0;
@@ -317,3 +319,21 @@ void mlx5_fc_query_cached(struct mlx5_fc *counter,
counter->lastbytes = c.bytes;
counter->lastpackets = c.packets;
}
+
+void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ queue_delayed_work(fc_stats->wq, dwork, delay);
+}
+
+void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
+ unsigned long interval)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ fc_stats->sampling_interval = min_t(unsigned long, interval,
+ fc_stats->sampling_interval);
+}
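
Because the new setter takes a min against the current value, consumers can
only shorten the counter polling period, never lengthen it; e.g. a user that
needs fresher samples (sketch):

        /* effective interval becomes min(current, requested) */
        mlx5_fc_update_sampling_interval(dev, msecs_to_jiffies(100));
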
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index d0bbefa08af7..1bc14d0fded8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -137,7 +137,8 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ if (MLX5_CAP_GEN(dev, nic_flow_table) ||
+ MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
new file mode 100644
index 000000000000..3c84e36af018
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/fs.h>
+#include "en.h"
+#include "ipoib.h"
+
+#define IB_DEFAULT_Q_KEY 0xb1b
+
+static int mlx5i_open(struct net_device *netdev);
+static int mlx5i_close(struct net_device *netdev);
+static int mlx5i_dev_init(struct net_device *dev);
+static void mlx5i_dev_cleanup(struct net_device *dev);
+
+static const struct net_device_ops mlx5i_netdev_ops = {
+ .ndo_open = mlx5i_open,
+ .ndo_stop = mlx5i_close,
+ .ndo_init = mlx5i_dev_init,
+ .ndo_uninit = mlx5i_dev_cleanup,
+};
+
+/* IPoIB mlx5 netdev profile */
+
+/* Called directly after IPoIB netdevice was created to initialize SW structs */
+static void mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+
+ mutex_init(&priv->state_lock);
+
+ netdev->hw_features |= NETIF_F_SG;
+ netdev->hw_features |= NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ netdev->hw_features |= NETIF_F_GRO;
+ netdev->hw_features |= NETIF_F_TSO;
+ netdev->hw_features |= NETIF_F_TSO6;
+ netdev->hw_features |= NETIF_F_RXCSUM;
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->netdev_ops = &mlx5i_netdev_ops;
+}
+
+/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
+static void mlx5i_cleanup(struct mlx5e_priv *priv)
+{
+ /* Do nothing .. */
+}
+
+#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
+
+static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+{
+ struct mlx5_qp_context *context = NULL;
+ u32 *in = NULL;
+ void *addr_path;
+ int ret = 0;
+ int inlen;
+ void *qpc;
+
+ inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
+ MLX5_QP_ENHANCED_ULP_STATELESS_MODE);
+
+ addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ MLX5_SET(ads, addr_path, port, 1);
+ MLX5_SET(ads, addr_path, grh, 1);
+
+ ret = mlx5_core_create_qp(mdev, qp, in, inlen);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
+ goto out;
+ }
+
+ /* QP states */
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ context->pri_path.port = 1;
+ context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
+ goto out;
+ }
+ memset(context, 0, sizeof(*context));
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
+ goto out;
+ }
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
+ goto out;
+ }
+
+out:
+ kfree(context);
+ kvfree(in);
+ return ret;
+}
+
+static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+{
+ mlx5_core_destroy_qp(mdev, qp);
+}
+
+static int mlx5i_init_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ int err;
+
+ err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
+ mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+}
+
+static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ int err;
+
+ priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
+
+ if (!priv->fs.ns)
+ return -EINVAL;
+
+ err = mlx5e_arfs_create_tables(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+ err);
+ priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+ }
+
+ err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
+
+ return 0;
+
+err_destroy_arfs_tables:
+ mlx5e_arfs_destroy_tables(priv);
+
+ return err;
+}
+
+static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_ttc_table(priv);
+ mlx5e_arfs_destroy_tables(priv);
+}
+
+static int mlx5i_init_rx(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_indirect_rqt(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_create_direct_rqts(priv);
+ if (err)
+ goto err_destroy_indirect_rqts;
+
+ err = mlx5e_create_indirect_tirs(priv);
+ if (err)
+ goto err_destroy_direct_rqts;
+
+ err = mlx5e_create_direct_tirs(priv);
+ if (err)
+ goto err_destroy_indirect_tirs;
+
+ err = mlx5i_create_flow_steering(priv);
+ if (err)
+ goto err_destroy_direct_tirs;
+
+ return 0;
+
+err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+err_destroy_indirect_tirs:
+ mlx5e_destroy_indirect_tirs(priv);
+err_destroy_direct_rqts:
+ mlx5e_destroy_direct_rqts(priv);
+err_destroy_indirect_rqts:
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ return err;
+}
+
+static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
+{
+ mlx5i_destroy_flow_steering(priv);
+ mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv);
+ mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+}
+
+static const struct mlx5e_profile mlx5i_nic_profile = {
+ .init = mlx5i_init,
+ .cleanup = mlx5i_cleanup,
+ .init_tx = mlx5i_init_tx,
+ .cleanup_tx = mlx5i_cleanup_tx,
+ .init_rx = mlx5i_init_rx,
+ .cleanup_rx = mlx5i_cleanup_rx,
+ .enable = NULL, /* mlx5i_enable */
+ .disable = NULL, /* mlx5i_disable */
+ .update_stats = NULL, /* mlx5i_update_stats */
+ .max_nch = mlx5e_get_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
+ .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+ .max_tc = MLX5I_MAX_NUM_TC,
+};
+
+/* mlx5i netdev NDOs */
+
+static int mlx5i_dev_init(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ /* Set dev address using underlay QP */
+ dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
+ dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
+ dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+
+ return 0;
+}
+
+static void mlx5i_dev_cleanup(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_qp_context context;
+
+ /* detach the qp from flow steering by resetting it */
+ mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp);
+}
+
+static int mlx5i_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ err = mlx5e_open_channels(priv, &priv->channels);
+ if (err)
+ goto err_clear_state_opened_flag;
+
+ mlx5e_refresh_tirs(priv, false);
+ mlx5e_activate_priv_channels(priv);
+ mutex_unlock(&priv->state_lock);
+ return 0;
+
+err_clear_state_opened_flag:
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int mlx5i_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ /* May already be CLOSED in case a previous configuration operation
+ * (e.g. RX/TX queue size change) that involves close & open failed.
+ */
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ netif_carrier_off(priv->netdev);
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+#ifdef notusedyet
+/* IPoIB RDMA netdev callbacks */
+static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
+ union ib_gid *gid, u16 lid, int set_qkey)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5_core_dev *mdev = epriv->mdev;
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ int err;
+
+ mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+ err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
+ if (err)
+ mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
+ ipriv->qp.qpn, gid->raw);
+
+ return err;
+}
+
+static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
+ union ib_gid *gid, u16 lid)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5_core_dev *mdev = epriv->mdev;
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ int err;
+
+ mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+
+ err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
+ if (err)
+ mlx5_core_dbg(mdev, "failed dettaching QPN 0x%x, MGID %pI6\n",
+ ipriv->qp.qpn, gid->raw);
+
+ return err;
+}
+
+static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn, u32 dqkey)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+ struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)];
+ struct mlx5_ib_ah *mah = to_mah(address);
+
+ return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, dqkey);
+}
+#endif
+
+static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return -EOPNOTSUPP;
+
+ if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+ mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *))
+{
+ const struct mlx5e_profile *profile = &mlx5i_nic_profile;
+ int nch = profile->max_nch(mdev);
+ struct net_device *netdev;
+ struct mlx5i_priv *ipriv;
+ struct mlx5e_priv *epriv;
+ int err;
+
+ if (mlx5i_check_required_hca_cap(mdev)) {
+ mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ /* This function should only be called once per mdev */
+ err = mlx5e_create_mdev_resources(mdev);
+ if (err)
+ return NULL;
+
+ netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
+ name, NET_NAME_UNKNOWN,
+ setup,
+ nch * MLX5E_MAX_NUM_TC,
+ nch);
+ if (!netdev) {
+ mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
+ goto free_mdev_resources;
+ }
+
+ ipriv = netdev_priv(netdev);
+ epriv = mlx5i_epriv(netdev);
+
+ epriv->wq = create_singlethread_workqueue("mlx5i");
+ if (!epriv->wq)
+ goto err_free_netdev;
+
+ profile->init(mdev, netdev, profile, ipriv);
+
+ mlx5e_attach_netdev(epriv);
+ netif_carrier_off(netdev);
+
+ /* TODO: set rdma_netdev func pointers
+ * rn = &ipriv->rn;
+ * rn->hca = ibdev;
+ * rn->send = mlx5i_xmit;
+ * rn->attach_mcast = mlx5i_attach_mcast;
+ * rn->detach_mcast = mlx5i_detach_mcast;
+ */
+ return netdev;
+
+err_free_netdev:
+ free_netdev(netdev);
+free_mdev_resources:
+ mlx5e_destroy_mdev_resources(mdev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
+
+static void mlx5_rdma_netdev_free(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ const struct mlx5e_profile *profile = priv->profile;
+
+ mlx5e_detach_netdev(priv);
+ profile->cleanup(priv);
+ destroy_workqueue(priv->wq);
+ free_netdev(netdev);
+
+ mlx5e_destroy_mdev_resources(priv->mdev);
+}
+EXPORT_SYMBOL(mlx5_rdma_netdev_free);
+
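
A sketch of how an RDMA client (e.g. the IPoIB ULP) would consume these
exports once the rdma_netdev function pointers in the TODO above are wired up;
ipoib_setup here is a hypothetical net_device setup callback:

        struct net_device *netdev;

        netdev = mlx5_rdma_netdev_alloc(mdev, ibdev, "ib%d", ipoib_setup);
        /* note the mixed failure convention above: ERR_PTR(-EOPNOTSUPP)
         * when the HCA caps are missing, NULL on allocation failure
         */
        if (IS_ERR_OR_NULL(netdev))
                return netdev ? PTR_ERR(netdev) : -ENOMEM;
        /* ... use the netdev ... */
        mlx5_rdma_netdev_free(netdev);
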
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
index 63c666d0b739..bae0a5cbc8ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
@@ -1,5 +1,5 @@
-/* QLogic qed NIC Driver
- * Copyright (c) 2015-2017 QLogic Corporation
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -17,7 +17,7 @@
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
+ * disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
@@ -29,19 +29,26 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef _QED_PTP_H
-#define _QED_PTP_H
-#include <linux/types.h>
-int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_ptp_filter_type type);
-int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
-int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
-int qed_ptp_read_cc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u64 *cycles);
-int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb);
-int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#ifndef __MLX5E_IPOB_H__
+#define __MLX5E_IPOB_H__
-#endif
+#include <linux/mlx5/fs.h>
+#include "en.h"
+
+#define MLX5I_MAX_NUM_TC 1
+
+/* ipoib rdma netdev's private data structure */
+struct mlx5i_priv {
+ struct mlx5_core_qp qp;
+ char *mlx5e_priv[0];
+};
+
+/* Extract mlx5e_priv from IPoIB netdev */
+#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))
+
+netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey);
+void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+
+#endif /* __MLX5E_IPOB_H__ */
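
The mlx5i_epriv() cast works because ipoib.c sizes the netdev private area as
sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv), so the ethernet priv
sits directly after the IPoIB priv; roughly:

        /* netdev_priv(netdev) layout (sketch):
         *   struct mlx5i_priv   <- netdev_priv(netdev)
         *   struct mlx5e_priv   <- mlx5i_epriv(netdev), reached through
         *                          the mlx5e_priv[0] flexible tail above
         */
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
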
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0ad66324247f..0c123d571b4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1280,6 +1280,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
+ .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
+ .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
#endif
};
@@ -1514,8 +1516,10 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
{ PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
- { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
- { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5, PCIe 4.0 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5 Ex */
+ { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */
+ { PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */
+ { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b3dabe6e8836..fbc6e9e9e305 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -141,6 +141,11 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev,
u32 *encap_id);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 namespace, u8 num_actions,
+ void *modify_actions, u32 *modify_header_id);
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);
+
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 6b6c30deee83..2fb8c6585ac7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -15,7 +15,8 @@ obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
- spectrum_acl.o spectrum_flower.o
+ spectrum_acl.o spectrum_flower.o \
+ spectrum_cnt.o spectrum_dpipe.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index a1b48421648a..479511cf79bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -1043,13 +1043,6 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
-/* cmd_mbox_sw2hw_cq_oi
- * When set, overrun ignore is enabled. When set, updates of
- * CQ consumer counter (poll for completion) or Request completion
- * notifications (Arm CQ) DoorBells should not be rung on that CQ.
- */
-MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
-
/* cmd_mbox_sw2hw_cq_st
* Event delivery state machine
* 0x0 - FIRED
@@ -1132,11 +1125,6 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
-/* cmd_mbox_sw2hw_eq_oi
- * When set, overrun ignore is enabled.
- */
-MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
-
/* cmd_mbox_sw2hw_eq_st
* Event delivery state machine
* 0x0 - FIRED
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index a4c07841aaf6..affe84eb4bff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -40,9 +40,6 @@
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
@@ -74,23 +71,9 @@ static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
static const char mlxsw_core_driver_name[] = "mlxsw_core";
-static struct dentry *mlxsw_core_dbg_root;
-
static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;
-struct mlxsw_core_pcpu_stats {
- u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
- u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
- u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
- u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
- struct u64_stats_sync syncp;
- u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
- u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
- u32 trap_rx_invalid;
- u32 port_rx_invalid;
-};
-
struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
@@ -121,23 +104,48 @@ struct mlxsw_core {
spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
} emad;
- struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
- struct dentry *dbg_dir;
- struct {
- struct debugfs_blob_wrapper vsd_blob;
- struct debugfs_blob_wrapper psid_blob;
- } dbg;
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
struct mlxsw_thermal *thermal;
- struct mlxsw_core_port ports[MLXSW_PORT_MAX_PORTS];
+ struct mlxsw_core_port *ports;
+ unsigned int max_ports;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
+#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
+
+static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
+{
+ /* Switch ports are numbered from 1 to the queried value */
+ if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
+ mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
+ MAX_SYSTEM_PORT) + 1;
+ else
+ mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
+
+ mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
+ sizeof(struct mlxsw_core_port), GFP_KERNEL);
+ if (!mlxsw_core->ports)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
+{
+ kfree(mlxsw_core->ports);
+}
+
+unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->max_ports;
+}
+EXPORT_SYMBOL(mlxsw_core_max_ports);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->driver_priv;
@@ -703,91 +711,6 @@ err_out:
* Core functions
*****************/
-static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_core *mlxsw_core = file->private;
- struct mlxsw_core_pcpu_stats *p;
- u64 rx_packets, rx_bytes;
- u64 tmp_rx_packets, tmp_rx_bytes;
- u32 rx_dropped, rx_invalid;
- unsigned int start;
- int i;
- int j;
- static const char hdr[] =
- " NUM RX_PACKETS RX_BYTES RX_DROPPED\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
- rx_packets = 0;
- rx_bytes = 0;
- rx_dropped = 0;
- for_each_possible_cpu(j) {
- p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
- do {
- start = u64_stats_fetch_begin(&p->syncp);
- tmp_rx_packets = p->trap_rx_packets[i];
- tmp_rx_bytes = p->trap_rx_bytes[i];
- } while (u64_stats_fetch_retry(&p->syncp, start));
-
- rx_packets += tmp_rx_packets;
- rx_bytes += tmp_rx_bytes;
- rx_dropped += p->trap_rx_dropped[i];
- }
- seq_printf(file, "trap %3d %12llu %12llu %10u\n",
- i, rx_packets, rx_bytes, rx_dropped);
- }
- rx_invalid = 0;
- for_each_possible_cpu(j) {
- p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
- rx_invalid += p->trap_rx_invalid;
- }
- seq_printf(file, "trap INV %10u\n",
- rx_invalid);
-
- for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
- rx_packets = 0;
- rx_bytes = 0;
- rx_dropped = 0;
- for_each_possible_cpu(j) {
- p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
- do {
- start = u64_stats_fetch_begin(&p->syncp);
- tmp_rx_packets = p->port_rx_packets[i];
- tmp_rx_bytes = p->port_rx_bytes[i];
- } while (u64_stats_fetch_retry(&p->syncp, start));
-
- rx_packets += tmp_rx_packets;
- rx_bytes += tmp_rx_bytes;
- rx_dropped += p->port_rx_dropped[i];
- }
- seq_printf(file, "port %3d %12llu %12llu %10u\n",
- i, rx_packets, rx_bytes, rx_dropped);
- }
- rx_invalid = 0;
- for_each_possible_cpu(j) {
- p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
- rx_invalid += p->port_rx_invalid;
- }
- seq_printf(file, "port INV %10u\n",
- rx_invalid);
- return 0;
-}
-
-static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
-{
- struct mlxsw_core *mlxsw_core = inode->i_private;
-
- return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
-}
-
-static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
- .owner = THIS_MODULE,
- .open = mlxsw_core_rx_stats_dbg_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
-
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
spin_lock(&mlxsw_core_driver_list_lock);
@@ -835,39 +758,13 @@ static void mlxsw_core_driver_put(const char *kind)
spin_unlock(&mlxsw_core_driver_list_lock);
}
-static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
-{
- const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
-
- mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
- mlxsw_core_dbg_root);
- if (!mlxsw_core->dbg_dir)
- return -ENOMEM;
- debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
- mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
- mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
- mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
- debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
- &mlxsw_core->dbg.vsd_blob);
- mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
- mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
- debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
- &mlxsw_core->dbg.psid_blob);
- return 0;
-}
-
-static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
-{
- debugfs_remove_recursive(mlxsw_core->dbg_dir);
-}
-
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
unsigned int count)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= MLXSW_PORT_MAX_PORTS)
+ if (port_index >= mlxsw_core->max_ports)
return -EINVAL;
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
@@ -879,7 +776,7 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= MLXSW_PORT_MAX_PORTS)
+ if (port_index >= mlxsw_core->max_ports)
return -EINVAL;
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
@@ -1101,18 +998,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
- mlxsw_core->pcpu_stats =
- netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
- if (!mlxsw_core->pcpu_stats) {
- err = -ENOMEM;
- goto err_alloc_stats;
- }
-
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
&mlxsw_core->res);
if (err)
goto err_bus_init;
+ err = mlxsw_ports_init(mlxsw_core);
+ if (err)
+ goto err_ports_init;
+
if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
alloc_size = sizeof(u8) *
@@ -1148,15 +1042,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_driver_init;
}
- err = mlxsw_core_debugfs_init(mlxsw_core);
- if (err)
- goto err_debugfs_init;
-
return 0;
-err_debugfs_init:
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
@@ -1167,10 +1054,10 @@ err_devlink_register:
err_emad_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
+ mlxsw_ports_fini(mlxsw_core);
+err_ports_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- free_percpu(mlxsw_core->pcpu_stats);
-err_alloc_stats:
devlink_free(devlink);
err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
@@ -1183,15 +1070,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- mlxsw_core_debugfs_fini(mlxsw_core);
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
+ mlxsw_ports_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
- free_percpu(mlxsw_core->pcpu_stats);
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
@@ -1639,7 +1525,6 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
{
struct mlxsw_rx_listener_item *rxl_item;
const struct mlxsw_rx_listener *rxl;
- struct mlxsw_core_pcpu_stats *pcpu_stats;
u8 local_port;
bool found = false;
@@ -1661,7 +1546,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
__func__, local_port, rx_info->trap_id);
if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
- (local_port >= MLXSW_PORT_MAX_PORTS))
+ (local_port >= mlxsw_core->max_ports))
goto drop;
rcu_read_lock();
@@ -1678,26 +1563,10 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
if (!found)
goto drop;
- pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
- u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->port_rx_packets[local_port]++;
- pcpu_stats->port_rx_bytes[local_port] += skb->len;
- pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
- pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
- u64_stats_update_end(&pcpu_stats->syncp);
-
rxl->func(skb, local_port, rxl_item->priv);
return;
drop:
- if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
- this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
- else
- this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
- if (local_port >= MLXSW_PORT_MAX_PORTS)
- this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
- else
- this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
@@ -1926,15 +1795,8 @@ static int __init mlxsw_core_module_init(void)
err = -ENOMEM;
goto err_alloc_ordered_workqueue;
}
- mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
- if (!mlxsw_core_dbg_root) {
- err = -ENOMEM;
- goto err_debugfs_create_dir;
- }
return 0;
-err_debugfs_create_dir:
- destroy_workqueue(mlxsw_owq);
err_alloc_ordered_workqueue:
destroy_workqueue(mlxsw_wq);
return err;
@@ -1942,7 +1804,6 @@ err_alloc_ordered_workqueue:
static void __exit mlxsw_core_module_exit(void)
{
- debugfs_remove_recursive(mlxsw_core_dbg_root);
destroy_workqueue(mlxsw_owq);
destroy_workqueue(mlxsw_wq);
}
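
With the fixed MLXSW_PORT_MAX_PORTS arrays gone, per-port state elsewhere in
mlxsw is expected to be sized and bounds-checked against the queried value; a
typical consumer loop (sketch, handle_port() is hypothetical):

        unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
        u8 local_port;

        /* switch ports are numbered from 1 */
        for (local_port = 1; local_port < max_ports; local_port++)
                handle_port(local_port);
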
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index cf38cf9027f8..7fb35395adf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -57,6 +57,8 @@ struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
+unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 5f337715a4da..46304ffb9449 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -567,6 +567,89 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
}
+/* VLAN Action
+ * -----------
+ * VLAN action is used for manipulating VLANs. It can be used to implement QinQ,
+ * VLAN translation, change of PCP bits of the VLAN tag, push, pop or swap VLANs
+ * and more.
+ */
+
+#define MLXSW_AFA_VLAN_CODE 0x02
+#define MLXSW_AFA_VLAN_SIZE 1
+
+enum mlxsw_afa_vlan_vlan_tag_cmd {
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
+ MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
+};
+
+enum mlxsw_afa_vlan_cmd {
+ MLXSW_AFA_VLAN_CMD_NOP,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER,
+ MLXSW_AFA_VLAN_CMD_SET_INNER,
+ MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
+ MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
+ MLXSW_AFA_VLAN_CMD_SWAP,
+};
+
+/* afa_vlan_vlan_tag_cmd
+ * Tag command: push, pop, nop VLAN header.
+ */
+MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);
+
+/* afa_vlan_vid_cmd */
+MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);
+
+/* afa_vlan_vid */
+MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);
+
+/* afa_vlan_ethertype_cmd */
+MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);
+
+/* afa_vlan_ethertype
+ * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
+ */
+MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);
+
+/* afa_vlan_pcp_cmd */
+MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);
+
+/* afa_vlan_pcp */
+MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);
+
+static inline void
+mlxsw_afa_vlan_pack(char *payload,
+ enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
+ enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
+ enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
+ enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
+{
+ mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
+ mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
+ mlxsw_afa_vlan_vid_set(payload, vid);
+ mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
+ mlxsw_afa_vlan_pcp_set(payload, pcp);
+ mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
+ mlxsw_afa_vlan_ethertype_set(payload, ethertype);
+}
+
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+ u16 vid, u8 pcp, u8 et)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_VLAN_CODE,
+ MLXSW_AFA_VLAN_SIZE);
+
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
+ MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
+
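A minimal caller-side sketch for the new VLAN modify action (not part of this patch; mlxsw_afa_block_create/commit/destroy are existing flex-action helpers, and the rule wiring is elided):

struct mlxsw_afa_block *block;
int err;

block = mlxsw_afa_block_create(mlxsw_afa);
if (!block)
	return -ENOMEM;
/* Rewrite the outer tag to VID 10, PCP 3. The last argument is an
 * index into the SVER EtherType table, not a raw EtherType value.
 */
err = mlxsw_afa_block_append_vlan_modify(block, 10, 3, 0);
if (!err)
	err = mlxsw_afa_block_commit(block);
if (err)
	mlxsw_afa_block_destroy(block);
/* On success the committed block stays bound to the ACL rule. */
return err;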
/* Trap / Discard Action
* ---------------------
* The Trap / Discard action enables trapping / mirroring packets to the CPU
@@ -677,3 +760,98 @@ err_append_action:
return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
+
+/* Policing and Counting Action
+ * ----------------------------
+ * Policing and Counting action is used to bind policers and counters
+ * to ACL rules.
+ */
+
+#define MLXSW_AFA_POLCNT_CODE 0x08
+#define MLXSW_AFA_POLCNT_SIZE 1
+
+enum mlxsw_afa_polcnt_counter_set_type {
+ /* No count */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* afa_polcnt_counter_set_type
+ * Counter set type for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);
+
+/* afa_polcnt_counter_index
+ * Counter index for flow counters.
+ */
+MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);
+
+static inline void
+mlxsw_afa_polcnt_pack(char *payload,
+ enum mlxsw_afa_polcnt_counter_set_type set_type,
+ u32 counter_index)
+{
+ mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
+ mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
+}
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 counter_index)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_POLCNT_CODE,
+ MLXSW_AFA_POLCNT_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
+ counter_index);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
+
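The polcnt action only references a counter index; allocation is handled by the Spectrum counter pool added later in this series. A hedged pairing sketch (rulei->act_block is an assumed rule-info field, not taken from this hunk):

unsigned int counter_index;
int err;

err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
if (err)
	return err;
err = mlxsw_afa_block_append_counter(rulei->act_block, counter_index);
if (err)
	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
return err;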
+/* Virtual Router and Forwarding Domain Action
+ * -------------------------------------------
+ * Virtual Switch action is used to manipulate the Virtual Router (VR),
+ * MPLS label space and the Forwarding Identifier (FID).
+ */
+
+#define MLXSW_AFA_VIRFWD_CODE 0x0E
+#define MLXSW_AFA_VIRFWD_SIZE 1
+
+enum mlxsw_afa_virfwd_fid_cmd {
+ /* Do nothing */
+ MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
+ /* Set the Forwarding Identifier (FID) to fid */
+ MLXSW_AFA_VIRFWD_FID_CMD_SET,
+};
+
+/* afa_virfwd_fid_cmd */
+MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);
+
+/* afa_virfwd_fid
+ * The FID value.
+ */
+MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);
+
+static inline void mlxsw_afa_virfwd_pack(char *payload,
+ enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
+ u16 fid)
+{
+ mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
+ mlxsw_afa_virfwd_fid_set(payload, fid);
+}
+
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_VIRFWD_CODE,
+ MLXSW_AFA_VIRFWD_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 43f78dcfe394..bd8b91d02880 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -62,5 +62,10 @@ void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
+int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
+ u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 counter_index);
+int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index e4fcba7c2af2..c75e9141e3ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -54,6 +54,8 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_DST_IP6_LO,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_VID,
+ MLXSW_AFK_ELEMENT_PCP,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -88,7 +90,7 @@ struct mlxsw_afk_element_info {
MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
_element, _offset, 0, _size)
-/* For the purpose of the driver, define a internal storage scratchpad
+/* For the purpose of the driver, define an internal storage scratchpad
* that will be used to store key/mask values. For each defined element type
* define an internal storage geometry.
*/
@@ -98,6 +100,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
+ MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index a223c85dfde0..23f7d828cf67 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -44,8 +44,6 @@
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
#include <linux/string.h>
#include "pci_hw.h"
@@ -57,8 +55,6 @@
static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
-static struct dentry *mlxsw_pci_dbg_root;
-
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
@@ -71,21 +67,6 @@ enum mlxsw_pci_queue_type {
MLXSW_PCI_QUEUE_TYPE_EQ,
};
-static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
-{
- switch (q_type) {
- case MLXSW_PCI_QUEUE_TYPE_SDQ:
- return "sdq";
- case MLXSW_PCI_QUEUE_TYPE_RDQ:
- return "rdq";
- case MLXSW_PCI_QUEUE_TYPE_CQ:
- return "cq";
- case MLXSW_PCI_QUEUE_TYPE_EQ:
- return "eq";
- }
- BUG();
-}
-
#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
static const u16 mlxsw_pci_doorbell_type_offset[] = {
@@ -155,7 +136,6 @@ struct mlxsw_pci {
u8 __iomem *hw_addr;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
- struct msix_entry msix_entry;
struct mlxsw_core *core;
struct {
struct mlxsw_pci_mem_item *items;
@@ -174,7 +154,6 @@ struct mlxsw_pci {
} comp;
} cmd;
struct mlxsw_bus_info bus_info;
- struct dentry *dbg_dir;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -261,21 +240,11 @@ static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}
-static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
-}
-
static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}
-static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
-}
-
static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_queue_type q_type, u8 q_num)
@@ -390,26 +359,6 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
-static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
- struct mlxsw_pci_queue *q;
- int i;
- static const char hdr[] =
- "NUM PROD_COUNT CONS_COUNT COUNT\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
- q = mlxsw_pci_sdq_get(mlxsw_pci, i);
- spin_lock_bh(&q->lock);
- seq_printf(file, "%3d %10d %10d %5d\n",
- i, q->producer_counter, q->consumer_counter,
- q->count);
- spin_unlock_bh(&q->lock);
- }
- return 0;
-}
-
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
int index, char *frag_data, size_t frag_len,
int direction)
@@ -544,26 +493,6 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
}
}
-static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
- struct mlxsw_pci_queue *q;
- int i;
- static const char hdr[] =
- "NUM PROD_COUNT CONS_COUNT COUNT\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
- q = mlxsw_pci_rdq_get(mlxsw_pci, i);
- spin_lock_bh(&q->lock);
- seq_printf(file, "%3d %10d %10d %5d\n",
- i, q->producer_counter, q->consumer_counter,
- q->count);
- spin_unlock_bh(&q->lock);
- }
- return 0;
-}
-
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
@@ -580,7 +509,6 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
- mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -602,27 +530,6 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
-static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
-
- struct mlxsw_pci_queue *q;
- int i;
- static const char hdr[] =
- "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
- q = mlxsw_pci_cq_get(mlxsw_pci, i);
- spin_lock_bh(&q->lock);
- seq_printf(file, "%3d %10d %10d %10d %5d\n",
- i, q->consumer_counter, q->u.cq.comp_sdq_count,
- q->u.cq.comp_rdq_count, q->count);
- spin_unlock_bh(&q->lock);
- }
- return 0;
-}
-
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
@@ -755,7 +662,6 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
- mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -777,27 +683,6 @@ static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}
-static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
-{
- struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
- struct mlxsw_pci_queue *q;
- int i;
- static const char hdr[] =
- "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";
-
- seq_printf(file, hdr);
- for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
- q = mlxsw_pci_eq_get(mlxsw_pci, i);
- spin_lock_bh(&q->lock);
- seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
- i, q->consumer_counter, q->u.eq.ev_cmd_count,
- q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
- q->count);
- spin_unlock_bh(&q->lock);
- }
- return 0;
-}
-
static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
@@ -868,7 +753,6 @@ struct mlxsw_pci_queue_ops {
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
void (*tasklet)(unsigned long data);
- int (*dbg_read)(struct seq_file *s, void *data);
u16 elem_count;
u8 elem_size;
};
@@ -877,7 +761,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_SDQ,
.init = mlxsw_pci_sdq_init,
.fini = mlxsw_pci_sdq_fini,
- .dbg_read = mlxsw_pci_sdq_dbg_read,
.elem_count = MLXSW_PCI_WQE_COUNT,
.elem_size = MLXSW_PCI_WQE_SIZE,
};
@@ -886,7 +769,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_RDQ,
.init = mlxsw_pci_rdq_init,
.fini = mlxsw_pci_rdq_fini,
- .dbg_read = mlxsw_pci_rdq_dbg_read,
.elem_count = MLXSW_PCI_WQE_COUNT,
.elem_size = MLXSW_PCI_WQE_SIZE
};
@@ -896,7 +778,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
.init = mlxsw_pci_cq_init,
.fini = mlxsw_pci_cq_fini,
.tasklet = mlxsw_pci_cq_tasklet,
- .dbg_read = mlxsw_pci_cq_dbg_read,
.elem_count = MLXSW_PCI_CQE_COUNT,
.elem_size = MLXSW_PCI_CQE_SIZE
};
@@ -906,7 +787,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
.init = mlxsw_pci_eq_init,
.fini = mlxsw_pci_eq_fini,
.tasklet = mlxsw_pci_eq_tasklet,
- .dbg_read = mlxsw_pci_eq_dbg_read,
.elem_count = MLXSW_PCI_EQE_COUNT,
.elem_size = MLXSW_PCI_EQE_SIZE
};
@@ -984,9 +864,7 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
const struct mlxsw_pci_queue_ops *q_ops,
u8 num_qs)
{
- struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_type_group *queue_group;
- char tmp[16];
int i;
int err;
@@ -1003,10 +881,6 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
queue_group->count = num_qs;
- sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
- debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
- q_ops->dbg_read);
-
return 0;
err_queue_init:
@@ -1534,7 +1408,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_aqs_init;
- err = request_irq(mlxsw_pci->msix_entry.vector,
+ err = request_irq(pci_irq_vector(pdev, 0),
mlxsw_pci_eq_irq_handler, 0,
mlxsw_pci->bus_info.device_kind, mlxsw_pci);
if (err) {
@@ -1567,7 +1441,7 @@ static void mlxsw_pci_fini(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+ free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
mlxsw_pci_aqs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
@@ -1842,8 +1716,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_sw_reset;
}
- err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
- if (err) {
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0) {
dev_err(&pdev->dev, "MSI-X init failed\n");
goto err_msix_init;
}
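This hunk and the following ones migrate the driver from pci_enable_msix_exact() with a cached struct msix_entry to the managed pci_alloc_irq_vectors() API. The overall pattern, sketched with error handling abbreviated:

err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
if (err < 0)
	return err;			/* no MSI-X vector available */
/* Vectors are now looked up by index instead of a stored entry. */
err = request_irq(pci_irq_vector(pdev, 0), handler, 0, name, priv);
if (err)
	pci_free_irq_vectors(pdev);	/* symmetric teardown */
return err;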
@@ -1852,14 +1726,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
- mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
- mlxsw_pci_dbg_root);
- if (!mlxsw_pci->dbg_dir) {
- dev_err(&pdev->dev, "Failed to create debugfs dir\n");
- err = -ENOMEM;
- goto err_dbg_create_dir;
- }
-
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci);
if (err) {
@@ -1870,9 +1736,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
- debugfs_remove_recursive(mlxsw_pci->dbg_dir);
-err_dbg_create_dir:
- pci_disable_msix(mlxsw_pci->pdev);
+ pci_free_irq_vectors(mlxsw_pci->pdev);
err_msix_init:
err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
@@ -1892,8 +1756,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
mlxsw_core_bus_device_unregister(mlxsw_pci->core);
- debugfs_remove_recursive(mlxsw_pci->dbg_dir);
- pci_disable_msix(mlxsw_pci->pdev);
+ pci_free_irq_vectors(mlxsw_pci->pdev);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
@@ -1916,15 +1779,11 @@ EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
static int __init mlxsw_pci_module_init(void)
{
- mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
- if (!mlxsw_pci_dbg_root)
- return -ENOMEM;
return 0;
}
static void __exit mlxsw_pci_module_exit(void)
{
- debugfs_remove_recursive(mlxsw_pci_dbg_root);
}
module_init(mlxsw_pci_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 3d42146473b3..c580abba8d34 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -49,20 +49,12 @@
#define MLXSW_PORT_MID 0xd000
-#define MLXSW_PORT_MAX_PHY_PORTS 0x40
-#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
-
#define MLXSW_PORT_MAX_IB_PHY_PORTS 36
#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
-#define MLXSW_PORT_DEVID_BITS_OFFSET 10
-#define MLXSW_PORT_PHY_BITS_OFFSET 4
-#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
-
#define MLXSW_PORT_CPU_PORT 0x0
-#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2)
-#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+#define MLXSW_PORT_DONT_CARE 0xFF
#define MLXSW_PORT_MODULE_MAX_WIDTH 4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index d9616daf8a70..83b277c8090e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4125,6 +4125,60 @@ MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
*/
MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
+/* Shared between ingress/egress */
+enum mlxsw_reg_ritr_counter_set_type {
+ /* No Count. */
+ MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT = 0x0,
+ /* Basic. Used for router interfaces, counting the following:
+ * - Error and Discard counters.
+ * - Unicast, Multicast and Broadcast counters. Sharing the
+ * same set of counters for the different types of traffic
+ * (IPv4, IPv6 and MPLS).
+ */
+ MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC = 0x9,
+};
+
+/* reg_ritr_ingress_counter_index
+ * Counter Index for flow counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ingress_counter_index, 0x38, 0, 24);
+
+/* reg_ritr_ingress_counter_set_type
+ * Ingress Counter Set Type for router interface counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ingress_counter_set_type, 0x38, 24, 8);
+
+/* reg_ritr_egress_counter_index
+ * Counter Index for flow counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, egress_counter_index, 0x3C, 0, 24);
+
+/* reg_ritr_egress_counter_set_type
+ * Egress Counter Set Type for router interface counter.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, egress_counter_set_type, 0x3C, 24, 8);
+
+static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index,
+ bool enable, bool egress)
+{
+ enum mlxsw_reg_ritr_counter_set_type set_type;
+
+ if (enable)
+ set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC;
+ else
+ set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT;
+ mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
+
+ if (egress)
+ mlxsw_reg_ritr_egress_counter_index_set(payload, index);
+ else
+ mlxsw_reg_ritr_ingress_counter_index_set(payload, index);
+}
+
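A sketch of how the new mlxsw_reg_ritr_counter_pack() helper would typically be used to bind a basic ingress counter to an existing router interface (query first so unrelated RITR fields are preserved; not part of this hunk):

char ritr_pl[MLXSW_REG_RITR_LEN];
int err;

mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
if (err)
	return err;
/* true enables the basic counter set; false selects the ingress side */
mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, true, false);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);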
static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
{
MLXSW_REG_ZERO(ritr, payload);
@@ -4141,7 +4195,8 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
enum mlxsw_reg_ritr_if_type type,
- u16 rif, u16 mtu, const char *mac)
+ u16 rif, u16 vr_id, u16 mtu,
+ const char *mac)
{
bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
@@ -4153,6 +4208,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
+ mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
mlxsw_reg_ritr_mtu_set(payload, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
}
@@ -4285,6 +4341,129 @@ static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
}
+/* RICNT - Router Interface Counter Register
+ * -----------------------------------------
+ * The RICNT register retrieves per router interface performance counters.
+ */
+#define MLXSW_REG_RICNT_ID 0x800B
+#define MLXSW_REG_RICNT_LEN 0x100
+
+MLXSW_REG_DEFINE(ricnt, MLXSW_REG_RICNT_ID, MLXSW_REG_RICNT_LEN);
+
+/* reg_ricnt_counter_index
+ * Counter index
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, counter_index, 0x04, 0, 24);
+
+enum mlxsw_reg_ricnt_counter_set_type {
+ /* No Count. */
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Basic. Used for router interfaces, counting the following:
+ * - Error and Discard counters.
+ * - Unicast, Multicast and Broadcast counters. Sharing the
+ * same set of counters for the different types of traffic
+ * (IPv4, IPv6 and MPLS).
+ */
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC = 0x09,
+};
+
+/* reg_ricnt_counter_set_type
+ * Counter Set Type for router interface counter
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, counter_set_type, 0x04, 24, 8);
+
+enum mlxsw_reg_ricnt_opcode {
+ /* Nop. Supported only for read access. */
+ MLXSW_REG_RICNT_OPCODE_NOP = 0x00,
+ /* Clear. Setting the clr bit will reset the counter value for
+ * all counters of the specified Router Interface.
+ */
+ MLXSW_REG_RICNT_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_ricnt_opcode
+ * Opcode
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ricnt, op, 0x00, 28, 4);
+
+/* reg_ricnt_good_unicast_packets
+ * good unicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_unicast_packets, 0x08, 0, 64);
+
+/* reg_ricnt_good_multicast_packets
+ * good multicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_multicast_packets, 0x10, 0, 64);
+
+/* reg_ricnt_good_broadcast_packets
+ * good broadcast packets
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_broadcast_packets, 0x18, 0, 64);
+
+/* reg_ricnt_good_unicast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good unicast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_unicast_bytes, 0x20, 0, 64);
+
+/* reg_ricnt_good_multicast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good multicast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_multicast_bytes, 0x28, 0, 64);
+
+/* reg_ricnt_good_broadcast_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for good broadcast frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, good_broadcast_bytes, 0x30, 0, 64);
+
+/* reg_ricnt_error_packets
+ * A count of errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, error_packets, 0x38, 0, 64);
+
+/* reg_ricnt_discard_packets
+ * A count of non-errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, discard_packets, 0x40, 0, 64);
+
+/* reg_ricnt_error_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for errored frames.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, error_bytes, 0x48, 0, 64);
+
+/* reg_ricnt_discard_bytes
+ * A count of L3 data and padding octets not including L2 headers
+ * for non-errored frames that do not pass the router checks.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, ricnt, discard_bytes, 0x50, 0, 64);
+
+static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index,
+ enum mlxsw_reg_ricnt_opcode op)
+{
+ MLXSW_REG_ZERO(ricnt, payload);
+ mlxsw_reg_ricnt_op_set(payload, op);
+ mlxsw_reg_ricnt_counter_index_set(payload, index);
+ mlxsw_reg_ricnt_counter_set_type_set(payload,
+ MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC);
+}
+
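Reading the counters back is a plain register query with the NOP opcode; a minimal sketch (not part of this patch):

char ricnt_pl[MLXSW_REG_RICNT_LEN];
int err;

mlxsw_reg_ricnt_pack(ricnt_pl, counter_index, MLXSW_REG_RICNT_OPCODE_NOP);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
if (err)
	return err;
*p_packets = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
*p_bytes = mlxsw_reg_ricnt_good_unicast_bytes_get(ricnt_pl);
return 0;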
/* RALTA - Router Algorithmic LPM Tree Allocation Register
* -------------------------------------------------------
* RALTA is used to allocate the LPM trees of the SHSPM method.
@@ -5504,6 +5683,70 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
mlxsw_reg_mpsc_rate_set(payload, rate);
}
+/* MGPC - Monitoring General Purpose Counter Set Register
+ * -------------------------------------------------------
+ * The MGPC register retrieves and sets the General Purpose Counter Set.
+ */
+#define MLXSW_REG_MGPC_ID 0x9081
+#define MLXSW_REG_MGPC_LEN 0x18
+
+MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN);
+
+enum mlxsw_reg_mgpc_counter_set_type {
+ /* No count */
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* reg_mgpc_counter_set_type
+ * Counter set type.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8);
+
+/* reg_mgpc_counter_index
+ * Counter index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24);
+
+enum mlxsw_reg_mgpc_opcode {
+ /* Nop */
+ MLXSW_REG_MGPC_OPCODE_NOP = 0x00,
+ /* Clear counters */
+ MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08,
+};
+
+/* reg_mgpc_opcode
+ * Opcode.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4);
+
+/* reg_mgpc_byte_counter
+ * Byte counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64);
+
+/* reg_mgpc_packet_counter
+ * Packet counter value.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64);
+
+static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
+ enum mlxsw_reg_mgpc_opcode opcode,
+ enum mlxsw_reg_mgpc_counter_set_type set_type)
+{
+ MLXSW_REG_ZERO(mgpc, payload);
+ mlxsw_reg_mgpc_counter_index_set(payload, counter_index);
+ mlxsw_reg_mgpc_counter_set_type_set(payload, set_type);
+ mlxsw_reg_mgpc_opcode_set(payload, opcode);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -5960,6 +6203,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rgcr),
MLXSW_REG(ritr),
MLXSW_REG(ratr),
+ MLXSW_REG(ricnt),
MLXSW_REG(ralta),
MLXSW_REG(ralst),
MLXSW_REG(raltb),
@@ -5977,6 +6221,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
+ MLXSW_REG(mgpc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index bce8c2e00630..9556d934714b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -43,11 +43,15 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
+ MLXSW_RES_ID_COUNTER_POOL_SIZE,
MLXSW_RES_ID_MAX_SPAN,
+ MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
+ MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_MAX_BUFFER_SIZE,
+ MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
MLXSW_RES_ID_ACL_MAX_TCAM_RULES,
MLXSW_RES_ID_ACL_MAX_REGIONS,
@@ -59,6 +63,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
+ MLXSW_RES_ID_MAX_LPM_TREES,
/* Internal resources.
* Determined by the SW, not queried from the HW.
@@ -75,11 +80,15 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+ [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
+ [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
+ [MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
+ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
[MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902,
[MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
@@ -91,6 +100,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
+ [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
};
struct mlxsw_res {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 16484f24b7db..88357cee7679 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -66,6 +66,8 @@
#include "port.h"
#include "trap.h"
#include "txheader.h"
+#include "spectrum_cnt.h"
+#include "spectrum_dpipe.h"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
@@ -138,6 +140,60 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes)
+{
+ char mgpc_pl[MLXSW_REG_MGPC_LEN];
+ int err;
+
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+ if (err)
+ return err;
+ *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
+ *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
+ return 0;
+}
+
+static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ char mgpc_pl[MLXSW_REG_MGPC_LEN];
+
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
+ MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
+}
+
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index)
+{
+ int err;
+
+ err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ p_counter_index);
+ if (err)
+ return err;
+ err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
+ if (err)
+ goto err_counter_clear;
+ return 0;
+
+err_counter_clear:
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ *p_counter_index);
+ return err;
+}
+
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ counter_index);
+}
+
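Taken together, the helpers above give a flow counter its full lifecycle: allocation clears the hardware value, reads via the MGPC NOP opcode are non-destructive, and the index returns to the sub-pool on free. A compact sketch:

unsigned int counter_index;
u64 packets, bytes;
int err;

err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
if (err)
	return err;
/* ... traffic hits whatever the counter is bound to ... */
err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index, &packets, &bytes);
mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
return err;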
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
@@ -304,9 +360,10 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
return false;
}
-static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
+static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
+ int mtu)
{
- return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
+ return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
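The patch replaces the compile-time MLXSW_SP_BYTES_TO_CELLS() macro with runtime conversion keyed off the new CELL_SIZE resource. A sketch of the two conversion helpers, assuming the cell size is cached on struct mlxsw_sp at init time (the field name is an assumption):

static u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	/* Round up: a partially filled cell still occupies buffer space. */
	return DIV_ROUND_UP(bytes, mlxsw_sp->cell_size);
}

static u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	return cells * mlxsw_sp->cell_size;
}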
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
@@ -319,8 +376,9 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
* updated according to the mtu value
*/
if (mlxsw_sp_span_is_egress_mirror(port)) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
- mlxsw_sp_span_mtu_to_buffsize(mtu));
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
if (err) {
netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
@@ -357,8 +415,10 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
/* if it is an egress SPAN, bind a shared buffer to it */
if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
- mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
+ port->dev->mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
if (err) {
netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
@@ -745,19 +805,47 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
- bool pause_en, bool pfc_en, u16 delay)
+static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
+ int mtu)
{
- u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+ return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
+}
- delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
- MLXSW_SP_PAUSE_DELAY;
+#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
+
+static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
+ u16 delay)
+{
+ delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
+ BITS_PER_BYTE));
+ return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
+ mtu);
+}
+
+/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
+ * Assumes 100m cable and maximum MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY 58752
- if (pause_en || pfc_en)
- mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
- pg_size + delay, pg_size);
+static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
+ u16 delay, bool pfc, bool pause)
+{
+ if (pfc)
+ return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
+ else if (pause)
+ return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
+ else
+ return 0;
+}
+
+static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
+ bool lossy)
+{
+ if (lossy)
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
else
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
+ thres);
}
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
@@ -778,6 +866,8 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
bool configure = false;
bool pfc = false;
+ bool lossy;
+ u16 thres;
for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
if (prio_tc[j] == i) {
@@ -789,7 +879,12 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
if (!configure)
continue;
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
+
+ lossy = !(pfc || pause_en);
+ thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+ delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
+ pause_en);
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
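A worked example of the headroom math above, assuming a 96-byte cell (the pre-patch hardcoded value; the driver now queries it at runtime), MTU 1500 and a PFC delay of 32768 bit times:

/* mtu_cells   = DIV_ROUND_UP(1500, 96)             = 16
 * delay_bytes = DIV_ROUND_UP(32768, BITS_PER_BYTE) = 4096
 * delay_cells = DIV_ROUND_UP(4096, 96)             = 43
 * delay       = MLXSW_SP_CELL_FACTOR * 43 + 16     = 102 cells
 * thres       = 2 * 16                             = 32 cells
 * PG size     = thres + delay                      = 134 cells (lossless)
 */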
@@ -966,8 +1061,9 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}
-int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
- u16 vid_end, bool is_member, bool untagged)
+static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool is_member, bool untagged)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvm_pl;
@@ -984,6 +1080,26 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
return err;
}
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged)
+{
+ u16 vid, vid_e;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
+ is_member, untagged);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
@@ -1368,7 +1484,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
tc->cls_mall);
return 0;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
case TC_SETUP_CLSFLOWER:
switch (tc->cls_flower->command) {
@@ -1379,6 +1495,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
tc->cls_flower);
return 0;
+ case TC_CLSFLOWER_STATS:
+ return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
+ tc->cls_flower);
default:
return -EOPNOTSUPP;
}
@@ -1492,6 +1611,7 @@ err_port_pause_configure:
struct mlxsw_sp_port_hw_stats {
char str[ETH_GSTRING_LEN];
u64 (*getter)(const char *payload);
+ bool cells_bytes;
};
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
@@ -1612,17 +1732,11 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
-static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
-{
- u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
-
- return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
-}
-
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
{
.str = "tc_transmit_queue_tc",
- .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
+ .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
+ .cells_bytes = true,
},
{
.str = "tc_no_buffer_discard_uc_tc",
@@ -1734,6 +1848,8 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
enum mlxsw_reg_ppcnt_grp grp, int prio,
u64 *data, int data_index)
{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
int i, len;
@@ -1743,8 +1859,13 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
if (err)
return;
mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
- for (i = 0; i < len; i++)
+ for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
+ if (!hw_stats[i].cells_bytes)
+ continue;
+ data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
+ data[data_index + i]);
+ }
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
@@ -2537,25 +2658,33 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
int i;
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->port_to_module);
kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
u8 module, width, lane;
size_t alloc_size;
int i;
int err;
- alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
+ alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_sp->ports)
return -ENOMEM;
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
+ if (!mlxsw_sp->port_to_module) {
+ err = -ENOMEM;
+ goto err_port_to_module_alloc;
+ }
+
+ for (i = 1; i < max_ports; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
&width, &lane);
if (err)
@@ -2575,6 +2704,8 @@ err_port_module_info_get:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->port_to_module);
+err_port_to_module_alloc:
kfree(mlxsw_sp->ports);
return err;
}
@@ -2877,6 +3008,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
+ MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
/* L3 traps */
MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
@@ -3158,6 +3290,18 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
+static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);
+
+static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
+}
+
+static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
+}
+
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -3224,6 +3368,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
+ err = mlxsw_sp_counter_pool_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
+ goto err_counter_pool_init;
+ }
+
+ err = mlxsw_sp_dpipe_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
+ goto err_dpipe_init;
+ }
+
+ err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
+ goto err_dummy_fid_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3233,6 +3395,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ mlxsw_sp_dummy_fid_fini(mlxsw_sp);
+err_dummy_fid_init:
+ mlxsw_sp_dpipe_fini(mlxsw_sp);
+err_dpipe_init:
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
+err_counter_pool_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
mlxsw_sp_span_fini(mlxsw_sp);
@@ -3255,6 +3423,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_dummy_fid_fini(mlxsw_sp);
+ mlxsw_sp_dpipe_fini(mlxsw_sp);
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
@@ -3326,13 +3497,13 @@ bool mlxsw_sp_port_dev_check(const struct net_device *dev)
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
-static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
+static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
- struct mlxsw_sp_port **port = data;
+ struct mlxsw_sp_port **p_mlxsw_sp_port = data;
int ret = 0;
if (mlxsw_sp_port_dev_check(lower_dev)) {
- *port = netdev_priv(lower_dev);
+ *p_mlxsw_sp_port = netdev_priv(lower_dev);
ret = 1;
}
@@ -3341,18 +3512,18 @@ static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
- struct mlxsw_sp_port *port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
- port = NULL;
- netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port);
+ mlxsw_sp_port = NULL;
+ netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
- return port;
+ return mlxsw_sp_port;
}
-static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3362,15 +3533,16 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
- struct mlxsw_sp_port *port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
if (mlxsw_sp_port_dev_check(dev))
return netdev_priv(dev);
- port = NULL;
- netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port);
+ mlxsw_sp_port = NULL;
+ netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
+ &mlxsw_sp_port);
- return port;
+ return mlxsw_sp_port;
}
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
@@ -3390,546 +3562,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
-static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
- unsigned long event)
-{
- switch (event) {
- case NETDEV_UP:
- if (!r)
- return true;
- r->ref_count++;
- return false;
- case NETDEV_DOWN:
- if (r && --r->ref_count == 0)
- return true;
- /* It is possible we already removed the RIF ourselves
- * if it was assigned to a netdev that is now a bridge
- * or LAG slave.
- */
- return false;
- }
-
- return false;
-}
-
-static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (!mlxsw_sp->rifs[i])
- return i;
-
- return MLXSW_SP_INVALID_RIF;
-}
-
-static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
- bool *p_lagged, u16 *p_system_port)
-{
- u8 local_port = mlxsw_sp_vport->local_port;
-
- *p_lagged = mlxsw_sp_vport->lagged;
- *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
-}
-
-static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev, u16 rif,
- bool create)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- bool lagged = mlxsw_sp_vport->lagged;
- char ritr_pl[MLXSW_REG_RITR_LEN];
- u16 system_port;
-
- mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
- l3_dev->mtu, l3_dev->dev_addr);
-
- mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
- mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
- mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *
-mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
-{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
-
- f->leave = mlxsw_sp_vport_rif_sp_leave;
- f->ref_count = 0;
- f->dev = l3_dev;
- f->fid = fid;
-
- return f;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
-{
- struct mlxsw_sp_rif *r;
-
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (!r)
- return NULL;
-
- INIT_LIST_HEAD(&r->nexthop_list);
- INIT_LIST_HEAD(&r->neigh_list);
- ether_addr_copy(r->addr, l3_dev->dev_addr);
- r->mtu = l3_dev->mtu;
- r->ref_count = 1;
- r->dev = l3_dev;
- r->rif = rif;
- r->f = f;
-
- return r;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_fid *f;
- struct mlxsw_sp_rif *r;
- u16 fid, rif;
- int err;
-
- rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_INVALID_RIF)
- return ERR_PTR(-ERANGE);
-
- err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
- if (err)
- return ERR_PTR(err);
-
- fid = mlxsw_sp_rif_sp_to_fid(rif);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- f = mlxsw_sp_rfid_alloc(fid, l3_dev);
- if (!f) {
- err = -ENOMEM;
- goto err_rfid_alloc;
- }
-
- r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
- if (!r) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->r = r;
- mlxsw_sp->rifs[rif] = r;
-
- return r;
-
-err_rif_alloc:
- kfree(f);
-err_rfid_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-err_rif_fdb_op:
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_rif *r)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct net_device *l3_dev = r->dev;
- struct mlxsw_sp_fid *f = r->f;
- u16 fid = f->fid;
- u16 rif = r->rif;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
-
- mlxsw_sp->rifs[rif] = NULL;
- f->r = NULL;
-
- kfree(r);
-
- kfree(f);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
-
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
-}
-
-static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_rif *r;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
- if (!r) {
- r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
- if (IS_ERR(r))
- return PTR_ERR(r);
- }
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
- r->f->ref_count++;
-
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
-
- return 0;
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
-}
-
-static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
- struct net_device *port_dev,
- unsigned long event, u16 vid)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
-
- switch (event) {
- case NETDEV_UP:
- return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
- case NETDEV_DOWN:
- mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
- break;
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
- unsigned long event)
-{
- if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
- return 0;
-
- return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
-}
-
-static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
- struct net_device *lag_dev,
- unsigned long event, u16 vid)
-{
- struct net_device *port_dev;
- struct list_head *iter;
- int err;
-
- netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
- if (mlxsw_sp_port_dev_check(port_dev)) {
- err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
- event, vid);
- if (err)
- return err;
- }
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
- unsigned long event)
-{
- if (netif_is_bridge_port(lag_dev))
- return 0;
-
- return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
-}
-
-static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev)
-{
- u16 fid;
-
- if (is_vlan_dev(l3_dev))
- fid = vlan_dev_vlan_id(l3_dev);
- else if (mlxsw_sp->master_bridge.dev == l3_dev)
- fid = 1;
- else
- return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
-
- return mlxsw_sp_fid_find(mlxsw_sp, fid);
-}
-
-static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-}
-
-static u16 mlxsw_sp_flood_table_index_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
-}
-
-static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
- bool set)
-{
- enum mlxsw_flood_table_type table_type;
- char *sftr_pl;
- u16 index;
- int err;
-
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
-
- table_type = mlxsw_sp_flood_table_type_get(fid);
- index = mlxsw_sp_flood_table_index_get(fid);
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
- 1, MLXSW_PORT_ROUTER_PORT, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
- kfree(sftr_pl);
- return err;
-}
-
-static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return MLXSW_REG_RITR_FID_IF;
- else
- return MLXSW_REG_RITR_VLAN_IF;
-}
-
-static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- u16 fid, u16 rif,
- bool create)
-{
- enum mlxsw_reg_ritr_if_type rif_type;
- char ritr_pl[MLXSW_REG_RITR_LEN];
-
- rif_type = mlxsw_sp_rif_type_get(fid);
- mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
- l3_dev->dev_addr);
- mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
-{
- struct mlxsw_sp_rif *r;
- u16 rif;
- int err;
-
- rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_INVALID_RIF)
- return -ERANGE;
-
- err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
- if (err)
- return err;
-
- err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
- if (err)
- goto err_rif_bridge_op;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
- if (!r) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->r = r;
- mlxsw_sp->rifs[rif] = r;
-
- netdev_dbg(l3_dev, "RIF=%d created\n", rif);
-
- return 0;
-
-err_rif_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-err_rif_fdb_op:
- mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
-err_rif_bridge_op:
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
- return err;
-}
-
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
-{
- struct net_device *l3_dev = r->dev;
- struct mlxsw_sp_fid *f = r->f;
- u16 rif = r->rif;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
-
- mlxsw_sp->rifs[rif] = NULL;
- f->r = NULL;
-
- kfree(r);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-
- mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
-
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-
- netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
-}
-
-static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
- struct net_device *br_dev,
- unsigned long event)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
- struct mlxsw_sp_fid *f;
-
- /* FID can either be an actual FID if the L3 device is the
- * VLAN-aware bridge or a VLAN device on top. Otherwise, the
- * L3 device is a VLAN-unaware bridge and we get a vFID.
- */
- f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
- if (WARN_ON(!f))
- return -EINVAL;
-
- switch (event) {
- case NETDEV_UP:
- return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
- case NETDEV_DOWN:
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
- break;
- }
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
- unsigned long event)
-{
- struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
- vid);
- else if (netif_is_lag_master(real_dev))
- return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
- vid);
- else if (netif_is_bridge_master(real_dev) &&
- mlxsw_sp->master_bridge.dev == real_dev)
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
- event);
-
- return 0;
-}
-
-static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
-{
- struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
- struct net_device *dev = ifa->ifa_dev->dev;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif *r;
- int err = 0;
-
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- goto out;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!mlxsw_sp_rif_should_config(r, event))
- goto out;
-
- if (mlxsw_sp_port_dev_check(dev))
- err = mlxsw_sp_inetaddr_port_event(dev, event);
- else if (netif_is_lag_master(dev))
- err = mlxsw_sp_inetaddr_lag_event(dev, event);
- else if (netif_is_bridge_master(dev))
- err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
- else if (is_vlan_dev(dev))
- err = mlxsw_sp_inetaddr_vlan_event(dev, event);
-
-out:
- return notifier_from_errno(err);
-}
-
-static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
- const char *mac, int mtu)
-{
- char ritr_pl[MLXSW_REG_RITR_LEN];
- int err;
-
- mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
- if (err)
- return err;
-
- mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
- mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
- mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
-{
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif *r;
- int err;
-
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- return 0;
-
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!r)
- return 0;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
- if (err)
- return err;
-
- err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
- if (err)
- goto err_rif_edit;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- ether_addr_copy(r->addr, dev->dev_addr);
- r->mtu = dev->mtu;
-
- netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
-
- return 0;
-
-err_rif_fdb_op:
- mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
-err_rif_edit:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
- return err;
-}
-
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
u16 fid)
{
@@ -4220,7 +3852,7 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 lag_id)
+ struct net_device *lag_dev, u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
@@ -4238,6 +3870,7 @@ mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->lag_id = lag_id;
mlxsw_sp_vport->lagged = 1;
+ mlxsw_sp_vport->dev = lag_dev;
}
static void
@@ -4254,6 +3887,7 @@ mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
if (f)
f->leave(mlxsw_sp_vport);
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
mlxsw_sp_vport->lagged = 0;
}
@@ -4293,7 +3927,7 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
- mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
+ mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
return 0;
@@ -4403,6 +4037,56 @@ static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
+static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ u16 vid;
+ int err;
+
+ spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
+ MLXSW_REG_SPMS_STATE_DISCARDING;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+
+ for (vid = 0; vid < VLAN_N_VID; vid++)
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ true, false);
+ if (err)
+ goto err_port_vlan_set;
+ return 0;
+
+err_port_vlan_set:
+ mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+ return err;
+}
+
+static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ false, false);
+ mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+}
+
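+/* Editorial sketch, not part of this patch: joining an OVS master above
+ * first moves every VID on the port to the forwarding STP state and then
+ * enables VIDs 2 through VLAN_N_VID - 1 as members (untagged not set), so
+ * the OVS datapath can see VLAN traffic without per-VLAN setup; leaving
+ * undoes both steps in reverse order.
+ */
+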
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
unsigned long event, void *ptr)
{
@@ -4421,7 +4105,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
upper_dev = info->upper_dev;
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
- !netif_is_bridge_master(upper_dev))
+ !netif_is_bridge_master(upper_dev) &&
+ !netif_is_ovs_master(upper_dev))
return -EINVAL;
if (!info->linking)
break;
@@ -4438,6 +4123,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
!netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
return -EINVAL;
+ if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
+ return -EINVAL;
+ if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4446,8 +4135,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
upper_dev);
else
- mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
+ mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+ upper_dev);
} else if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
@@ -4461,6 +4150,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
else
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
upper_dev);
+ } else if (netif_is_ovs_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
+ else
+ mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4552,8 +4246,8 @@ static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *f;
f = mlxsw_sp_fid_find(mlxsw_sp, fid);
- if (f && f->r)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f && f->rif)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
if (f && --f->ref_count == 0)
mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
@@ -4564,33 +4258,40 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
struct netdev_notifier_changeupper_info *info;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
- int err;
+ int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(br_dev);
if (!mlxsw_sp)
return 0;
- if (br_dev != mlxsw_sp->master_bridge.dev)
- return 0;
info = ptr;
switch (event) {
- case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!is_vlan_dev(upper_dev))
- break;
- if (info->linking) {
- err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
- upper_dev);
- if (err)
- return err;
+ return -EINVAL;
+ if (is_vlan_dev(upper_dev) &&
+ br_dev != mlxsw_sp->master_bridge.dev)
+ return -EINVAL;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (is_vlan_dev(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+ upper_dev);
+ else
+ mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
+ upper_dev);
} else {
- mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+ err = -EINVAL;
+ WARN_ON(1);
}
break;
}
- return 0;
+ return err;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
@@ -4657,8 +4358,8 @@ static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
clear_bit(vfid, mlxsw_sp->vfids.mapped);
list_del(&f->list);
- if (f->r)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f->rif)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
kfree(f);
@@ -4810,6 +4511,8 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport)
+ return 0;
switch (event) {
case NETDEV_PRECHANGEUPPER:
@@ -4821,22 +4524,24 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
/* We can't have multiple VLAN interfaces configured on
* the same port and being members in the same bridge.
*/
- if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
upper_dev))
return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (info->linking) {
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
- err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
- upper_dev);
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
+ upper_dev);
+ else
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
} else {
- if (!mlxsw_sp_vport)
- return 0;
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+ err = -EINVAL;
+ WARN_ON(1);
}
+ break;
}
return err;
@@ -4878,6 +4583,15 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
+ return false;
+ return netif_is_l3_master(info->upper_dev);
+}
+
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -4886,6 +4600,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
err = mlxsw_sp_netdevice_router_port_event(dev);
+ else if (mlxsw_sp_is_vrf_event(event, ptr))
+ err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
else if (netif_is_lag_master(dev))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 13ec85e7c392..0c23bc1e946d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -57,41 +57,21 @@
#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
+#define MLXSW_SP_DUMMY_FID 15359
+
#define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
-#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
-#define MLXSW_SP_LPM_TREE_MAX 22
-#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
-
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
-#define MLXSW_SP_BYTES_PER_CELL 96
-
-#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
-#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
-
#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
-/* Maximum delay buffer needed in case of PAUSE frames, in cells.
- * Assumes 100m cable and maximum MTU.
- */
-#define MLXSW_SP_PAUSE_DELAY 612
-
-#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
-
-static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
-{
- delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
- return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
-}
-
struct mlxsw_sp_port;
+struct mlxsw_sp_rif;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -103,21 +83,10 @@ struct mlxsw_sp_fid {
struct list_head list;
unsigned int ref_count;
struct net_device *dev;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
u16 fid;
};
-struct mlxsw_sp_rif {
- struct list_head nexthop_list;
- struct list_head neigh_list;
- struct net_device *dev;
- unsigned int ref_count;
- struct mlxsw_sp_fid *f;
- unsigned char addr[ETH_ALEN];
- int mtu;
- u16 rif;
-};
-
struct mlxsw_sp_mid {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -138,17 +107,7 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
- return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
-}
-
-static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
-{
- return fid >= MLXSW_SP_RFID_BASE;
-}
-
-static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
-{
- return MLXSW_SP_RFID_BASE + rif;
+ return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
}
struct mlxsw_sp_sb_pr {
@@ -177,12 +136,15 @@ struct mlxsw_sp_sb_pm {
#define MLXSW_SP_SB_POOL_COUNT 4
#define MLXSW_SP_SB_TC_COUNT 8
+struct mlxsw_sp_sb_port {
+ struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+};
+
struct mlxsw_sp_sb {
struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
- struct {
- struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
- struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
- } ports[MLXSW_PORT_MAX_PORTS];
+ struct mlxsw_sp_sb_port *ports;
+ u32 cell_size;
};
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
@@ -207,11 +169,9 @@ struct mlxsw_sp_fib;
struct mlxsw_sp_vr {
u16 id; /* virtual router ID */
- bool used;
- enum mlxsw_sp_l3proto proto;
u32 tb_id; /* kernel fib table id */
- struct mlxsw_sp_lpm_tree *lpm_tree;
- struct mlxsw_sp_fib *fib;
+ unsigned int rif_count;
+ struct mlxsw_sp_fib *fib4;
};
enum mlxsw_sp_span_type {
@@ -253,12 +213,15 @@ struct mlxsw_sp_port_mall_tc_entry {
};
struct mlxsw_sp_router {
- struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct rhashtable nexthop_group_ht;
struct rhashtable nexthop_ht;
struct {
+ struct mlxsw_sp_lpm_tree *trees;
+ unsigned int tree_count;
+ } lpm;
+ struct {
struct delayed_work dw;
unsigned long interval; /* ms */
} neighs_update;
@@ -269,6 +232,7 @@ struct mlxsw_sp_router {
};
struct mlxsw_sp_acl;
+struct mlxsw_sp_counter_pool;
struct mlxsw_sp {
struct {
@@ -296,7 +260,7 @@ struct mlxsw_sp {
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper *lags;
- u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+ u8 *port_to_module;
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
struct mlxsw_sp_acl *acl;
@@ -304,6 +268,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
} kvdl;
+ struct mlxsw_sp_counter_pool *counter_pool;
struct {
struct mlxsw_sp_span_entry *entries;
int entries_count;
@@ -317,6 +282,18 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
return &mlxsw_sp->lags[lag_id];
}
+static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
+ u32 cells)
+{
+ return mlxsw_sp->sb.cell_size * cells;
+}
+
+static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
+ u32 bytes)
+{
+ return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
+}
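+
+/* Editorial sketch, not part of this patch: cell_size now comes from the
+ * firmware resource query instead of the old hard-coded 96 byte constant
+ * removed above. Assuming a cell size of 96:
+ *
+ * mlxsw_sp_bytes_cells(mlxsw_sp, 100); // DIV_ROUND_UP -> 2 cells
+ * mlxsw_sp_cells_bytes(mlxsw_sp, 2); // -> 192 bytes
+ *
+ * so converting bytes to cells and back may round up to a cell multiple.
+ */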
+
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -386,6 +363,7 @@ struct mlxsw_sp_port {
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -497,19 +475,6 @@ mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-static inline struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
- return mlxsw_sp->rifs[i];
-
- return NULL;
-}
-
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BC,
@@ -570,8 +535,6 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -608,10 +571,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r);
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
+ struct netdev_notifier_changeupper_info *info);
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
+ u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
@@ -620,6 +589,8 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
+ unsigned int counter_index;
+ bool counter_valid;
};
enum mlxsw_sp_acl_profile {
@@ -639,6 +610,8 @@ struct mlxsw_sp_acl_profile_ops {
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ bool *activity);
};
struct mlxsw_sp_acl_ops {
@@ -679,6 +652,14 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 action, u16 vid, u16 proto, u8 prio);
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u16 fid);
struct mlxsw_sp_acl_rule;
@@ -698,6 +679,9 @@ mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use);
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
@@ -708,5 +692,14 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
__be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
+int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ struct tc_cls_flower_offload *f);
+int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index, u64 *packets,
+ u64 *bytes);
+int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ unsigned int *p_counter_index);
+void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 8a18b3aa70dc..317f7b14627f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/tc_act/tc_vlan.h>
#include "reg.h"
#include "core.h"
@@ -49,10 +50,17 @@
#include "spectrum_acl_flex_keys.h"
struct mlxsw_sp_acl {
+ struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_afa *afa;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
+ struct list_head rules;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
+ } rule_activity_update;
unsigned long priv[0];
/* priv has to be always the last item */
};
@@ -79,9 +87,13 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_rule {
struct rhash_head ht_node; /* Member of rule HT */
+ struct list_head list;
unsigned long cookie; /* HT key */
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule_info *rulei;
+ u64 last_used;
+ u64 last_packets;
+ u64 last_bytes;
unsigned long priv[0];
/* priv has to be always the last item */
};
@@ -237,6 +249,27 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
+static int
+mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
+ if (err)
+ return err;
+ rulei->counter_valid = true;
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ rulei->counter_valid = false;
+ mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
+}
+
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
@@ -335,6 +368,48 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
local_port, in_port);
}
+int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 action, u16 vid, u16 proto, u8 prio)
+{
+ u8 ethertype;
+
+ if (action == TCA_VLAN_ACT_MODIFY) {
+ switch (proto) {
+ case ETH_P_8021Q:
+ ethertype = 0;
+ break;
+ case ETH_P_8021AD:
+ ethertype = 1;
+ break;
+ default:
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
+ proto);
+ return -EINVAL;
+ }
+
+ return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
+ vid, prio, ethertype);
+ } else {
+ dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
+ return -EINVAL;
+ }
+}
+
+int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_append_counter(rulei->act_block,
+ rulei->counter_index);
+}
+
+int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u16 fid)
+{
+ return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
+}
+
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
@@ -358,8 +433,14 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
err = PTR_ERR(rule->rulei);
goto err_rulei_create;
}
+
+ err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
+ if (err)
+ goto err_counter_alloc;
return rule;
+err_counter_alloc:
+ mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
kfree(rule);
err_alloc:
@@ -372,6 +453,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
mlxsw_sp_acl_rulei_destroy(rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -393,6 +475,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_insert;
+ list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
return 0;
err_rhashtable_insert:
@@ -406,6 +489,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
@@ -426,6 +510,90 @@ mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
return rule->rulei;
}
+static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ bool active;
+ int err;
+
+ err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
+ if (err)
+ return err;
+ if (active)
+ rule->last_used = jiffies;
+ return 0;
+}
+
+static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
+{
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ /* Protect internal structures from changes */
+ rtnl_lock();
+ list_for_each_entry(rule, &acl->rules, list) {
+ err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
+ rule);
+ if (err)
+ goto err_rule_update;
+ }
+ rtnl_unlock();
+ return 0;
+
+err_rule_update:
+ rtnl_unlock();
+ return err;
+}
+
+static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
+{
+ unsigned long interval = acl->rule_activity_update.interval;
+
+ mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
+{
+ struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
+ rule_activity_update.dw.work);
+ int err;
+
+ err = mlxsw_sp_acl_rules_activity_update(acl);
+ if (err)
+ dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update ACL activity\n");
+
+ mlxsw_sp_acl_rule_activity_work_schedule(acl);
+}
+
+int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ u64 current_packets;
+ u64 current_bytes;
+ int err;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
+ &current_packets, &current_bytes);
+ if (err)
+ return err;
+
+ *packets = current_packets - rule->last_packets;
+ *bytes = current_bytes - rule->last_bytes;
+ *last_use = rule->last_used;
+
+ rule->last_bytes = current_bytes;
+ rule->last_packets = current_packets;
+
+ return 0;
+}
+
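+/* Editorial sketch, not part of this patch: last_packets and last_bytes
+ * are updated on every call, so mlxsw_sp_acl_rule_get_stats() hands back
+ * deltas since the previous call. A caller keeping running totals would
+ * sum successive deltas, e.g.:
+ *
+ * u64 packets, bytes, last_use;
+ *
+ * err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets,
+ * &bytes, &last_use);
+ * if (!err)
+ * total_packets += packets;
+ */
+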
#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
@@ -434,7 +602,6 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
struct mlxsw_sp *mlxsw_sp = priv;
char pefa_pl[MLXSW_REG_PEFA_LEN];
u32 kvdl_index;
- int ret;
int err;
/* The first action set of a TCAM entry is stored directly in TCAM,
@@ -443,10 +610,10 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
if (is_first)
return 0;
- ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE);
- if (ret < 0)
- return ret;
- kvdl_index = ret;
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
+ &kvdl_index);
+ if (err)
+ return err;
mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
@@ -475,13 +642,11 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
struct mlxsw_sp *mlxsw_sp = priv;
char ppbs_pl[MLXSW_REG_PPBS_LEN];
u32 kvdl_index;
- int ret;
int err;
- ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1);
- if (ret < 0)
- return ret;
- kvdl_index = ret;
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+ if (err)
+ return err;
mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
if (err)
@@ -518,7 +683,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
-
+ acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
mlxsw_sp_afk_blocks,
@@ -541,11 +706,18 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rhashtable_init;
+ INIT_LIST_HEAD(&acl->rules);
err = acl_ops->init(mlxsw_sp, acl->priv);
if (err)
goto err_acl_ops_init;
acl->ops = acl_ops;
+
+ /* Create the delayed work for rule activity updates */
+ INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
+ mlxsw_sp_acl_rule_activity_update_work);
+ acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
+ mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
return 0;
err_acl_ops_init:
@@ -564,7 +736,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
+ cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
acl_ops->fini(mlxsw_sp, acl->priv);
+ WARN_ON(!list_empty(&acl->rules));
rhashtable_destroy(&acl->ruleset_ht);
mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
index 82b81cf7f4a7..af7b7bad48df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
@@ -39,11 +39,15 @@
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
@@ -65,6 +69,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7382832215fa..3a24289979d9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -561,6 +561,24 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
+static int
+mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int offset,
+ bool *activity)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ int err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+ region->tcam_region_info, offset);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ return err;
+ *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+ return 0;
+}
+
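+/* Editorial note, not part of this patch: the PTCE2 query above uses
+ * MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, so reading the activity bit
+ * also clears it. Each poll therefore reports whether the entry was hit
+ * since the previous poll, matching the periodic activity work that runs
+ * every MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS milliseconds.
+ */
+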
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
static int
@@ -940,6 +958,19 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
+static int
+mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_entry *entry,
+ bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
+ struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+ return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
+ entry->parman_item.index,
+ activity);
+}
+
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
MLXSW_AFK_ELEMENT_DMAC,
@@ -950,6 +981,8 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_DST_IP4,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+ MLXSW_AFK_ELEMENT_VID,
+ MLXSW_AFK_ELEMENT_PCP,
};
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
@@ -1046,6 +1079,16 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}
+static int
+mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv, bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
+ activity);
+}
+
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
@@ -1055,6 +1098,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
+ .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
static const struct mlxsw_sp_acl_profile_ops *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index a7468262f118..997189cfe7fd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -162,8 +162,8 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
static const u16 mlxsw_sp_pbs[] = {
- [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
- [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
+ [0] = 2 * ETH_FRAME_LEN,
+ [9] = 2 * MLXSW_PORT_MAX_MTU,
};
#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
@@ -171,20 +171,22 @@ static const u16 mlxsw_sp_pbs[] = {
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
int i;
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
0xffff, 0xffff / 2);
for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+ u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);
+
if (i == MLXSW_SP_PB_UNUSED)
continue;
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
}
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
- return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
- MLXSW_REG(pbmc), pbmc_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -209,11 +211,25 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
-#define MLXSW_SP_SB_PR_INGRESS_SIZE \
- (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
+static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
+ GFP_KERNEL);
+ if (!mlxsw_sp->sb.ports)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->sb.ports);
+}
+
+#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
-#define MLXSW_SP_SB_PR_EGRESS_SIZE \
- (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
+#define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000
#define MLXSW_SP_SB_PR(_mode, _size) \
{ \
@@ -223,18 +239,17 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
+ MLXSW_SP_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
+ MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
};
#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
@@ -251,11 +266,9 @@ static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
int err;
for (i = 0; i < prs_len; i++) {
- const struct mlxsw_sp_sb_pr *pr;
+ u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
- pr = &prs[i];
- err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
- pr->mode, pr->size);
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
if (err)
return err;
}
@@ -284,7 +297,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
}
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
+ MLXSW_SP_SB_CM(10000, 8, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
@@ -293,20 +306,20 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
+ MLXSW_SP_SB_CM(20000, 1, 3),
};
#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
+ MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
@@ -330,7 +343,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
+ MLXSW_SP_SB_CM(10000, 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
@@ -370,13 +383,17 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;
+ u32 min_buff;
if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
continue; /* PG number 8 does not exist, skip it */
cm = &cms[i];
+ /* All pools are initialized using dynamic thresholds,
+ * therefore 'max_buff' isn't specified in cells.
+ */
+ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
- cm->min_buff, cm->max_buff,
- cm->pool);
+ min_buff, cm->max_buff, cm->pool);
if (err)
return err;
}
@@ -484,21 +501,21 @@ struct mlxsw_sp_sb_mm {
}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
- MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(20000, 0xff, 0),
};
#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -511,10 +528,15 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
const struct mlxsw_sp_sb_mm *mc;
+ u32 min_buff;
mc = &mlxsw_sp_sb_mms[i];
- mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
- mc->max_buff, mc->pool);
+ /* All pools are initialized using dynamic thresholds,
+ * therefore 'max_buff' isn't specified in cells.
+ */
+ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
+ mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
+ mc->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
if (err)
return err;
@@ -522,32 +544,53 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
-
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
+ u64 sb_size;
int err;
- err = mlxsw_sp_sb_prs_init(mlxsw_sp);
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
+ return -EIO;
+ mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
+ return -EIO;
+ sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
+
+ err = mlxsw_sp_sb_ports_init(mlxsw_sp);
if (err)
return err;
+ err = mlxsw_sp_sb_prs_init(mlxsw_sp);
+ if (err)
+ goto err_sb_prs_init;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
if (err)
- return err;
+ goto err_sb_cpu_port_sb_cms_init;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
if (err)
- return err;
- return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
- MLXSW_SP_SB_SIZE,
- MLXSW_SP_SB_POOL_COUNT,
- MLXSW_SP_SB_POOL_COUNT,
- MLXSW_SP_SB_TC_COUNT,
- MLXSW_SP_SB_TC_COUNT);
+ goto err_sb_mms_init;
+ err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_TC_COUNT,
+ MLXSW_SP_SB_TC_COUNT);
+ if (err)
+ goto err_devlink_sb_register;
+
+ return 0;
+
+err_devlink_sb_register:
+err_sb_mms_init:
+err_sb_cpu_port_sb_cms_init:
+err_sb_prs_init:
+ mlxsw_sp_sb_ports_fini(mlxsw_sp);
+ return err;
}
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
+ mlxsw_sp_sb_ports_fini(mlxsw_sp);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -596,7 +639,7 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
pool_info->pool_type = (enum devlink_sb_pool_type) dir;
- pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+ pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
return 0;
}
@@ -606,9 +649,9 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
enum devlink_sb_threshold_type threshold_type)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
- u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
enum mlxsw_reg_sbpr_mode mode;
if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
@@ -627,7 +670,7 @@ static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
- return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+ return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
@@ -645,7 +688,7 @@ static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
return -EINVAL;
*p_max_buff = val;
} else {
- *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+ *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
}
return 0;
}
@@ -761,7 +804,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
masked_count = 0;
for (local_port = cb_ctx.local_port_1;
- local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
@@ -775,7 +818,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
}
masked_count = 0;
for (local_port = cb_ctx.local_port_1;
- local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
@@ -817,7 +860,7 @@ next_batch:
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
}
- for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
@@ -847,7 +890,7 @@ do_query:
cb_priv);
if (err)
goto out;
- if (local_port < MLXSW_PORT_MAX_PORTS)
+ if (local_port < mlxsw_core_max_ports(mlxsw_core))
goto next_batch;
out:
@@ -882,7 +925,7 @@ next_batch:
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
}
- for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
@@ -908,7 +951,7 @@ do_query:
&bulk_list, NULL, 0);
if (err)
goto out;
- if (local_port < MLXSW_PORT_MAX_PORTS)
+ if (local_port < mlxsw_core_max_ports(mlxsw_core))
goto next_batch;
out:
@@ -932,8 +975,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool, dir);
- *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
- *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
+ *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
+ *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
return 0;
}
@@ -951,7 +994,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
- *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
- *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
+ *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
+ *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
new file mode 100644
index 000000000000..0f46775e0307
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum_cnt.h"
+
+#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
+
+struct mlxsw_sp_counter_sub_pool {
+ unsigned int base_index;
+ unsigned int size;
+ unsigned int entry_size;
+ unsigned int bank_count;
+};
+
+struct mlxsw_sp_counter_pool {
+ unsigned int pool_size;
+ unsigned long *usage; /* Usage bitmap */
+ struct mlxsw_sp_counter_sub_pool *sub_pools;
+};
+
+static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+ [MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
+ .bank_count = 6,
+ },
+ [MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
+ .bank_count = 2,
+ }
+};
+
+static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int total_bank_config = 0;
+ unsigned int pool_size;
+ int i;
+
+ pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+ /* Check the config is valid: no bank oversubscription */
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
+ total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
+ if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
+ return -EINVAL;
+ return 0;
+}
+
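+/* Worked example, editorial and not part of this patch: the sub-pools
+ * above request 6 + 2 = 8 banks of 4096 counters. With a firmware
+ * COUNTER_POOL_SIZE of, say, 32768, pool_size / 4096 + 1 = 9 >= 8 and
+ * the layout is accepted; any pool smaller than 7 * 4096 = 28672
+ * entries would be rejected as oversubscribed.
+ */
+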
+static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+
+ /* Prepare generic flow pool */
+ sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
+ return -EIO;
+ sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ COUNTER_SIZE_PACKETS_BYTES);
+ /* Prepare erif pool */
+ sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
+ return -EIO;
+ sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ COUNTER_SIZE_ROUTER_BASIC);
+ return 0;
+}
+
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ struct mlxsw_sp_counter_pool *pool;
+ unsigned int base_index;
+ unsigned int map_size;
+ int i;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
+ return -EIO;
+
+ err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
+ if (err)
+ return err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+ map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
+
+ pool->usage = kzalloc(map_size, GFP_KERNEL);
+ if (!pool->usage) {
+ err = -ENOMEM;
+ goto err_usage_alloc;
+ }
+
+ pool->sub_pools = mlxsw_sp_counter_sub_pools;
+ /* Allocation is based on bank count which should be
+ * specified statically for each sub-pool.
+ */
+ base_index = 0;
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+ sub_pool = &pool->sub_pools[i];
+ sub_pool->size = sub_pool->bank_count *
+ MLXSW_SP_COUNTER_POOL_BANK_SIZE;
+ sub_pool->base_index = base_index;
+ base_index += sub_pool->size;
+ /* The last bank can't be fully used */
+ if (sub_pool->base_index + sub_pool->size > pool->pool_size)
+ sub_pool->size = pool->pool_size - sub_pool->base_index;
+ }
+
+ mlxsw_sp->counter_pool = pool;
+ return 0;
+
+err_usage_alloc:
+ kfree(pool);
+ return err;
+}
+
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+
+ WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
+ pool->pool_size);
+ kfree(pool->usage);
+ kfree(pool);
+}
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int *p_counter_index)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int entry_index;
+ unsigned int stop_index;
+ int i;
+
+ sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ stop_index = sub_pool->base_index + sub_pool->size;
+ entry_index = sub_pool->base_index;
+
+ entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
+ if (entry_index == stop_index)
+ return -ENOBUFS;
+ /* A sub-pool's size need not be a multiple of the entry size,
+ * so make sure the entry does not overflow the sub-pool's end
+ */
+ if (entry_index + sub_pool->entry_size > stop_index)
+ return -ENOBUFS;
+ for (i = 0; i < sub_pool->entry_size; i++)
+ __set_bit(entry_index + i, pool->usage);
+
+ *p_counter_index = entry_index;
+ return 0;
+}
+
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int counter_index)
+{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ int i;
+
+ if (WARN_ON(counter_index >= pool->pool_size))
+ return;
+ sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ for (i = 0; i < sub_pool->entry_size; i++)
+ __clear_bit(counter_index + i, pool->usage);
+}
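+
+/* Usage sketch, editorial and not part of this patch: a consumer such as
+ * the flow counter helpers declared in spectrum.h would pair the calls:
+ *
+ * unsigned int counter_index;
+ * int err;
+ *
+ * err = mlxsw_sp_counter_alloc(mlxsw_sp,
+ * MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ * &counter_index);
+ * if (err)
+ * return err;
+ * ...
+ * mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ * counter_index);
+ */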
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
new file mode 100644
index 000000000000..fd34d0a01073
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -0,0 +1,54 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_CNT_H
+#define _MLXSW_SPECTRUM_CNT_H
+
+#include "spectrum.h"
+
+enum mlxsw_sp_counter_sub_pool_id {
+ MLXSW_SP_COUNTER_SUB_POOL_FLOW,
+ MLXSW_SP_COUNTER_SUB_POOL_RIF,
+};
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int *p_counter_index);
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+ unsigned int counter_index);
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
new file mode 100644
index 000000000000..ea56f6ade6b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -0,0 +1,352 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <net/devlink.h>
+
+#include "spectrum.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_router.h"
+
+enum mlxsw_sp_field_metadata_id {
+ MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+ MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+ MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+};
+
+static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = {
+ { .name = "erif_port",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+ .bitwidth = 32,
+ .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+ },
+ { .name = "l3_forward",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+ .bitwidth = 1,
+ },
+ { .name = "l3_drop",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+ .bitwidth = 1,
+ },
+};
+
+enum mlxsw_sp_dpipe_header_id {
+ MLXSW_SP_DPIPE_HEADER_METADATA,
+};
+
+static struct devlink_dpipe_header mlxsw_sp_dpipe_header_metadata = {
+ .name = "mlxsw_meta",
+ .id = MLXSW_SP_DPIPE_HEADER_METADATA,
+ .fields = mlxsw_sp_dpipe_fields_metadata,
+ .fields_count = ARRAY_SIZE(mlxsw_sp_dpipe_fields_metadata),
+};
+
+static struct devlink_dpipe_header *mlxsw_dpipe_headers[] = {
+ &mlxsw_sp_dpipe_header_metadata,
+};
+
+static struct devlink_dpipe_headers mlxsw_sp_dpipe_headers = {
+ .headers = mlxsw_dpipe_headers,
+ .headers_count = ARRAY_SIZE(mlxsw_dpipe_headers),
+};
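
The synthetic "mlxsw_meta" header above is the dpipe idiom for exposing device-internal state: erif_port is a 32-bit field whose values are declared to map to kernel ifindexes, so user space can render them as netdevice names, while l3_forward and l3_drop are single-bit flags that exist only to serve as action targets for the eRIF table below.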
+
+static int mlxsw_sp_dpipe_table_erif_actions_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_action action = {0};
+ int err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+ err = devlink_dpipe_action_put(skb, &action);
+ if (err)
+ return err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP;
+
+ return devlink_dpipe_action_put(skb, &action);
+}
+
+static int mlxsw_sp_dpipe_table_erif_matches_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_match match = {0};
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+ return devlink_dpipe_match_put(skb, &match);
+}
+
+static void mlxsw_sp_erif_entry_clear(struct devlink_dpipe_entry *entry)
+{
+ unsigned int value_count, value_index;
+ struct devlink_dpipe_value *value;
+
+ value = entry->action_values;
+ value_count = entry->action_values_count;
+ for (value_index = 0; value_index < value_count; value_index++) {
+ kfree(value[value_index].value);
+ kfree(value[value_index].mask);
+ }
+
+ value = entry->match_values;
+ value_count = entry->match_values_count;
+ for (value_index = 0; value_index < value_count; value_index++) {
+ kfree(value[value_index].value);
+ kfree(value[value_index].mask);
+ }
+}
+
+static void
+mlxsw_sp_erif_match_action_prepare(struct devlink_dpipe_match *match,
+ struct devlink_dpipe_action *action)
+{
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &mlxsw_sp_dpipe_header_metadata;
+ action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+}
+
+static int mlxsw_sp_erif_entry_prepare(struct devlink_dpipe_entry *entry,
+ struct devlink_dpipe_value *match_value,
+ struct devlink_dpipe_match *match,
+ struct devlink_dpipe_value *action_value,
+ struct devlink_dpipe_action *action)
+{
+ entry->match_values = match_value;
+ entry->match_values_count = 1;
+
+ entry->action_values = action_value;
+ entry->action_values_count = 1;
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u32);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ goto err_action_alloc;
+ return 0;
+
+err_action_alloc:
+ kfree(match_value->value);
+ return -ENOMEM;
+}
+
+static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct devlink_dpipe_entry *entry,
+ struct mlxsw_sp_rif *rif,
+ bool counters_enabled)
+{
+ u32 *action_value;
+ u32 *rif_value;
+ u64 cnt;
+ int err;
+
+ /* Set Match RIF index */
+ rif_value = entry->match_values->value;
+ *rif_value = mlxsw_sp_rif_index(rif);
+ entry->match_values->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+ entry->match_values->mapping_valid = true;
+
+ /* Set Action Forwarding */
+ action_value = entry->action_values->value;
+ *action_value = 1;
+
+ entry->counter_valid = false;
+ entry->counter = 0;
+ if (!counters_enabled)
+ return 0;
+
+ entry->index = mlxsw_sp_rif_index(rif);
+ err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ &cnt);
+ if (!err) {
+ entry->counter = cnt;
+ entry->counter_valid = true;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct devlink_dpipe_value match_value = {{0}}, action_value = {{0}};
+ struct devlink_dpipe_action action = {0};
+ struct devlink_dpipe_match match = {0};
+ struct devlink_dpipe_entry entry = {0};
+ struct mlxsw_sp *mlxsw_sp = priv;
+ unsigned int rif_count;
+ int i, j;
+ int err;
+
+ mlxsw_sp_erif_match_action_prepare(&match, &action);
+ err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match,
+ &action_value, &action);
+ if (err)
+ return err;
+
+ rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ rtnl_lock();
+ i = 0;
+start_again:
+ err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+ if (err)
+ goto err_ctx_prepare;
+ j = 0;
+ for (; i < rif_count; i++) {
+ if (!mlxsw_sp->rifs[i])
+ continue;
+ err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry,
+ mlxsw_sp->rifs[i],
+ counters_enabled);
+ if (err)
+ goto err_entry_get;
+ err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!j)
+ goto err_entry_append;
+ break;
+ }
+ goto err_entry_append;
+ }
+ j++;
+ }
+
+ devlink_dpipe_entry_ctx_close(dump_ctx);
+ if (i != rif_count)
+ goto start_again;
+ rtnl_unlock();
+
+ mlxsw_sp_erif_entry_clear(&entry);
+ return 0;
+err_entry_append:
+err_entry_get:
+err_ctx_prepare:
+ rtnl_unlock();
+ mlxsw_sp_erif_entry_clear(&entry);
+ return err;
+}
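
The loop above follows devlink's multi-message dump pattern: entries are appended to one message until devlink_dpipe_entry_ctx_append() returns -EMSGSIZE, the message is closed, and the dump resumes from the same index in a fresh one; -EMSGSIZE on the very first entry of a message (j == 0) is fatal, since that entry can never fit. A compact runnable model of the control flow, where the ctx_* helpers are toy stand-ins rather than the devlink API:

#include <errno.h>
#include <stdio.h>

#define MSG_CAP 4			/* toy "skb" capacity, in entries */

static unsigned int msg_used;

static int ctx_prepare(void)
{
	msg_used = 0;
	return 0;
}

static void ctx_close(void)
{
	printf("-- message closed --\n");
}

static int ctx_append(unsigned int entry)
{
	if (msg_used == MSG_CAP)
		return -EMSGSIZE;	/* caller closes and retries */
	printf("entry %u\n", entry);
	msg_used++;
	return 0;
}

static int dump_all(unsigned int count)
{
	unsigned int i = 0;

	while (i < count) {
		unsigned int j = 0;	/* entries in current message */
		int err = ctx_prepare();

		if (err)
			return err;
		for (; i < count; i++) {
			err = ctx_append(i);
			if (err == -EMSGSIZE && j)
				break;	/* close, then retry entry i */
			if (err)
				return err;	/* incl. first-entry -EMSGSIZE */
			j++;
		}
		ctx_close();
	}
	return 0;
}

int main(void)
{
	return dump_all(10) ? 1 : 0;
}

User space consumes these chunks transparently; with iproute2 the table can be read with something like "devlink dpipe table dump pci/0000:03:00.0 name mlxsw_erif" (exact syntax may vary by version).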
+
+static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int i;
+
+ rtnl_lock();
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ if (!mlxsw_sp->rifs[i])
+ continue;
+ if (enable)
+ mlxsw_sp_rif_counter_alloc(mlxsw_sp,
+ mlxsw_sp->rifs[i],
+ MLXSW_SP_RIF_COUNTER_EGRESS);
+ else
+ mlxsw_sp_rif_counter_free(mlxsw_sp,
+ mlxsw_sp->rifs[i],
+ MLXSW_SP_RIF_COUNTER_EGRESS);
+ }
+ rtnl_unlock();
+ return 0;
+}
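
Keeping counters bound costs hardware resources, so they are driven from user space on demand: this callback walks every active RIF and binds, or releases, an egress counter for it. With iproute2 the switch is flipped with something like "devlink dpipe table set pci/0000:03:00.0 name mlxsw_erif counters enable" (exact syntax may differ between iproute2 versions).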
+
+static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
+ .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump,
+ .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump,
+ .entries_dump = mlxsw_sp_table_erif_entries_dump,
+ .counters_set_update = mlxsw_sp_table_erif_counters_update,
+};
+
+static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ u64 table_size;
+
+ table_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ return devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
+ &mlxsw_sp_erif_ops,
+ mlxsw_sp, table_size,
+ false);
+}
+
+static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
+}
+
+int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
+
+ err = devlink_dpipe_headers_register(devlink,
+ &mlxsw_sp_dpipe_headers);
+ if (err)
+ return err;
+ err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp);
+ if (err)
+ goto err_erif_register;
+ return 0;
+
+err_erif_register:
+ devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
+ return err;
+}
+
+void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
+ devlink_dpipe_headers_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
new file mode 100644
index 000000000000..d2089298cba3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -0,0 +1,43 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PIPELINE_H_
+#define _MLXSW_PIPELINE_H_
+
+int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp);
+
+#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif"
+
+#endif /* _MLXSW_PIPELINE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ae6cccc666e4..7d87e23578a3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -39,6 +39,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
#include "spectrum.h"
#include "core_acl_flex_keys.h"
@@ -55,6 +56,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (tc_no_actions(exts))
return 0;
+ /* Count action is inserted first */
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
+ if (err)
+ return err;
+
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
@@ -65,6 +71,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
+ err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
+ MLXSW_SP_DUMMY_FID);
+ if (err)
+ return err;
+
out_dev = __dev_get_by_index(dev_net(dev), ifindex);
if (out_dev == dev)
out_dev = NULL;
@@ -73,6 +84,15 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
out_dev);
if (err)
return err;
+ } else if (is_tcf_vlan(a)) {
+ u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
+ u32 action = tcf_vlan_action(a);
+ u8 prio = tcf_vlan_push_prio(a);
+ u16 vid = tcf_vlan_push_vid(a);
+
+ return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+ action, vid,
+ proto, prio);
} else {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
return -EOPNOTSUPP;
@@ -173,7 +193,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
return -EOPNOTSUPP;
}
@@ -234,6 +255,27 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
sizeof(key->src));
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+ if (mask->vlan_id != 0)
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VID,
+ key->vlan_id,
+ mask->vlan_id);
+ if (mask->vlan_priority != 0)
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_PCP,
+ key->vlan_priority,
+ mask->vlan_priority);
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
mlxsw_sp_flower_parse_ipv4(rulei, f);
@@ -314,3 +356,47 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
+
+int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ struct tc_action *a;
+ LIST_HEAD(actions);
+ u64 packets;
+ u64 lastuse;
+ u64 bytes;
+ int err;
+
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
+ ingress,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (WARN_ON(IS_ERR(ruleset)))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
+ if (!rule)
+ return -EINVAL;
+
+ err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
+ &lastuse);
+ if (err)
+ goto err_rule_get_stats;
+
+ preempt_disable();
+
+ tcf_exts_to_list(f->exts, &actions);
+ list_for_each_entry(a, &actions, list)
+ tcf_action_stats_update(a, bytes, packets, lastuse);
+
+ preempt_enable();
+
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return 0;
+
+err_rule_get_stats:
+ mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+ return err;
+}
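
This is the read side of the unconditional count action added in mlxsw_sp_flower_parse_actions() above: because every offloaded rule owns a counter, the driver can always resolve the rule from its tc cookie, pull packets, bytes and last-use from hardware in one query, and propagate that single readout to each of the rule's tc actions via tcf_action_stats_update().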
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index ac321e8e5c1a..26c26cd30c3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -45,7 +45,8 @@
(MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
#define MLXSW_SP_CHUNK_MAX 32
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
+ u32 *p_entry_index)
{
int entry_index;
int size;
@@ -72,7 +73,8 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
for (i = 0; i < type_entries; i++)
set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
- return entry_index;
+ *p_entry_index = entry_index;
+ return 0;
}
return -ENOBUFS;
}
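
Besides taking the new out parameter, this changes the calling convention from "non-negative index or negative errno in one int" to "0 or -errno, index through *p_entry_index", so a valid index can never be mistaken for an error code; the nexthop code further down drops its ret variable for the same reason. A tiny standalone sketch of the convention (names are illustrative):

#include <errno.h>
#include <stdio.h>

/* Status is 0 or -errno; the index travels through an out parameter,
 * so even a very large index cannot be misread as an error.
 */
static int alloc_index(unsigned int want, unsigned int *p_index)
{
	if (want >= 8)
		return -ENOBUFS;
	*p_index = want;	/* stands in for the real bitmap scan */
	return 0;
}

int main(void)
{
	unsigned int index;
	int err = alloc_index(3, &index);

	if (err) {
		fprintf(stderr, "alloc failed: %d\n", err);
		return 1;
	}
	printf("got index %u\n", index);
	return 0;
}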
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index bd8de6b9be71..33cec1cc1642 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -41,14 +41,184 @@
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
+#include <linux/netdevice.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
+#include <net/fib_rules.h>
+#include <net/l3mdev.h>
#include "spectrum.h"
#include "core.h"
#include "reg.h"
+#include "spectrum_cnt.h"
+#include "spectrum_dpipe.h"
+#include "spectrum_router.h"
+
+struct mlxsw_sp_rif {
+ struct list_head nexthop_list;
+ struct list_head neigh_list;
+ struct net_device *dev;
+ struct mlxsw_sp_fid *f;
+ unsigned char addr[ETH_ALEN];
+ int mtu;
+ u16 rif_index;
+ u16 vr_id;
+ unsigned int counter_ingress;
+ bool counter_ingress_valid;
+ unsigned int counter_egress;
+ bool counter_egress_valid;
+};
+
+static unsigned int *
+mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ return &rif->counter_egress;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ return &rif->counter_ingress;
+ }
+ return NULL;
+}
+
+static bool
+mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ return rif->counter_egress_valid;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ return rif->counter_ingress_valid;
+ }
+ return false;
+}
+
+static void
+mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ bool valid)
+{
+ switch (dir) {
+ case MLXSW_SP_RIF_COUNTER_EGRESS:
+ rif->counter_egress_valid = valid;
+ break;
+ case MLXSW_SP_RIF_COUNTER_INGRESS:
+ rif->counter_ingress_valid = valid;
+ break;
+ }
+}
+
+static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ unsigned int counter_index, bool enable,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ bool is_egress = false;
+ int err;
+
+ if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
+ is_egress = true;
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
+ is_egress);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
+{
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ unsigned int *p_counter_index;
+ bool valid;
+ int err;
+
+ valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
+ if (!valid)
+ return -EINVAL;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+ mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+ MLXSW_REG_RICNT_OPCODE_NOP);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+ *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
+ return 0;
+}
+
+static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
+ unsigned int counter_index)
+{
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+}
+
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ unsigned int *p_counter_index;
+ int err;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+ err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ p_counter_index);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
+ if (err)
+ goto err_counter_clear;
+
+ err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
+ *p_counter_index, true, dir);
+ if (err)
+ goto err_counter_edit;
+ mlxsw_sp_rif_counter_valid_set(rif, dir, true);
+ return 0;
+
+err_counter_edit:
+err_counter_clear:
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ *p_counter_index);
+ return err;
+}
+
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir)
+{
+ unsigned int *p_counter_index;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (WARN_ON(!p_counter_index))
+ return;
+ mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
+ *p_counter_index, false, dir);
+ mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ *p_counter_index);
+ mlxsw_sp_rif_counter_valid_set(rif, dir, false);
+}
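
Bringing a RIF counter up is thus a three-step sequence: allocate an index from the RIF sub-pool, zero the hardware counter through RICNT, and bind it to the interface through RITR; only then is the per-direction valid flag set, which is what mlxsw_sp_rif_counter_value_get() checks before issuing a query. mlxsw_sp_rif_counter_free() undoes the same steps in reverse order.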
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
@@ -89,12 +259,6 @@ mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
}
static void
-mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
- memset(prefix_usage, 0, sizeof(*prefix_usage));
-}
-
-static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
unsigned char prefix_len)
{
@@ -125,7 +289,7 @@ struct mlxsw_sp_fib_node {
struct list_head entry_list;
struct list_head list;
struct rhash_head ht_node;
- struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fib *fib;
struct mlxsw_sp_fib_key key;
};
@@ -149,13 +313,17 @@ struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib {
struct rhashtable ht;
struct list_head node_list;
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
+ enum mlxsw_sp_l3proto proto;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_fib *fib;
int err;
@@ -167,6 +335,8 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
if (err)
goto err_rhashtable_init;
INIT_LIST_HEAD(&fib->node_list);
+ fib->proto = proto;
+ fib->vr = vr;
return fib;
err_rhashtable_init:
@@ -177,24 +347,21 @@ err_rhashtable_init:
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
WARN_ON(!list_empty(&fib->node_list));
+ WARN_ON(fib->lpm_tree);
rhashtable_destroy(&fib->ht);
kfree(fib);
}
static struct mlxsw_sp_lpm_tree *
-mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
+mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
static struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
- lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->ref_count == 0) {
- if (one_reserved)
- one_reserved = false;
- else
- return lpm_tree;
- }
+ for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ if (lpm_tree->ref_count == 0)
+ return lpm_tree;
}
return NULL;
}
@@ -248,12 +415,12 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
- enum mlxsw_sp_l3proto proto, bool one_reserved)
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
- lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+ lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
@@ -283,13 +450,13 @@ static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
- enum mlxsw_sp_l3proto proto, bool one_reserved)
+ enum mlxsw_sp_l3proto proto)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
- lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm.trees[i];
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
@@ -297,7 +464,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
goto inc_ref_count;
}
lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
- proto, one_reserved);
+ proto);
if (IS_ERR(lpm_tree))
return lpm_tree;
@@ -314,15 +481,41 @@ static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
+#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+
+static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_lpm_tree *lpm_tree;
+ u64 max_trees;
int i;
- for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
- lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
+ return -EIO;
+
+ max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
+ mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
+ mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count,
+ sizeof(struct mlxsw_sp_lpm_tree),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router.lpm.trees)
+ return -ENOMEM;
+
+ for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm.trees[i];
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
+
+ return 0;
+}
+
+static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->router.lpm.trees);
+}
+
+static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
+{
+ return !!vr->fib4;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
@@ -332,31 +525,31 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
- if (!vr->used)
+ if (!mlxsw_sp_vr_is_used(vr))
return vr;
}
return NULL;
}
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ const struct mlxsw_sp_fib *fib)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id,
- (enum mlxsw_reg_ralxx_protocol) vr->proto,
- vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ fib->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+ const struct mlxsw_sp_fib *fib)
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -369,8 +562,7 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ u32 tb_id)
{
struct mlxsw_sp_vr *vr;
int i;
@@ -379,69 +571,50 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
vr = &mlxsw_sp->router.vrs[i];
- if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+ if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
return vr;
}
return NULL;
}
+static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return vr->fib4;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ BUG_ON(1);
+ }
+ return NULL;
+}
+
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
- unsigned char prefix_len,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+ u32 tb_id)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage;
- struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_vr *vr;
- int err;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr)
return ERR_PTR(-EBUSY);
- vr->fib = mlxsw_sp_fib_create();
- if (IS_ERR(vr->fib))
- return ERR_CAST(vr->fib);
-
- vr->proto = proto;
+ vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr->fib4))
+ return ERR_CAST(vr->fib4);
vr->tb_id = tb_id;
- mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
- proto, true);
- if (IS_ERR(lpm_tree)) {
- err = PTR_ERR(lpm_tree);
- goto err_tree_get;
- }
- vr->lpm_tree = lpm_tree;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
- if (err)
- goto err_tree_bind;
-
- vr->used = true;
return vr;
-
-err_tree_bind:
- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
-err_tree_get:
- mlxsw_sp_fib_destroy(vr->fib);
-
- return ERR_PTR(err);
}
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
- mlxsw_sp_fib_destroy(vr->fib);
- vr->used = false;
+ mlxsw_sp_fib_destroy(vr->fib4);
+ vr->fib4 = NULL;
}
static int
-mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
- struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
struct mlxsw_sp_lpm_tree *new_tree;
int err;
@@ -449,7 +622,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return 0;
new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
- vr->proto, false);
+ fib->proto);
if (IS_ERR(new_tree)) {
/* We failed to get a tree according to the required
* prefix usage. However, the current tree might be still good
@@ -463,8 +636,8 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
}
/* Prevent packet loss by overwriting existing binding */
- vr->lpm_tree = new_tree;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+ fib->lpm_tree = new_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
if (err)
goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
@@ -472,53 +645,26 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return 0;
err_tree_bind:
- vr->lpm_tree = lpm_tree;
+ fib->lpm_tree = lpm_tree;
mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
return err;
}
-static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
- unsigned char prefix_len,
- u32 tb_id,
- enum mlxsw_sp_l3proto proto)
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
struct mlxsw_sp_vr *vr;
- int err;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
- if (!vr) {
- vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
- if (IS_ERR(vr))
- return vr;
- } else {
- struct mlxsw_sp_prefix_usage req_prefix_usage;
-
- mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
- &vr->fib->prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
- /* Need to replace LPM tree in case new prefix is required. */
- err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
- &req_prefix_usage);
- if (err)
- return ERR_PTR(err);
- }
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
return vr;
}
-static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
- /* Destroy virtual router entity in case the associated FIB is empty
- * and allow it to be used for other tables in future. Otherwise,
- * check if some prefix usage did not disappear and change tree if
- * that is the case. Note that in case new, smaller tree cannot be
- * allocated, the original one will be kept being used.
- */
- if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
- mlxsw_sp_vr_destroy(mlxsw_sp, vr);
- else
- mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
- &vr->fib->prefix_usage);
+ if (!vr->rif_count && list_empty(&vr->fib4->node_list))
+ mlxsw_sp_vr_destroy(vr);
}
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -627,14 +773,14 @@ static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
int err;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
- if (!r)
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
+ if (!rif)
return ERR_PTR(-EINVAL);
- neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
+ neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
if (!neigh_entry)
return ERR_PTR(-ENOMEM);
@@ -642,7 +788,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
if (err)
goto err_neigh_entry_insert;
- list_add(&neigh_entry->rif_list_node, &r->neigh_list);
+ list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
return neigh_entry;
@@ -1050,22 +1196,22 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_rif *r)
+ const struct mlxsw_sp_rif *rif)
{
char rauht_pl[MLXSW_REG_RAUHT_LEN];
mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
- r->rif, r->addr);
+ rif->rif_index, rif->addr);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
+ struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
- mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
- list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
+ mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
+ list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
rif_list_node)
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
@@ -1082,7 +1228,7 @@ struct mlxsw_sp_nexthop {
*/
struct rhash_head ht_node;
struct mlxsw_sp_nexthop_key key;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
u8 should_offload:1, /* set indicates this neigh is connected and
* should be put to KVD linear area of this group.
*/
@@ -1109,7 +1255,7 @@ struct mlxsw_sp_nexthop_group {
u16 ecmp_size;
u16 count;
struct mlxsw_sp_nexthop nexthops[0];
-#define nh_rif nexthops[0].r
+#define nh_rif nexthops[0].rif
};
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
@@ -1171,7 +1317,7 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vr *vr,
+ const struct mlxsw_sp_fib *fib,
u32 adj_index, u16 ecmp_size,
u32 new_adj_index,
u16 new_ecmp_size)
@@ -1179,8 +1325,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
char raleu_pl[MLXSW_REG_RALEU_LEN];
mlxsw_reg_raleu_pack(raleu_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
- adj_index, ecmp_size, new_adj_index,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ fib->vr->id, adj_index, ecmp_size, new_adj_index,
new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1190,14 +1336,14 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
u32 old_adj_index, u16 old_ecmp_size)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_vr *vr = NULL;
+ struct mlxsw_sp_fib *fib = NULL;
int err;
list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (vr == fib_entry->fib_node->vr)
+ if (fib == fib_entry->fib_node->fib)
continue;
- vr = fib_entry->fib_node->vr;
- err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+ fib = fib_entry->fib_node->fib;
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
old_adj_index,
old_ecmp_size,
nh_grp->adj_index,
@@ -1280,7 +1426,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
bool old_adj_index_valid;
u32 old_adj_index;
u16 old_ecmp_size;
- int ret;
int i;
int err;
@@ -1318,15 +1463,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
*/
goto set_trap;
- ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
- if (ret < 0) {
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+ if (err) {
/* We ran out of KVD linear space, just set the
* trap and let everything flow through kernel.
*/
dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
goto set_trap;
}
- adj_index = ret;
old_adj_index_valid = nh_grp->adj_index_valid;
old_adj_index = nh_grp->adj_index;
old_ecmp_size = nh_grp->ecmp_size;
@@ -1399,22 +1543,22 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
}
static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
- struct mlxsw_sp_rif *r)
+ struct mlxsw_sp_rif *rif)
{
- if (nh->r)
+ if (nh->rif)
return;
- nh->r = r;
- list_add(&nh->rif_list_node, &r->nexthop_list);
+ nh->rif = rif;
+ list_add(&nh->rif_list_node, &rif->nexthop_list);
}
static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
- if (!nh->r)
+ if (!nh->rif)
return;
list_del(&nh->rif_list_node);
- nh->r = NULL;
+ nh->rif = NULL;
}
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
@@ -1505,7 +1649,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
{
struct net_device *dev = fib_nh->nh_dev;
struct in_device *in_dev;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
int err;
nh->nh_grp = nh_grp;
@@ -1514,15 +1658,18 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ if (!dev)
+ return 0;
+
in_dev = __in_dev_get_rtnl(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
fib_nh->nh_flags & RTNH_F_LINKDOWN)
return 0;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!r)
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
return 0;
- mlxsw_sp_nexthop_rif_init(nh, r);
+ mlxsw_sp_nexthop_rif_init(nh, rif);
err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
if (err)
@@ -1548,7 +1695,7 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_key key;
struct mlxsw_sp_nexthop *nh;
- struct mlxsw_sp_rif *r;
+ struct mlxsw_sp_rif *rif;
if (mlxsw_sp->router.aborted)
return;
@@ -1558,13 +1705,13 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON_ONCE(!nh))
return;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
- if (!r)
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
+ if (!rif)
return;
switch (event) {
case FIB_EVENT_NH_ADD:
- mlxsw_sp_nexthop_rif_init(nh, r);
+ mlxsw_sp_nexthop_rif_init(nh, rif);
mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
break;
case FIB_EVENT_NH_DEL:
@@ -1577,11 +1724,11 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
}
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
+ struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_nexthop *nh, *tmp;
- list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
+ list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
mlxsw_sp_nexthop_rif_fini(nh);
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
@@ -1699,7 +1846,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
fib_entry->offloaded = true;
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
fib_info_offload_inc(fib_entry->nh_group->key.fi);
break;
@@ -1711,7 +1858,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
fib_info_offload_dec(fib_entry->nh_group->key.fi);
break;
@@ -1751,8 +1898,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_ralue_op op)
{
char ralue_pl[MLXSW_REG_RALUE_LEN];
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -1772,8 +1919,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
@@ -1784,27 +1931,28 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
- struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
+ struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
enum mlxsw_reg_ralue_trap_action trap_action;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
u16 trap_id = 0;
- u16 rif = 0;
+ u16 rif_index = 0;
if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
- rif = r->rif;
+ rif_index = rif->rif_index;
} else {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+ rif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1812,13 +1960,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
- struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
mlxsw_reg_ralue_pack4(ralue_pl,
- (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
- vr->id, fib_entry->fib_node->key.prefix_len,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+ fib->vr->id, fib_entry->fib_node->key.prefix_len,
*p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1845,7 +1993,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
{
int err = -EINVAL;
- switch (fib_entry->fib_node->vr->proto) {
+ switch (fib_entry->fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
break;
@@ -1877,17 +2025,29 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
{
struct fib_info *fi = fen_info->fi;
- if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
+ switch (fen_info->type) {
+ case RTN_BROADCAST: /* fall through */
+ case RTN_LOCAL:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
- }
- if (fen_info->type != RTN_UNICAST)
- return -EINVAL;
- if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ case RTN_UNREACHABLE: /* fall through */
+ case RTN_BLACKHOLE: /* fall through */
+ case RTN_PROHIBIT:
+ /* Packets hitting these routes need to be trapped, but they
+ * can be trapped with a lower priority than packets directed
+ * at the host, so use action type local instead of trap.
+ */
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- else
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- return 0;
+ return 0;
+ case RTN_UNICAST:
+ if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ else
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
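
The rewritten classifier keeps the old behaviour for host-directed routes (RTN_LOCAL and RTN_BROADCAST still use the high-priority trap action) and adds the administrative types: RTN_UNREACHABLE, RTN_BLACKHOLE and RTN_PROHIBIT packets still have to reach the CPU, but the local action traps them at a lower priority, as the comment explains. Unicast routes are offloaded as remote, through the adjacency table, only when the nexthop is link-scoped; otherwise they fall back to the local action.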
static struct mlxsw_sp_fib_entry *
@@ -1996,7 +2156,7 @@ mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
}
static struct mlxsw_sp_fib_node *
-mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
+mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
size_t addr_len, unsigned char prefix_len)
{
struct mlxsw_sp_fib_node *fib_node;
@@ -2006,18 +2166,15 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
return NULL;
INIT_LIST_HEAD(&fib_node->entry_list);
- list_add(&fib_node->list, &vr->fib->node_list);
+ list_add(&fib_node->list, &fib->node_list);
memcpy(fib_node->key.addr, addr, addr_len);
fib_node->key.prefix_len = prefix_len;
- mlxsw_sp_fib_node_insert(vr->fib, fib_node);
- fib_node->vr = vr;
return fib_node;
}
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
- mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
list_del(&fib_node->list);
WARN_ON(!list_empty(&fib_node->entry_list));
kfree(fib_node);
@@ -2034,7 +2191,7 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
@@ -2043,32 +2200,98 @@ static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}
+static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node,
+ struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int err;
+
+ err = mlxsw_sp_fib_node_insert(fib, fib_node);
+ if (err)
+ return err;
+ fib_node->fib = fib;
+
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+
+ if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+ err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
+ &req_prefix_usage);
+ if (err)
+ goto err_tree_check;
+ } else {
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return PTR_ERR(lpm_tree);
+ fib->lpm_tree = lpm_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
+ if (err)
+ goto err_tree_bind;
+ }
+
+ mlxsw_sp_fib_node_prefix_inc(fib_node);
+
+ return 0;
+
+err_tree_bind:
+ fib->lpm_tree = NULL;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_tree_check:
+ fib_node->fib = NULL;
+ mlxsw_sp_fib_node_remove(fib, fib_node);
+ return err;
+}
+
+static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
+
+ mlxsw_sp_fib_node_prefix_dec(fib_node);
+
+ if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+ fib->lpm_tree = NULL;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+ } else {
+ mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
+ }
+
+ fib_node->fib = NULL;
+ mlxsw_sp_fib_node_remove(fib, fib_node);
+}
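
These two helpers move LPM tree management from the virtual router to the FIB node lifecycle: the first prefix inserted into a FIB acquires a tree matching the required prefix usage and binds it to the VR, later insertions may swap the tree via mlxsw_sp_vr_lpm_tree_check(), and removing the last prefix unbinds and releases it. This is what lets mlxsw_sp_vr_create() above shed its prefix-length argument and its private tree.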
+
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_node *fib_node;
+ struct mlxsw_sp_fib *fib;
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
- MLXSW_SP_L3_PROTO_IPV4);
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
if (IS_ERR(vr))
return ERR_CAST(vr);
+ fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
- fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
+ fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
sizeof(fen_info->dst),
fen_info->dst_len);
if (fib_node)
return fib_node;
- fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
+ fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
sizeof(fen_info->dst),
fen_info->dst_len);
if (!fib_node) {
@@ -2076,22 +2299,29 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
goto err_fib_node_create;
}
+ err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
+ if (err)
+ goto err_fib_node_init;
+
return fib_node;
+err_fib_node_init:
+ mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_vr *vr = fib_node->vr;
+ struct mlxsw_sp_vr *vr = fib_node->fib->vr;
if (!list_empty(&fib_node->entry_list))
return;
+ mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_vr_put(vr);
}
static struct mlxsw_sp_fib_entry *
@@ -2236,8 +2466,6 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_fib4_node_entry_add;
- mlxsw_sp_fib_node_prefix_inc(fib_node);
-
return 0;
err_fib4_node_entry_add:
@@ -2251,7 +2479,6 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- mlxsw_sp_fib_node_prefix_dec(fib_node);
mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
mlxsw_sp_fib4_node_list_remove(fib_entry);
}
@@ -2340,9 +2567,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
char ralst_pl[MLXSW_REG_RALST_LEN];
- char raltb_pl[MLXSW_REG_RALTB_LEN];
- char ralue_pl[MLXSW_REG_RALUE_LEN];
- int err;
+ int i, err;
mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
MLXSW_SP_LPM_TREE_MIN);
@@ -2355,16 +2580,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
- MLXSW_SP_LPM_TREE_MIN);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
- if (err)
- return err;
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ if (!mlxsw_sp_vr_is_used(vr))
+ continue;
+
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+ raltb_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
+ 0);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+ ralue_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -2390,7 +2632,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
- switch (fib_node->vr->proto) {
+ switch (fib_node->fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
break;
@@ -2400,26 +2642,32 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
+ enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
struct mlxsw_sp_fib_node *fib_node, *tmp;
- struct mlxsw_sp_vr *vr;
+
+ list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
+ bool do_break = &tmp->list == &fib->node_list;
+
+ mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
+ if (do_break)
+ break;
+ }
+}
+
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+{
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
- if (!vr->used)
+ if (!mlxsw_sp_vr_is_used(vr))
continue;
-
- list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
- list) {
- bool do_break = &tmp->list == &vr->fib->node_list;
-
- mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
- if (do_break)
- break;
- }
+ mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
}
}
@@ -2437,74 +2685,11 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
-static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
-{
- char ritr_pl[MLXSW_REG_RITR_LEN];
- int err;
-
- mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
- if (WARN_ON_ONCE(err))
- return err;
-
- mlxsw_reg_ritr_enable_set(ritr_pl, false);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *r)
-{
- mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
- mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
- mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
-}
-
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
- u64 max_rifs;
- int err;
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
- return -EIO;
-
- max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
- mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
- GFP_KERNEL);
- if (!mlxsw_sp->rifs)
- return -ENOMEM;
-
- mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
- if (err)
- goto err_rgcr_fail;
-
- return 0;
-
-err_rgcr_fail:
- kfree(mlxsw_sp->rifs);
- return err;
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
- int i;
-
- mlxsw_reg_rgcr_pack(rgcr_pl, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
-
- kfree(mlxsw_sp->rifs);
-}
-
struct mlxsw_sp_fib_event_work {
struct work_struct work;
union {
struct fib_entry_notifier_info fen_info;
+ struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
};
struct mlxsw_sp *mlxsw_sp;
@@ -2516,6 +2701,7 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+ struct fib_rule *rule;
bool replace, append;
int err;
@@ -2539,7 +2725,10 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
break;
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ rule = fib_work->fr_info.rule;
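+ /* Custom FIB rules cannot be offloaded, so abort and let the
+ * kernel handle routing; only default and l3mdev (VRF) rules
+ * can be honoured by the device.
+ */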
+ if (!fib4_rule_default(rule) && !rule->l3mdev)
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ fib_rule_put(rule);
break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
@@ -2582,6 +2771,11 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
*/
fib_info_hold(fib_work->fen_info.fi);
break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
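+ /* Take a reference on the rule; it is released by the work
+ * item once the event has been processed.
+ */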
+ memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+ fib_rule_get(fib_work->fr_info.rule);
+ break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
@@ -2594,6 +2788,707 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+ return mlxsw_sp->rifs[i];
+
+ return NULL;
+}
+
+static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (WARN_ON_ONCE(err))
+ return err;
+
+ mlxsw_reg_ritr_enable_set(ritr_pl, false);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
+ mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
+ const struct in_device *in_dev,
+ unsigned long event)
+{
+ switch (event) {
+ case NETDEV_UP:
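+ /* A RIF is configured on the first address only */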
+ if (!rif)
+ return true;
+ return false;
+ case NETDEV_DOWN:
+ if (rif && !in_dev->ifa_list &&
+ !netif_is_l3_slave(rif->dev))
+ return true;
+ /* It is possible we already removed the RIF ourselves
+ * if it was assigned to a netdev that is now a bridge
+ * or LAG slave.
+ */
+ return false;
+ }
+
+ return false;
+}
+
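+/* Sentinel returned when no free RIF index is available */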
+#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ if (!mlxsw_sp->rifs[i])
+ return i;
+
+ return MLXSW_SP_INVALID_INDEX_RIF;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+ bool *p_lagged, u16 *p_system_port)
+{
+ u8 local_port = mlxsw_sp_vport->local_port;
+
+ *p_lagged = mlxsw_sp_vport->lagged;
+ *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+ u16 vr_id, struct net_device *l3_dev,
+ u16 rif_index, bool create)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ bool lagged = mlxsw_sp_vport->lagged;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 system_port;
+
+ mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
+ vr_id, l3_dev->mtu, l3_dev->dev_addr);
+
+ mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+ mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
+{
+ return MLXSW_SP_RFID_BASE + rif_index;
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->leave = mlxsw_sp_vport_rif_sp_leave;
+ f->ref_count = 0;
+ f->dev = l3_dev;
+ f->fid = fid;
+
+ return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = kzalloc(sizeof(*rif), GFP_KERNEL);
+ if (!rif)
+ return NULL;
+
+ INIT_LIST_HEAD(&rif->nexthop_list);
+ INIT_LIST_HEAD(&rif->neigh_list);
+ ether_addr_copy(rif->addr, l3_dev->dev_addr);
+ rif->mtu = l3_dev->mtu;
+ rif->vr_id = vr_id;
+ rif->dev = l3_dev;
+ rif->rif_index = rif_index;
+ rif->f = f;
+
+ return rif;
+}
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
+{
+ return rif->rif_index;
+}
+
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
+{
+ return rif->dev->ifindex;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ u32 tb_id = l3mdev_fib_table(l3_dev);
+ struct mlxsw_sp_vr *vr;
+ struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_rif *rif;
+ u16 fid, rif_index;
+ int err;
+
+ rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
+ return ERR_PTR(-ERANGE);
+
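+ /* Fall back to the main table when the netdev is not enslaved
+ * to an L3 master device (VRF), in which case l3mdev_fib_table()
+ * returns zero.
+ */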
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
+ rif_index, true);
+ if (err)
+ goto err_vport_rif_sp_op;
+
+ fid = mlxsw_sp_rif_sp_to_fid(rif_index);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_rfid_alloc;
+ }
+
+ rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+ if (!rif) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
+ err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS);
+ if (err)
+ netdev_dbg(mlxsw_sp_vport->dev,
+ "Counter alloc Failed err=%d\n", err);
+ }
+
+ f->rif = rif;
+ mlxsw_sp->rifs[rif_index] = rif;
+ vr->rif_count++;
+
+ return rif;
+
+err_rif_alloc:
+ kfree(f);
+err_rfid_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
+ false);
+err_vport_rif_sp_op:
+ mlxsw_sp_vr_put(vr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
+ struct net_device *l3_dev = rif->dev;
+ struct mlxsw_sp_fid *f = rif->f;
+ u16 rif_index = rif->rif_index;
+ u16 fid = f->fid;
+
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+
+ vr->rif_count--;
+ mlxsw_sp->rifs[rif_index] = NULL;
+ f->rif = NULL;
+
+ kfree(rif);
+
+ kfree(f);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
+ false);
+ mlxsw_sp_vr_put(vr);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!rif) {
+ rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
+ }
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
+ rif->f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
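+ /* Destroy the RIF once the last vPort has left the FID */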
+ if (--f->ref_count == 0)
+ mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ case NETDEV_DOWN:
+ mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(port_dev) ||
+ netif_is_lag_port(port_dev) ||
+ netif_is_ovs_port(port_dev))
+ return 0;
+
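+ /* A port that is not a bridge, LAG or OVS slave is backed by
+ * its default VID 1 vPort.
+ */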
+ return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+ struct net_device *lag_dev,
+ unsigned long event, u16 vid)
+{
+ struct net_device *port_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+ if (mlxsw_sp_port_dev_check(port_dev)) {
+ err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+ event, vid);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(lag_dev))
+ return 0;
+
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ u16 fid;
+
+ if (is_vlan_dev(l3_dev))
+ fid = vlan_dev_vlan_id(l3_dev);
+ else if (mlxsw_sp->master_bridge.dev == l3_dev)
+ fid = 1;
+ else
+ return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+ return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+{
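+ /* The router port is a virtual port one past the last front
+ * panel port; flooded traffic is directed to the router
+ * through it in the flood tables below.
+ */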
+ return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
+}
+
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+ bool set)
+{
+ u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
+ enum mlxsw_flood_table_type table_type;
+ char *sftr_pl;
+ u16 index;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ table_type = mlxsw_sp_flood_table_type_get(fid);
+ index = mlxsw_sp_flood_table_index_get(fid);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
+ 1, router_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+ kfree(sftr_pl);
+ return err;
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return MLXSW_REG_RITR_FID_IF;
+ else
+ return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
+ struct net_device *l3_dev,
+ u16 fid, u16 rif,
+ bool create)
+{
+ enum mlxsw_reg_ritr_if_type rif_type;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_type = mlxsw_sp_rif_type_get(fid);
+ mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
+ l3_dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ u32 tb_id = l3mdev_fib_table(l3_dev);
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_vr *vr;
+ u16 rif_index;
+ int err;
+
+ rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
+ return -ERANGE;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
+ if (err)
+ goto err_port_flood_set;
+
+ err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
+ rif_index, true);
+ if (err)
+ goto err_rif_bridge_op;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+ if (!rif) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->rif = rif;
+ mlxsw_sp->rifs[rif_index] = rif;
+ vr->rif_count++;
+
+ netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
+
+ return 0;
+
+err_rif_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
+ false);
+err_rif_bridge_op:
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+err_port_flood_set:
+ mlxsw_sp_vr_put(vr);
+ return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
+ struct net_device *l3_dev = rif->dev;
+ struct mlxsw_sp_fid *f = rif->f;
+ u16 rif_index = rif->rif_index;
+
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+
+ vr->rif_count--;
+ mlxsw_sp->rifs[rif_index] = NULL;
+ f->rif = NULL;
+
+ kfree(rif);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
+ false);
+
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
+ mlxsw_sp_vr_put(vr);
+
+ netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+ struct net_device *br_dev,
+ unsigned long event)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ struct mlxsw_sp_fid *f;
+
+ /* FID can either be an actual FID if the L3 device is the
+ * VLAN-aware bridge or a VLAN device on top. Otherwise, the
+ * L3 device is a VLAN-unaware bridge and we get a vFID.
+ */
+ f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+ if (WARN_ON(!f))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+ unsigned long event)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_lag_master(real_dev))
+ return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_bridge_master(real_dev) &&
+ mlxsw_sp->master_bridge.dev == real_dev)
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+ event);
+
+ return 0;
+}
+
+static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
+ unsigned long event)
+{
+ if (mlxsw_sp_port_dev_check(dev))
+ return mlxsw_sp_inetaddr_port_event(dev, event);
+ else if (netif_is_lag_master(dev))
+ return mlxsw_sp_inetaddr_lag_event(dev, event);
+ else if (netif_is_bridge_master(dev))
+ return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ else if (is_vlan_dev(dev))
+ return mlxsw_sp_inetaddr_vlan_event(dev, event);
+ else
+ return 0;
+}
+
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(dev, event);
+out:
+ return notifier_from_errno(err);
+}
+
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ const char *mac, int mtu)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
+ dev->mtu);
+ if (err)
+ goto err_rif_edit;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ ether_addr_copy(rif->addr, dev->dev_addr);
+ rif->mtu = dev->mtu;
+
+ netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
+
+ return 0;
+
+err_rif_fdb_op:
+ mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
+err_rif_edit:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
+ return err;
+}
+
+static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ /* If netdev is already associated with a RIF, then we need to
+ * destroy it and create a new one with the new virtual router ID.
+ */
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (rif)
+ __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+
+ return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
+}
+
+static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!rif)
+ return;
+ __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+}
+
+int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ int err = 0;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ return 0;
+ case NETDEV_CHANGEUPPER:
+ if (info->linking)
+ err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
+ else
+ mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
+ break;
+ }
+
+ return err;
+}
+
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
@@ -2606,6 +3501,48 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
mlxsw_sp_router_fib_flush(mlxsw_sp);
}
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ u64 max_rifs;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
+ return -EIO;
+
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->rifs)
+ return -ENOMEM;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ if (err)
+ goto err_rgcr_fail;
+
+ return 0;
+
+err_rgcr_fail:
+ kfree(mlxsw_sp->rifs);
+ return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int i;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+ kfree(mlxsw_sp->rifs);
+}
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
@@ -2625,7 +3562,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_nexthop_group_ht_init;
- mlxsw_sp_lpm_init(mlxsw_sp);
+ err = mlxsw_sp_lpm_init(mlxsw_sp);
+ if (err)
+ goto err_lpm_init;
+
err = mlxsw_sp_vrs_init(mlxsw_sp);
if (err)
goto err_vrs_init;
@@ -2647,6 +3587,8 @@ err_register_fib_notifier:
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
+ mlxsw_sp_lpm_fini(mlxsw_sp);
+err_lpm_init:
rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
err_nexthop_group_ht_init:
rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
@@ -2660,6 +3602,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(&mlxsw_sp->fib_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
+ mlxsw_sp_lpm_fini(mlxsw_sp);
rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
__mlxsw_sp_router_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
new file mode 100644
index 000000000000..c3095fef6697
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -0,0 +1,58 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ROUTER_H_
+#define _MLXSW_ROUTER_H_
+
+#include "spectrum.h"
+
+enum mlxsw_sp_rif_counter_dir {
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+};
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ u64 *cnt);
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir);
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir);
+
+#endif /* _MLXSW_ROUTER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 598727d578c1..0d8411f1f954 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -568,8 +568,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
list_del(&f->list);
- if (f->r)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f->rif)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
kfree(f);
@@ -745,27 +745,6 @@ err_port_allow_untagged_set:
return err;
}
-static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end, bool is_member,
- bool untagged)
-{
- u16 vid, vid_e;
- int err;
-
- for (vid = vid_begin; vid <= vid_end;
- vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
- vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
- vid_end);
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
- is_member, untagged);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool learn_enable)
@@ -804,8 +783,8 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
- true, flag_untagged);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ true, flag_untagged);
if (err) {
netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
vid_end);
@@ -863,8 +842,8 @@ err_port_vid_learning_set:
if (old_pvid != mlxsw_sp_port->pvid)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
- __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
err_port_vlans_set:
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
return err;
@@ -1012,7 +991,7 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
if (clear_all_ports) {
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
if (mlxsw_sp->ports[i])
mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
}
@@ -1171,8 +1150,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
if (pvid >= vid_begin && pvid <= vid_end)
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index ec1e886d4566..3b0f72455681 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1321,7 +1321,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
{
int i;
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
if (mlxsw_sx_port_created(mlxsw_sx, i))
mlxsw_sx_port_remove(mlxsw_sx, i);
kfree(mlxsw_sx->ports);
@@ -1329,17 +1329,18 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
size_t alloc_size;
u8 module, width;
int i;
int err;
- alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+ alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_sx->ports)
return -ENOMEM;
- for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ for (i = 1; i < max_ports; i++) {
err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
&width);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 02ea48b15eb5..e008fdbed20f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -55,6 +55,7 @@ enum {
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
+ MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 279ee4612981..20358f87de57 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -212,25 +212,6 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
}
/**
- * ks8851_rx_1msg - select whether to use one or two messages for spi read
- * @ks: The device structure
- *
- * Return whether to generate a single message with a tx and rx buffer
- * supplied to spi_sync(), or alternatively send the tx and rx buffers
- * as separate messages.
- *
- * Depending on the hardware in use, a single message may be more efficient
- * on interrupts or work done by the driver.
- *
- * This currently always returns true until we add some per-device data passed
- * from the platform code to specify which mode is better.
- */
-static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
-{
- return true;
-}
-
-/**
* ks8851_rdreg - issue read register command and return the data
* @ks: The device state
* @op: The register address and byte enables in message format.
@@ -251,14 +232,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
- if (ks8851_rx_1msg(ks)) {
- msg = &ks->spi_msg1;
- xfer = &ks->spi_xfer1;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = trx;
- xfer->len = rxl + 2;
- } else {
+ if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
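+ /* A half-duplex SPI master cannot shift data in and out in a
+ * single transfer, so send the command and read the reply as
+ * two separate messages.
+ */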
msg = &ks->spi_msg2;
xfer = ks->spi_xfer2;
@@ -270,15 +244,22 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
xfer->tx_buf = NULL;
xfer->rx_buf = trx;
xfer->len = rxl;
+ } else {
+ msg = &ks->spi_msg1;
+ xfer = &ks->spi_xfer1;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = rxl + 2;
}
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (ks8851_rx_1msg(ks))
- memcpy(rxb, trx + 2, rxl);
- else
+ else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
memcpy(rxb, trx, rxl);
+ else
+ memcpy(rxb, trx + 2, rxl);
}
/**
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 6ad44be08b33..c0d7d5eec7e7 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -228,8 +228,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
net_dbg_ratelimited("packet error\n");
- priv->stats.rx_dropped++;
- priv->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
goto rx_next;
}
@@ -245,8 +245,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (unlikely(!skb)) {
net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
- priv->stats.rx_dropped++;
- priv->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
goto rx_next;
}
@@ -256,10 +256,10 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
napi_gro_receive(&priv->napi, skb);
rx++;
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
if (desc0 & RX_DESC0_MULTICAST)
- priv->stats.multicast++;
+ ndev->stats.multicast++;
rx_next:
wmb(); /* prevent setting ownership back too early */
@@ -296,8 +296,8 @@ static void moxart_tx_finished(struct net_device *ndev)
dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
priv->tx_skb[tx_tail] = NULL;
@@ -349,7 +349,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
net_dbg_ratelimited("no TX space for packet\n");
- priv->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
goto out_unlock;
}
rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
@@ -400,13 +400,6 @@ out_unlock:
return ret;
}
-static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
-{
- struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-
- return &priv->stats;
-}
-
static void moxart_mac_setmulticast(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -456,7 +449,6 @@ static const struct net_device_ops moxart_netdev_ops = {
.ndo_open = moxart_mac_open,
.ndo_stop = moxart_mac_stop,
.ndo_start_xmit = moxart_mac_start_xmit,
- .ndo_get_stats = moxart_mac_get_stats,
.ndo_set_rx_mode = moxart_mac_set_rx_mode,
.ndo_set_mac_address = moxart_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index afc32ec998c0..686b8957d5cf 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -293,7 +293,6 @@
struct moxart_mac_priv_t {
void __iomem *base;
- struct net_device_stats stats;
unsigned int reg_maccr;
unsigned int reg_imr;
struct napi_struct napi;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 6933afa69df2..4b15f0f496aa 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -6,8 +6,10 @@ nfp-objs := \
nfpcore/nfp_cpplib.o \
nfpcore/nfp_hwinfo.o \
nfpcore/nfp_mip.o \
+ nfpcore/nfp_mutex.o \
nfpcore/nfp_nffw.o \
nfpcore/nfp_nsp.o \
+ nfpcore/nfp_nsp_cmds.o \
nfpcore/nfp_nsp_eth.o \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
index 335beb8b8b45..97a8f00674d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
@@ -798,7 +798,7 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_insn *insn = &meta->insn;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
@@ -818,7 +818,7 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u32 tmp_reg;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
if (!swap)
@@ -847,7 +847,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (swap) {
areg ^= breg;
@@ -1132,7 +1132,7 @@ static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
else
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return 0;
}
@@ -1143,7 +1143,7 @@ static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (meta->insn.off != offsetof(struct xdp_md, data) &&
meta->insn.off != offsetof(struct xdp_md, data_end))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
@@ -1174,12 +1174,12 @@ static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (meta->insn.off == offsetof(struct sk_buff, mark))
return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1192,7 +1192,7 @@ static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
if (meta->insn.off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
return 0;
@@ -1206,7 +1206,7 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u32 tmp_reg;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (imm & ~0U) {
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
@@ -1245,7 +1245,7 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u32 tmp_reg;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!imm) {
meta->skip = true;
@@ -1276,7 +1276,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u32 tmp_reg;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!imm) {
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
@@ -1302,7 +1302,7 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
if (insn->off < 0) /* TODO */
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index dedac720fb29..dde35dae35c5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -48,7 +48,7 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
-#include "nfpcore/nfp_nsp_eth.h"
+#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
@@ -253,6 +253,7 @@ exit_release_fw:
static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
+ struct nfp_nsp_identify *nspi;
struct nfp_nsp *nsp;
int err;
@@ -269,6 +270,12 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ nspi = __nfp_nsp_identify(nsp);
+ if (nspi) {
+ dev_info(&pdev->dev, "BSP: %s\n", nspi->version);
+ kfree(nspi);
+ }
+
err = nfp_fw_load(pdev, pf, nsp);
if (err < 0) {
kfree(pf->eth_tbl);
@@ -385,8 +392,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
- if (!list_empty(&pf->ports))
- nfp_net_pci_remove(pf);
+ nfp_net_pci_remove(pf);
nfp_pcie_sriov_disable(pdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 39105d0435e9..b57de047b002 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -42,7 +42,9 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/workqueue.h>
struct dentry;
struct pci_dev;
@@ -64,8 +66,11 @@ struct nfp_eth_table;
* @fw_loaded: Is the firmware loaded?
* @eth_tbl: NSP ETH table
* @ddir: Per-device debugfs directory
- * @num_ports: Number of adapter ports
+ * @num_ports: Number of adapter ports app firmware supports
+ * @num_netdevs: Number of netdevs spawned
* @ports: Linked list of port structures (struct nfp_net)
+ * @port_lock: Protects @ports, @num_ports, @num_netdevs
+ * @port_refresh_work: Work entry for tearing down netdevs on port refresh
*/
struct nfp_pf {
struct pci_dev *pdev;
@@ -88,7 +93,11 @@ struct nfp_pf {
struct dentry *ddir;
unsigned int num_ports;
+ unsigned int num_netdevs;
+
struct list_head ports;
+ struct work_struct port_refresh_work;
+ struct mutex port_lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index e614a376b595..fcf81b3be830 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -50,14 +50,14 @@
#include "nfp_net_ctrl.h"
-#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args)
-#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
-#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
-#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args)
-#define nn_warn_ratelimit(nn, fmt, args...) \
+#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args)
+#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
+#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
+#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args)
+#define nn_dp_warn(dp, fmt, args...) \
do { \
if (unlikely(net_ratelimit())) \
- netdev_warn((nn)->netdev, fmt, ## args); \
+ netdev_warn((dp)->netdev, fmt, ## args); \
} while (0)
/* Max time to wait for NFP to respond on updates (in seconds) */
@@ -112,6 +112,7 @@
/* Forward declarations */
struct nfp_cpp;
+struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
@@ -200,6 +201,7 @@ struct nfp_net_tx_buf {
* @txds: Virtual address of TX ring in host memory
* @dma: DMA address of the TX ring
* @size: Size, in bytes, of the TX ring (needed to free)
+ * @is_xdp: Is this an XDP TX ring?
*/
struct nfp_net_tx_ring {
struct nfp_net_r_vector *r_vec;
@@ -220,6 +222,7 @@ struct nfp_net_tx_ring {
dma_addr_t dma;
unsigned int size;
+ bool is_xdp;
} ____cacheline_aligned;
/* RX and freelist descriptor format */
@@ -283,6 +286,12 @@ struct nfp_net_rx_desc {
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+struct nfp_meta_parsed {
+ u32 hash_type;
+ u32 hash;
+ u32 mark;
+};
+
struct nfp_net_rx_hash {
__be32 hash_type;
__be32 hash;
@@ -306,17 +315,13 @@ struct nfp_net_rx_buf {
* @rd_p: FL/RX ring read pointer (free running)
* @idx: Ring index from Linux's perspective
* @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
- * @rx_qcidx: Queue Controller Peripheral (QCP) queue index for the RX queue
* @qcp_fl: Pointer to base of the QCP freelist queue
- * @qcp_rx: Pointer to base of the QCP RX queue
* @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
* (used for free list batching)
* @rxbufs: Array of transmitted FL/RX buffers
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
- * @bufsz: Buffer allocation size for convenience of management routines
- * (NOTE: this is in second cache line, do not use on fast path!)
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -325,20 +330,17 @@ struct nfp_net_rx_ring {
u32 wr_p;
u32 rd_p;
- u16 idx;
- u16 wr_ptr_add;
+ u32 idx;
+ u32 wr_ptr_add;
int fl_qcidx;
- int rx_qcidx;
u8 __iomem *qcp_fl;
- u8 __iomem *qcp_rx;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
dma_addr_t dma;
unsigned int size;
- unsigned int bufsz;
} ____cacheline_aligned;
/**
@@ -433,19 +435,76 @@ struct nfp_stat_pair {
};
/**
- * struct nfp_net - NFP network device structure
- * @pdev: Backpointer to PCI device
- * @netdev: Backpointer to net_device structure
- * @is_vf: Is the driver attached to a VF?
+ * struct nfp_net_dp - NFP network device datapath data structure
+ * @dev: Backpointer to struct device
+ * @netdev: Backpointer to net_device structure
+ * @is_vf: Is the driver attached to a VF?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
- * @ctrl: Local copy of the control register/word.
- * @fl_bufsz: Currently configured size of the freelist buffers
+ * @chained_metadata_format: Firmware will use new metadata format
+ * @rx_dma_dir: Mapping direction for RX buffers
+ * @rx_dma_off: Offset at which DMA packets are mapped (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
+ * @ctrl: Local copy of the control register/word.
+ * @fl_bufsz: Currently configured size of the freelist buffers
* @xdp_prog: Installed XDP program
- * @fw_ver: Firmware version
+ * @tx_rings: Array of pre-allocated TX ring structures
+ * @rx_rings: Array of pre-allocated RX ring structures
+ * @ctrl_bar: Pointer to mapped control BAR
+ *
+ * @txd_cnt: Size of the TX ring in number of descriptors
+ * @rxd_cnt: Size of the RX ring in number of descriptors
+ * @num_r_vecs: Number of used ring vectors
+ * @num_tx_rings: Currently configured number of TX rings
+ * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
+ * @num_rx_rings: Currently configured number of RX rings
+ * @mtu: Device MTU
+ */
+struct nfp_net_dp {
+ struct device *dev;
+ struct net_device *netdev;
+
+ u8 is_vf:1;
+ u8 bpf_offload_skip_sw:1;
+ u8 bpf_offload_xdp:1;
+ u8 chained_metadata_format:1;
+
+ u8 rx_dma_dir;
+ u8 rx_offset;
+
+ u32 rx_dma_off;
+
+ u32 ctrl;
+ u32 fl_bufsz;
+
+ struct bpf_prog *xdp_prog;
+
+ struct nfp_net_tx_ring *tx_rings;
+ struct nfp_net_rx_ring *rx_rings;
+
+ u8 __iomem *ctrl_bar;
+
+ /* Cold data follows */
+
+ unsigned int txd_cnt;
+ unsigned int rxd_cnt;
+
+ unsigned int num_r_vecs;
+
+ unsigned int num_tx_rings;
+ unsigned int num_stack_tx_rings;
+ unsigned int num_rx_rings;
+
+ unsigned int mtu;
+};
+
+/**
+ * struct nfp_net - NFP network device structure
+ * @dp: Datapath structure
+ * @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
* @max_mtu: Maximum supported MTU advertised by the Firmware
+ * @rss_hfunc: RSS selected hash function
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
@@ -454,17 +513,9 @@ struct nfp_stat_pair {
* @rx_filter_change: Jiffies when statistics last changed
* @rx_filter_stats_timer: Timer for polling filter offload statistics
* @rx_filter_lock: Lock protecting timer state changes (teardown)
+ * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
- * @num_tx_rings: Currently configured number of TX rings
- * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
- * @num_rx_rings: Currently configured number of RX rings
- * @txd_cnt: Size of the TX ring in number of descriptors
- * @rxd_cnt: Size of the RX ring in number of descriptors
- * @tx_rings: Array of pre-allocated TX ring structures
- * @rx_rings: Array of pre-allocated RX ring structures
- * @max_r_vecs: Number of allocated interrupt vectors for RX/TX
- * @num_r_vecs: Number of used ring vectors
* @r_vecs: Pre-allocated array of ring vectors
* @irq_entries: Pre-allocated array of MSI-X entries
* @lsc_handler: Handler for Link State Change interrupt
@@ -480,7 +531,8 @@ struct nfp_stat_pair {
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
* @link_up: Is the link up?
- * @link_status_lock: Protects @link_up and ensures atomicity with BAR reading
+ * @link_changed: Has the link state changed since the last port refresh?
+ * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
* @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
* @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter
@@ -488,36 +540,24 @@ struct nfp_stat_pair {
* @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW
* @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts
* @qcp_cfg: Pointer to QCP queue used for configuration notification
- * @ctrl_bar: Pointer to mapped control BAR
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs
* @ethtool_dump_flag: Ethtool dump flag
* @port_list: Entry on device port list
+ * @pdev: Backpointer to PCI device
* @cpp: CPP device handle if available
+ * @eth_port: Translated ETH Table port entry
*/
struct nfp_net {
- struct pci_dev *pdev;
- struct net_device *netdev;
-
- unsigned is_vf:1;
- unsigned bpf_offload_skip_sw:1;
- unsigned bpf_offload_xdp:1;
-
- u32 ctrl;
- u32 fl_bufsz;
-
- u32 rx_offset;
-
- struct bpf_prog *xdp_prog;
-
- struct nfp_net_tx_ring *tx_rings;
- struct nfp_net_rx_ring *rx_rings;
+ struct nfp_net_dp dp;
struct nfp_net_fw_version fw_ver;
+
u32 cap;
u32 max_mtu;
+ u8 rss_hfunc;
u32 rss_cfg;
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
@@ -530,18 +570,10 @@ struct nfp_net {
unsigned int max_tx_rings;
unsigned int max_rx_rings;
- unsigned int num_tx_rings;
- unsigned int num_stack_tx_rings;
- unsigned int num_rx_rings;
-
int stride_tx;
int stride_rx;
- int txd_cnt;
- int rxd_cnt;
-
unsigned int max_r_vecs;
- unsigned int num_r_vecs;
struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
struct msix_entry irq_entries[NFP_NET_MAX_IRQS];
@@ -557,6 +589,7 @@ struct nfp_net {
u32 me_freq_mhz;
bool link_up;
+ bool link_changed;
spinlock_t link_status_lock;
spinlock_t reconfig_lock;
@@ -575,7 +608,6 @@ struct nfp_net {
u8 __iomem *qcp_cfg;
- u8 __iomem *ctrl_bar;
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
@@ -584,14 +616,10 @@ struct nfp_net {
struct list_head port_list;
+ struct pci_dev *pdev;
struct nfp_cpp *cpp;
-};
-struct nfp_net_ring_set {
- unsigned int n_rings;
- unsigned int mtu;
- unsigned int dcnt;
- void *rings;
+ struct nfp_eth_table_port *eth_port;
};
/* Functions to read/write from/to a BAR
@@ -599,42 +627,42 @@ struct nfp_net_ring_set {
*/
static inline u16 nn_readb(struct nfp_net *nn, int off)
{
- return readb(nn->ctrl_bar + off);
+ return readb(nn->dp.ctrl_bar + off);
}
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
- writeb(val, nn->ctrl_bar + off);
+ writeb(val, nn->dp.ctrl_bar + off);
}
static inline u16 nn_readw(struct nfp_net *nn, int off)
{
- return readw(nn->ctrl_bar + off);
+ return readw(nn->dp.ctrl_bar + off);
}
static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
- writew(val, nn->ctrl_bar + off);
+ writew(val, nn->dp.ctrl_bar + off);
}
static inline u32 nn_readl(struct nfp_net *nn, int off)
{
- return readl(nn->ctrl_bar + off);
+ return readl(nn->dp.ctrl_bar + off);
}
static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
- writel(val, nn->ctrl_bar + off);
+ writel(val, nn->dp.ctrl_bar + off);
}
static inline u64 nn_readq(struct nfp_net *nn, int off)
{
- return readq(nn->ctrl_bar + off);
+ return readq(nn->dp.ctrl_bar + off);
}
static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
- writeq(val, nn->ctrl_bar + off);
+ writeq(val, nn->dp.ctrl_bar + off);
}
/* Flush posted PCI writes by reading something without side effects */
@@ -776,6 +804,7 @@ void nfp_net_netdev_clean(struct net_device *netdev);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
+unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
@@ -787,9 +816,14 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n);
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
+
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
+ struct netlink_ext_ack *extack);
+
+bool nfp_net_link_changed_read_clear(struct nfp_net *nn);
+int nfp_net_refresh_eth_port(struct nfp_net *nn);
+void nfp_net_refresh_port_table(struct nfp_net *nn);
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index a41377e26c07..db20376260f5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,6 +41,7 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
+#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/module.h>
@@ -66,6 +67,7 @@
#include <net/pkt_cls.h>
#include <net/vxlan.h>
+#include "nfpcore/nfp_nsp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
@@ -83,20 +85,33 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
-static dma_addr_t
-nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz,
- int direction)
+static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
- return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM,
- bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
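+	/* Map with DMA_ATTR_SKIP_CPU_SYNC; ownership transfers are
+	 * synced explicitly by the helpers below, and only for the
+	 * part of the buffer that is actually used.
+	 */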
+ return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}
static void
-nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr,
- unsigned int bufsz, int direction)
+nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
- dma_unmap_single(&nn->pdev->dev, dma_addr,
- bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
+ dma_sync_single_for_device(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir);
+}
+
+static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
+{
+ dma_unmap_single_attrs(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
+ len, dp->rx_dma_dir);
}
/* Firmware reconfig
@@ -327,19 +342,22 @@ void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n)
{
+ struct nfp_net_dp *dp = &nn->dp;
+
nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
- nn->num_r_vecs = nn->max_r_vecs;
+ dp->num_r_vecs = nn->max_r_vecs;
memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
- if (nn->num_rx_rings > nn->num_r_vecs ||
- nn->num_tx_rings > nn->num_r_vecs)
- nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
- nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+ if (dp->num_rx_rings > dp->num_r_vecs ||
+ dp->num_tx_rings > dp->num_r_vecs)
+ dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
+ dp->num_rx_rings, dp->num_tx_rings,
+ dp->num_r_vecs);
- nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
- nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
- nn->num_stack_tx_rings = nn->num_tx_rings;
+ dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
+ dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
+ dp->num_stack_tx_rings = dp->num_tx_rings;
}
/**
@@ -373,6 +391,19 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
return IRQ_HANDLED;
}
+bool nfp_net_link_changed_read_clear(struct nfp_net *nn)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&nn->link_status_lock, flags);
+ ret = nn->link_changed;
+ nn->link_changed = false;
+ spin_unlock_irqrestore(&nn->link_status_lock, flags);
+
+ return ret;
+}
+
/**
* nfp_net_read_link_status() - Reread link status from control BAR
* @nn: NFP Network structure
@@ -392,13 +423,14 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
goto out;
nn->link_up = link_up;
+ nn->link_changed = true;
if (nn->link_up) {
- netif_carrier_on(nn->netdev);
- netdev_info(nn->netdev, "NIC Link is Up\n");
+ netif_carrier_on(nn->dp.netdev);
+ netdev_info(nn->dp.netdev, "NIC Link is Up\n");
} else {
- netif_carrier_off(nn->netdev);
- netdev_info(nn->netdev, "NIC Link is Down\n");
+ netif_carrier_off(nn->dp.netdev);
+ netdev_info(nn->dp.netdev, "NIC Link is Down\n");
}
out:
spin_unlock_irqrestore(&nn->link_status_lock, flags);
@@ -446,15 +478,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
* @tx_ring: TX ring structure
* @r_vec: IRQ vector servicing this ring
* @idx: Ring index
+ * @is_xdp: Is this an XDP TX ring?
*/
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_r_vector *r_vec, unsigned int idx)
+ struct nfp_net_r_vector *r_vec, unsigned int idx,
+ bool is_xdp)
{
struct nfp_net *nn = r_vec->nfp_net;
tx_ring->idx = idx;
tx_ring->r_vec = r_vec;
+ tx_ring->is_xdp = is_xdp;
tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -476,10 +511,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring->r_vec = r_vec;
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
- rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
-
rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
- rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
}
/**
@@ -530,7 +562,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
entry = &nn->irq_entries[vector_idx];
- snprintf(name, name_sz, format, netdev_name(nn->netdev));
+ snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
err = request_irq(entry->vector, handler, 0, name, nn);
if (err) {
nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -617,7 +649,6 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
/**
* nfp_net_tx_tso() - Set up Tx descriptor for LSO
- * @nn: NFP Net device
* @r_vec: per-ring structure
* @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to HW TX descriptor
@@ -626,7 +657,7 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
 * Set up Tx descriptor for LSO; do nothing for non-LSO skbs.
 * Return error if the packet header is greater than the maximum supported
 * LSO header size.
*/
-static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf,
struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
@@ -657,7 +688,7 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
/**
* nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to TX descriptor
@@ -666,7 +697,8 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
* This function sets the TX checksum flags in the TX descriptor based
* on the configuration and the protocol of the packet to be transmitted.
*/
-static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_tx_csum(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf,
struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
@@ -674,7 +706,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct iphdr *iph;
u8 l4_hdr;
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
return;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -693,8 +725,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
} else if (ipv6h->version == 6) {
l4_hdr = ipv6h->nexthdr;
} else {
- nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
- iph->version);
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
return;
}
@@ -706,8 +737,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
txd->flags |= PCIE_DESC_TX_UDP_CSUM;
break;
default:
- nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
return;
}
@@ -737,28 +767,31 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
const struct skb_frag_struct *frag;
- struct nfp_net_r_vector *r_vec;
struct nfp_net_tx_desc *txd, txdg;
- struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ struct nfp_net_tx_buf *txbuf;
struct netdev_queue *nd_q;
+ struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
int f, nr_frags;
int wr_idx;
u16 qidx;
+ dp = &nn->dp;
qidx = skb_get_queue_mapping(skb);
- tx_ring = &nn->tx_rings[qidx];
+ tx_ring = &dp->tx_rings[qidx];
r_vec = tx_ring->r_vec;
- nd_q = netdev_get_tx_queue(nn->netdev, qidx);
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
- nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
- qidx, tx_ring->wr_p, tx_ring->rd_p);
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
netif_tx_stop_queue(nd_q);
+ nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_busy++;
u64_stats_update_end(&r_vec->tx_sync);
@@ -766,9 +799,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
}
/* Start with the head skbuf */
- dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
- if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+ if (dma_mapping_error(dp->dev, dma_addr))
goto err_free;
wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
@@ -792,11 +825,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
txd->mss = 0;
txd->l4_offset = 0;
- nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
+ nfp_net_tx_tso(r_vec, txbuf, txd, skb);
- nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
+ nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
- if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN;
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
@@ -810,9 +843,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[f];
fsize = skb_frag_size(frag);
- dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
fsize, DMA_TO_DEVICE);
- if (dma_mapping_error(&nn->pdev->dev, dma_addr))
+ if (dma_mapping_error(dp->dev, dma_addr))
goto err_unmap;
wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
@@ -851,8 +884,7 @@ err_unmap:
--f;
while (f >= 0) {
frag = &skb_shinfo(skb)->frags[f];
- dma_unmap_page(&nn->pdev->dev,
- tx_ring->txbufs[wr_idx].dma_addr,
+ dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
@@ -861,13 +893,14 @@ err_unmap:
if (wr_idx < 0)
wr_idx += tx_ring->cnt;
}
- dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
- nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+ nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
@@ -884,7 +917,7 @@ err_free:
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
u32 done_pkts = 0, done_bytes = 0;
@@ -894,6 +927,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
int fidx;
int idx;
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return;
+
/* Work out how many descriptors have been transmitted */
qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
@@ -918,8 +954,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
if (fidx == -1) {
/* unmap head */
- dma_unmap_single(&nn->pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
done_pkts += tx_ring->txbufs[idx].pkt_cnt;
@@ -927,8 +962,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&nn->pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
}
@@ -948,7 +982,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
- nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
if (nfp_net_tx_ring_should_wake(tx_ring)) {
/* Make sure TX thread will see updated tx_ring->rd_p */
@@ -966,11 +1000,13 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
u32 done_pkts = 0, done_bytes = 0;
int idx, todo;
u32 qcp_rd_p;
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return;
+
/* Work out how many descriptors have been transmitted */
qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
@@ -982,23 +1018,12 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
else
todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+ done_pkts = todo;
while (todo--) {
idx = tx_ring->rd_p & (tx_ring->cnt - 1);
tx_ring->rd_p++;
- if (!tx_ring->txbufs[idx].frag)
- continue;
-
- nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr,
- nn->fl_bufsz, DMA_BIDIRECTIONAL);
- __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
-
- done_pkts++;
done_bytes += tx_ring->txbufs[idx].real_len;
-
- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].frag = NULL;
- tx_ring->txbufs[idx].fidx = -2;
}
tx_ring->qcp_rd_p = qcp_rd_p;
@@ -1015,52 +1040,43 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @tx_ring: TX ring structure
*
* Assumes that the device is stopped
*/
static void
-nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
+nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
const struct skb_frag_struct *frag;
- struct pci_dev *pdev = nn->pdev;
struct netdev_queue *nd_q;
- while (tx_ring->rd_p != tx_ring->wr_p) {
+ while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
struct nfp_net_tx_buf *tx_buf;
- int idx;
+ struct sk_buff *skb;
+ int idx, nr_frags;
idx = tx_ring->rd_p & (tx_ring->cnt - 1);
tx_buf = &tx_ring->txbufs[idx];
- if (tx_ring == r_vec->xdp_ring) {
- nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr,
- nn->fl_bufsz, DMA_BIDIRECTIONAL);
- __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
- } else {
- struct sk_buff *skb = tx_ring->txbufs[idx].skb;
- int nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (tx_buf->fidx == -1) {
- /* unmap head */
- dma_unmap_single(&pdev->dev, tx_buf->dma_addr,
- skb_headlen(skb),
- DMA_TO_DEVICE);
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
- dma_unmap_page(&pdev->dev, tx_buf->dma_addr,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- }
+ skb = tx_ring->txbufs[idx].skb;
+ nr_frags = skb_shinfo(skb)->nr_frags;
- /* check for last gather fragment */
- if (tx_buf->fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
+ if (tx_buf->fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
+ /* check for last gather fragment */
+ if (tx_buf->fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+
tx_buf->dma_addr = 0;
tx_buf->skb = NULL;
tx_buf->fidx = -2;
@@ -1075,10 +1091,10 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;
- if (tx_ring == r_vec->xdp_ring)
+ if (tx_ring->is_xdp)
return;
- nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
@@ -1087,7 +1103,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int i;
- for (i = 0; i < nn->netdev->real_num_tx_queues; i++) {
+ for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
continue;
nn_warn(nn, "TX timeout on ring: %d\n", i);
@@ -1098,16 +1114,17 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
/* Receive processing
*/
static unsigned int
-nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
+nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
unsigned int fl_bufsz;
fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ fl_bufsz += dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
fl_bufsz += NFP_NET_MAX_PREPEND;
else
- fl_bufsz += nn->rx_offset;
- fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
+ fl_bufsz += dp->rx_offset;
+ fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
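
A rough worked example of the sizing (all constants illustrative, not taken from this patch): assume a 64-byte NFP_NET_RX_BUF_HEADROOM, rx_dma_off of 0, a fixed rx_offset of 32, an MTU of 1500, 64-byte cache lines and a 320-byte struct skb_shared_info:

	fl_bufsz  = 64 + 0 + 32 + 14 + 8 + 1500; /* headroom, offsets,
						  * ETH_HLEN, 2 * VLAN_HLEN,
						  * mtu = 1618 */
	fl_bufsz  = SKB_DATA_ALIGN(1618);        /* -> 1664 */
	fl_bufsz += SKB_DATA_ALIGN(320);         /* -> 1984 */

At 1984 bytes the buffer stays well under a 4K page, which matters because the XDP path insists on page-backed buffers and nfp_net_check_config() below rejects any fl_bufsz above PAGE_SIZE.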
@@ -1126,62 +1143,53 @@ nfp_net_free_frag(void *frag, bool xdp)
/**
* nfp_net_rx_alloc_one() - Allocate and map page frag for RX
- * @rx_ring: RX ring structure of the skb
+ * @dp: NFP Net data path struct
* @dma_addr: Pointer to storage for DMA address (output param)
- * @fl_bufsz: size of freelist buffers
- * @xdp: Whether XDP is enabled
*
 * This function will allocate a new page frag and map it for DMA.
*
* Return: allocated page frag or NULL on failure.
*/
-static void *
-nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
- unsigned int fl_bufsz, bool xdp)
+static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
- struct nfp_net *nn = rx_ring->r_vec->nfp_net;
- int direction;
void *frag;
- if (!xdp)
- frag = netdev_alloc_frag(fl_bufsz);
+ if (!dp->xdp_prog)
+ frag = netdev_alloc_frag(dp->fl_bufsz);
else
frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
if (!frag) {
- nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
}
- direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
- *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction);
- if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- nfp_net_free_frag(frag, xdp);
- nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
return NULL;
}
return frag;
}
-static void *
-nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
+static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
void *frag;
- if (!nn->xdp_prog)
- frag = napi_alloc_frag(nn->fl_bufsz);
+ if (!dp->xdp_prog)
+ frag = napi_alloc_frag(dp->fl_bufsz);
else
frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
if (!frag) {
- nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
}
- *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction);
- if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
- nfp_net_free_frag(frag, nn->xdp_prog);
- nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
return NULL;
}
@@ -1190,17 +1198,21 @@ nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
/**
* nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring structure
* @frag: page fragment buffer
* @dma_addr: DMA address of skb mapping
*/
-static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
+static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
void *frag, dma_addr_t dma_addr)
{
unsigned int wr_idx;
wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
+ nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
/* Stash SKB and DMA address away */
rx_ring->rxbufs[wr_idx].frag = frag;
rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
@@ -1208,7 +1220,8 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
/* Fill freelist descriptor */
rx_ring->rxds[wr_idx].fld.reserved = 0;
rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
rx_ring->wr_p++;
rx_ring->wr_ptr_add++;
@@ -1249,19 +1262,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to remove buffers from
- * @xdp: Whether XDP is enabled
*
 * Assumes that the device is stopped and that buffers occupy entries
 * [0, ring->cnt - 1). After the device is disabled nfp_net_rx_ring_reset()
 * must be called to restore the required ring geometry.
*/
static void
-nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
- bool xdp)
+nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
- int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1272,9 +1283,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
if (!rx_ring->rxbufs[i].frag)
continue;
- nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr,
- rx_ring->bufsz, direction);
- nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp);
+ nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
+ nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
rx_ring->rxbufs[i].dma_addr = 0;
rx_ring->rxbufs[i].frag = NULL;
}
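
Taken together with the kernel-doc above, the shutdown order this patch implements is (sketch, per-ring loops and error handling elided; see nfp_net_close_free_all() later in this diff):

	nfp_net_clear_config_and_disable(nn);   /* stop device, reset rings */
	nfp_net_rx_ring_bufs_free(dp, rx_ring); /* unmap and free page frags */
	nfp_net_rx_ring_free(rx_ring);          /* free descriptor memory */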
@@ -1282,13 +1292,12 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
/**
* nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
 * @rx_ring: RX ring to allocate buffers for
- * @xdp: Whether XDP is enabled
*/
static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
- bool xdp)
+nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
struct nfp_net_rx_buf *rxbufs;
unsigned int i;
@@ -1296,11 +1305,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
rxbufs = rx_ring->rxbufs;
for (i = 0; i < rx_ring->cnt - 1; i++) {
- rxbufs[i].frag =
- nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
- rx_ring->bufsz, xdp);
+ rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
if (!rxbufs[i].frag) {
- nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp);
+ nfp_net_rx_ring_bufs_free(dp, rx_ring);
return -ENOMEM;
}
}
@@ -1310,14 +1317,17 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
/**
* nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to fill
*/
-static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+static void
+nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
{
unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++)
- nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag,
+ nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
rx_ring->rxbufs[i].dma_addr);
}
@@ -1337,17 +1347,18 @@ static int nfp_net_rx_csum_has_errors(u16 flags)
/**
* nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
- * @nn: NFP Net device
+ * @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @rxd: Pointer to RX descriptor
* @skb: Pointer to SKB
*/
-static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+static void nfp_net_rx_csum(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
- if (!(nn->netdev->features & NETIF_F_RXCSUM))
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
return;
if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
@@ -1378,8 +1389,9 @@ static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
}
}
-static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
- unsigned int type, __be32 *hash)
+static void
+nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
{
if (!(netdev->features & NETIF_F_RXHASH))
return;
@@ -1388,34 +1400,33 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
case NFP_NET_RSS_IPV4:
case NFP_NET_RSS_IPV6:
case NFP_NET_RSS_IPV6_EX:
- skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
+ meta->hash_type = PKT_HASH_TYPE_L3;
break;
default:
- skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
+ meta->hash_type = PKT_HASH_TYPE_L4;
break;
}
+
+ meta->hash = get_unaligned_be32(hash);
}
static void
-nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
- struct nfp_net_rx_desc *rxd)
+nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, struct nfp_net_rx_desc *rxd)
{
- struct nfp_net_rx_hash *rx_hash;
+ struct nfp_net_rx_hash *rx_hash = data;
if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
return;
- rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
-
- nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
+ nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
&rx_hash->hash);
}
static void *
-nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
- int meta_len)
+nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, int meta_len)
{
- u8 *data = skb->data - meta_len;
u32 meta_info;
meta_info = get_unaligned_be32(data);
@@ -1425,13 +1436,13 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
switch (meta_info & NFP_NET_META_FIELD_MASK) {
case NFP_NET_META_HASH:
meta_info >>= NFP_NET_META_FIELD_SIZE;
- nfp_net_set_hash(netdev, skb,
+ nfp_net_set_hash(netdev, meta,
meta_info & NFP_NET_META_FIELD_MASK,
(__be32 *)data);
data += 4;
break;
case NFP_NET_META_MARK:
- skb->mark = get_unaligned_be32(data);
+ meta->mark = get_unaligned_be32(data);
data += 4;
break;
default:
@@ -1445,8 +1456,9 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
}
static void
-nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb)
+nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
{
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_drops++;
@@ -1458,53 +1470,47 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
if (skb && rxbuf && skb->head == rxbuf->frag)
page_ref_inc(virt_to_head_page(rxbuf->frag));
if (rxbuf)
- nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
if (skb)
dev_kfree_skb_any(skb);
}
static bool
-nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
+nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
unsigned int pkt_len)
{
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
- dma_addr_t new_dma_addr;
- void *new_frag;
int wr_idx;
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return false;
- }
-
- new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
+ nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
return false;
}
- nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
/* Stash the soft descriptor of the head, then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
+
+ nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
+
txbuf->frag = rxbuf->frag;
txbuf->dma_addr = rxbuf->dma_addr;
txbuf->fidx = -1;
txbuf->pkt_cnt = 1;
txbuf->real_len = pkt_len;
- dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
pkt_len, DMA_BIDIRECTIONAL);
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = PCIE_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off);
+ nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
txd->data_len = cpu_to_le16(pkt_len);
txd->flags = 0;
@@ -1516,14 +1522,24 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
return true;
}
-static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
+static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
+ unsigned int *off, unsigned int *len)
{
struct xdp_buff xdp;
+ void *orig_data;
+ int ret;
- xdp.data = data;
- xdp.data_end = data + len;
+ xdp.data_hard_start = hard_start;
+ xdp.data = data + *off;
+ xdp.data_end = data + *off + *len;
- return bpf_prog_run_xdp(prog, &xdp);
+ orig_data = xdp.data;
+ ret = bpf_prog_run_xdp(prog, &xdp);
+
+ *len -= xdp.data - orig_data;
+ *off += xdp.data - orig_data;
+
+ return ret;
}
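
The *off/*len bookkeeping added here exists because the program may move xdp.data into the headroom with bpf_xdp_adjust_head(). A hypothetical XDP program that prepends an 8-byte header (written against current libbpf conventions, not part of this patch):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int prepend_hdr(struct xdp_md *ctx)
	{
		void *data, *data_end;

		/* Grow the packet by 8 bytes into the headroom. */
		if (bpf_xdp_adjust_head(ctx, -8))
			return XDP_DROP;

		data = (void *)(long)ctx->data;
		data_end = (void *)(long)ctx->data_end;
		if (data + 8 > data_end)
			return XDP_DROP;

		__builtin_memset(data, 0, 8); /* placeholder header bytes */
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";

On XDP_PASS the driver sees xdp.data sitting 8 bytes below orig_data, so *len grows and *off shrinks by 8, and the skb_reserve()/skb_put() pair later in nfp_net_rx() picks up the new packet boundaries automatically.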
/**
@@ -1540,25 +1556,24 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog;
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
- int rx_dma_map_dir;
int idx;
rcu_read_lock();
- xdp_prog = READ_ONCE(nn->xdp_prog);
- rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz;
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
- unsigned int meta_len, data_len, data_off, pkt_len, pkt_off;
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
dma_addr_t new_dma_addr;
void *new_frag;
@@ -1573,6 +1588,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
*/
dma_rmb();
+ memset(&meta, 0, sizeof(meta));
+
rx_ring->rd_p++;
pkts_polled++;
@@ -1593,11 +1610,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
data_len = le16_to_cpu(rxd->rxd.data_len);
pkt_len = data_len - meta_len;
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off = meta_len;
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
else
- pkt_off = nn->rx_offset;
- data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off;
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
@@ -1605,30 +1623,62 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync);
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+ data_len);
+
+ if (!dp->chained_metadata_format) {
+ nfp_net_set_hash_desc(dp->netdev, &meta,
+ rxbuf->frag + meta_off, rxd);
+ } else if (meta_len) {
+ void *end;
+
+ end = nfp_net_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ meta_len);
+ if (unlikely(end != rxbuf->frag + pkt_off)) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- nn->bpf_offload_xdp)) {
+ dp->bpf_offload_xdp)) {
+ unsigned int dma_off;
+ void *hard_start;
int act;
- dma_sync_single_for_cpu(&nn->pdev->dev,
- rxbuf->dma_addr + pkt_off,
- pkt_len, DMA_BIDIRECTIONAL);
- act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
- pkt_len);
+ hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+
+ act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
+ &pkt_off, &pkt_len);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring,
+ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
tx_ring, rxbuf,
- pkt_off, pkt_len)))
- trace_xdp_exception(nn->netdev, xdp_prog, act);
+ dma_off,
+ pkt_len)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
continue;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
- trace_xdp_exception(nn->netdev, xdp_prog, act);
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
case XDP_DROP:
- nfp_net_rx_give_one(rx_ring, rxbuf->frag,
+ nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr);
continue;
}
@@ -1636,41 +1686,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb = build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
- nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
continue;
}
- new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir,
- &new_dma_addr);
+ new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
if (unlikely(!new_frag)) {
- nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
continue;
}
- nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz,
- rx_dma_map_dir);
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
- nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
- skb_reserve(skb, data_off);
+ skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
- if (nn->fw_ver.major <= 3) {
- nfp_net_set_hash_desc(nn->netdev, skb, rxd);
- } else if (meta_len) {
- void *end;
-
- end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
- if (unlikely(end != skb->data)) {
- nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
- nfp_net_rx_drop(r_vec, rx_ring, NULL, skb);
- continue;
- }
- }
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
skb_record_rx_queue(skb, rx_ring->idx);
- skb->protocol = eth_type_trans(skb, nn->netdev);
+ skb->protocol = eth_type_trans(skb, dp->netdev);
- nfp_net_rx_csum(nn, r_vec, rxd, skb);
+ nfp_net_rx_csum(dp, r_vec, rxd, skb);
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1707,10 +1745,9 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
nfp_net_xdp_complete(r_vec->xdp_ring);
}
- if (pkts_polled < budget) {
- napi_complete_done(napi, pkts_polled);
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
- }
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
return pkts_polled;
}
@@ -1725,13 +1762,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kfree(tx_ring->txbufs);
if (tx_ring->txds)
- dma_free_coherent(&pdev->dev, tx_ring->size,
+ dma_free_coherent(dp->dev, tx_ring->size,
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
@@ -1743,24 +1779,21 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
+ * @dp: NFP Net data path struct
* @tx_ring: TX Ring structure to allocate
- * @cnt: Ring buffer count
- * @is_xdp: True if ring will be used for XDP
*
* Return: 0 on success, negative errno otherwise.
*/
static int
-nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
+nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
int sz;
- tx_ring->cnt = cnt;
+ tx_ring->cnt = dp->txd_cnt;
tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
- tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->txds)
goto err_alloc;
@@ -1770,15 +1803,10 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
if (!tx_ring->txbufs)
goto err_alloc;
- if (!is_xdp)
- netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask,
+ if (!tx_ring->is_xdp)
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
tx_ring->idx);
- nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n",
- tx_ring->idx, tx_ring->qcidx,
- tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds,
- is_xdp ? "XDP" : "");
-
return 0;
err_alloc:
@@ -1786,62 +1814,92 @@ err_alloc:
return -ENOMEM;
}
-static struct nfp_net_tx_ring *
-nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
- unsigned int num_stack_tx_rings)
+static void
+nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
{
- struct nfp_net_tx_ring *rings;
- unsigned int r;
+ unsigned int i;
- rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
- if (!rings)
- return NULL;
+ if (!tx_ring->is_xdp)
+ return;
- for (r = 0; r < s->n_rings; r++) {
- int bias = 0;
+ for (i = 0; i < tx_ring->cnt; i++) {
+ if (!tx_ring->txbufs[i].frag)
+ return;
- if (r >= num_stack_tx_rings)
- bias = num_stack_tx_rings;
+ nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
+ __free_page(virt_to_page(tx_ring->txbufs[i].frag));
+ }
+}
+
+static int
+nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
+ unsigned int i;
- nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r);
+ if (!tx_ring->is_xdp)
+ return 0;
- if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias))
- goto err_free_prev;
+ for (i = 0; i < tx_ring->cnt; i++) {
+ txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
+ if (!txbufs[i].frag) {
+ nfp_net_tx_ring_bufs_free(dp, tx_ring);
+ return -ENOMEM;
+ }
}
- return s->rings = rings;
-
-err_free_prev:
- while (r--)
- nfp_net_tx_ring_free(&rings[r]);
- kfree(rings);
- return NULL;
+ return 0;
}
-static void
-nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
+static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
- struct nfp_net_ring_set new = *s;
+ unsigned int r;
+
+ dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
+ GFP_KERNEL);
+ if (!dp->tx_rings)
+ return -ENOMEM;
- s->dcnt = nn->txd_cnt;
- s->rings = nn->tx_rings;
- s->n_rings = nn->num_tx_rings;
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ int bias = 0;
+
+ if (r >= dp->num_stack_tx_rings)
+ bias = dp->num_stack_tx_rings;
- nn->txd_cnt = new.dcnt;
- nn->tx_rings = new.rings;
- nn->num_tx_rings = new.n_rings;
+ nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
+ r, bias);
+
+ if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_prev;
+
+ if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_ring;
+ }
+
+ return 0;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+err_free_ring:
+ nfp_net_tx_ring_free(&dp->tx_rings[r]);
+ }
+ kfree(dp->tx_rings);
+ return -ENOMEM;
}
-static void
-nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
+static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
- struct nfp_net_tx_ring *rings = s->rings;
unsigned int r;
- for (r = 0; r < s->n_rings; r++)
- nfp_net_tx_ring_free(&rings[r]);
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+ nfp_net_tx_ring_free(&dp->tx_rings[r]);
+ }
- kfree(rings);
+ kfree(dp->tx_rings);
}
/**
@@ -1851,13 +1909,12 @@ nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
- dma_free_coherent(&pdev->dev, rx_ring->size,
+ dma_free_coherent(dp->dev, rx_ring->size,
rx_ring->rxds, rx_ring->dma);
rx_ring->cnt = 0;
@@ -1869,26 +1926,19 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
+ * @dp: NFP Net data path struct
* @rx_ring: RX ring to allocate
- * @fl_bufsz: Size of buffers to allocate
- * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
static int
-nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
- u32 cnt)
+nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
int sz;
- rx_ring->cnt = cnt;
- rx_ring->bufsz = fl_bufsz;
-
+ rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
- rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->rxds)
goto err_alloc;
@@ -1898,10 +1948,6 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
if (!rx_ring->rxbufs)
goto err_alloc;
- nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
- rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
- rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
-
return 0;
err_alloc:
@@ -1909,82 +1955,59 @@ err_alloc:
return -ENOMEM;
}
-static struct nfp_net_rx_ring *
-nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
- bool xdp)
+static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
- unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
- struct nfp_net_rx_ring *rings;
unsigned int r;
- rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
- if (!rings)
- return NULL;
+ dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
+ GFP_KERNEL);
+ if (!dp->rx_rings)
+ return -ENOMEM;
- for (r = 0; r < s->n_rings; r++) {
- nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r);
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
- if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt))
+ if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
goto err_free_prev;
- if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp))
+ if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
goto err_free_ring;
}
- return s->rings = rings;
+ return 0;
err_free_prev:
while (r--) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
- nfp_net_rx_ring_free(&rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
}
- kfree(rings);
- return NULL;
-}
-
-static void
-nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
-{
- struct nfp_net_ring_set new = *s;
-
- s->mtu = nn->netdev->mtu;
- s->dcnt = nn->rxd_cnt;
- s->rings = nn->rx_rings;
- s->n_rings = nn->num_rx_rings;
-
- nn->netdev->mtu = new.mtu;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
- nn->rxd_cnt = new.dcnt;
- nn->rx_rings = new.rings;
- nn->num_rx_rings = new.n_rings;
+ kfree(dp->rx_rings);
+ return -ENOMEM;
}
-static void
-nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s,
- bool xdp)
+static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
- struct nfp_net_rx_ring *rings = s->rings;
unsigned int r;
- for (r = 0; r < s->n_rings; r++) {
- nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
- nfp_net_rx_ring_free(&rings[r]);
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
}
- kfree(rings);
+ kfree(dp->rx_rings);
}
static void
-nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
- int idx)
+nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, int idx)
{
- r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
+ r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
r_vec->tx_ring =
- idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL;
+ idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
- r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ?
- &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL;
+ r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
+ &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
}
static int
@@ -1994,11 +2017,11 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int err;
/* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
+ netif_napi_add(nn->dp.netdev, &r_vec->napi,
nfp_net_poll, NAPI_POLL_WEIGHT);
snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->netdev->name, idx);
+ "%s-rxtx-%d", nn->dp.netdev->name, idx);
err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) {
@@ -2045,7 +2068,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn)
{
int i;
- for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
+ for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
get_unaligned_le32(nn->rss_key + i));
}
@@ -2069,13 +2092,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
/* copy RX interrupt coalesce parameters */
value = (nn->rx_coalesce_max_frames << 16) |
(factor * nn->rx_coalesce_usecs);
- for (i = 0; i < nn->num_rx_rings; i++)
+ for (i = 0; i < nn->dp.num_rx_rings; i++)
nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
/* copy TX interrupt coalesce parameters */
value = (nn->tx_coalesce_max_frames << 16) |
(factor * nn->tx_coalesce_usecs);
- for (i = 0; i < nn->num_tx_rings; i++)
+ for (i = 0; i < nn->dp.num_tx_rings; i++)
nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}
@@ -2090,9 +2113,9 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
- get_unaligned_be32(nn->netdev->dev_addr));
+ get_unaligned_be32(nn->dp.netdev->dev_addr));
nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
- get_unaligned_be16(nn->netdev->dev_addr + 4));
+ get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
}
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
@@ -2116,7 +2139,7 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
unsigned int r;
int err;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
update = NFP_NET_CFG_UPDATE_GEN;
update |= NFP_NET_CFG_UPDATE_MSIX;
@@ -2133,14 +2156,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_reset(&nn->rx_rings[r]);
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]);
- for (r = 0; r < nn->num_r_vecs; r++)
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
+ for (r = 0; r < nn->dp.num_tx_rings; r++)
+ nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_vec_clear_ring_data(nn, r);
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
}
static void
@@ -2162,13 +2185,17 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}
-static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
+/**
+ * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
+ * @nn: NFP Net device to reconfigure
+ */
+static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
- u32 new_ctrl, update = 0;
+ u32 bufsz, new_ctrl, update = 0;
unsigned int r;
int err;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
nfp_net_rss_write_key(nn);
@@ -2184,22 +2211,23 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r);
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r);
+ for (r = 0; r < nn->dp.num_tx_rings; r++)
+ nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
- nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
+ 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
- nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
+ 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
nfp_net_write_mac_addr(nn);
- nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
- nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
- nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
+ nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
+
+ bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
+ nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
/* Enable device */
new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
@@ -2211,37 +2239,26 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
+ if (err) {
+ nfp_net_clear_config_and_disable(nn);
+ return err;
+ }
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
- for (r = 0; r < nn->num_rx_rings; r++)
- nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]);
+ for (r = 0; r < nn->dp.num_rx_rings; r++)
+ nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
	/* Since reconfiguration requests while the NFP is down are ignored, we
* have to wipe the entire VXLAN configuration and reinitialize it.
*/
- if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
- udp_tunnel_get_rx_info(nn->netdev);
+ udp_tunnel_get_rx_info(nn->dp.netdev);
}
- return err;
-}
-
-/**
- * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
- * @nn: NFP Net device to reconfigure
- */
-static int nfp_net_set_config_and_enable(struct nfp_net *nn)
-{
- int err;
-
- err = __nfp_net_set_config_and_enable(nn);
- if (err)
- nfp_net_clear_config_and_disable(nn);
-
- return err;
+ return 0;
}
/**
@@ -2252,12 +2269,12 @@ static void nfp_net_open_stack(struct nfp_net *nn)
{
unsigned int r;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
napi_enable(&nn->r_vecs[r].napi);
enable_irq(nn->r_vecs[r].irq_vector);
}
- netif_tx_wake_all_queues(nn->netdev);
+ netif_tx_wake_all_queues(nn->dp.netdev);
enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nfp_net_read_link_status(nn);
@@ -2266,22 +2283,8 @@ static void nfp_net_open_stack(struct nfp_net *nn)
static int nfp_net_netdev_open(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = nn->txd_cnt,
- };
int err, r;
- if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
- nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
- return -EBUSY;
- }
-
/* Step 1: Allocate resources for rings and the like
* - Request interrupts
* - Allocate RX and TX ring resources
@@ -2299,33 +2302,28 @@ static int nfp_net_netdev_open(struct net_device *netdev)
goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err)
goto err_cleanup_vec_p;
}
- nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog);
- if (!nn->rx_rings) {
- err = -ENOMEM;
+ err = nfp_net_rx_rings_prepare(nn, &nn->dp);
+ if (err)
goto err_cleanup_vec;
- }
- nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx,
- nn->num_stack_tx_rings);
- if (!nn->tx_rings) {
- err = -ENOMEM;
+ err = nfp_net_tx_rings_prepare(nn, &nn->dp);
+ if (err)
goto err_free_rx_rings;
- }
for (r = 0; r < nn->max_r_vecs; r++)
- nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
+ nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
- err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings);
+ err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
if (err)
goto err_free_rings;
- err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
+ err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
if (err)
goto err_free_rings;
@@ -2351,11 +2349,11 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0;
err_free_rings:
- nfp_net_tx_ring_set_free(nn, &tx);
+ nfp_net_tx_rings_free(&nn->dp);
err_free_rx_rings:
- nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog);
+ nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
- r = nn->num_r_vecs;
+ r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
@@ -2374,15 +2372,15 @@ static void nfp_net_close_stack(struct nfp_net *nn)
unsigned int r;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- netif_carrier_off(nn->netdev);
+ netif_carrier_off(nn->dp.netdev);
nn->link_up = false;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
disable_irq(nn->r_vecs[r].irq_vector);
napi_disable(&nn->r_vecs[r].napi);
}
- netif_tx_disable(nn->netdev);
+ netif_tx_disable(nn->dp.netdev);
}
/**
@@ -2393,17 +2391,19 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
{
unsigned int r;
- for (r = 0; r < nn->num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog);
- nfp_net_rx_ring_free(&nn->rx_rings[r]);
+ for (r = 0; r < nn->dp.num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
+ nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
}
- for (r = 0; r < nn->num_tx_rings; r++)
- nfp_net_tx_ring_free(&nn->tx_rings[r]);
- for (r = 0; r < nn->num_r_vecs; r++)
+ for (r = 0; r < nn->dp.num_tx_rings; r++) {
+ nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]);
+ nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
+ }
+ for (r = 0; r < nn->dp.num_r_vecs; r++)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- kfree(nn->rx_rings);
- kfree(nn->tx_rings);
+ kfree(nn->dp.rx_rings);
+ kfree(nn->dp.tx_rings);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -2417,11 +2417,6 @@ static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
- nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
- return 0;
- }
-
/* Step 1: Disable RX and TX rings from the Linux kernel perspective
*/
nfp_net_close_stack(nn);
@@ -2443,7 +2438,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
u32 new_ctrl;
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (netdev->flags & IFF_PROMISC) {
if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
@@ -2454,13 +2449,13 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
}
- if (new_ctrl == nn->ctrl)
+ if (new_ctrl == nn->dp.ctrl)
return;
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
}
static void nfp_net_rss_init_itbl(struct nfp_net *nn)
@@ -2469,181 +2464,174 @@ static void nfp_net_rss_init_itbl(struct nfp_net *nn)
for (i = 0; i < sizeof(nn->rss_itbl); i++)
nn->rss_itbl[i] =
- ethtool_rxfh_indir_default(i, nn->num_rx_rings);
+ ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
}
-static int
-nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
- unsigned int *stack_tx_rings,
- struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx,
- struct nfp_net_ring_set *tx)
+static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ struct nfp_net_dp new_dp = *dp;
+
+ *dp = nn->dp;
+ nn->dp = new_dp;
+
+ nn->dp.netdev->mtu = new_dp.mtu;
+
+ if (!netif_is_rxfh_configured(nn->dp.netdev))
+ nfp_net_rss_init_itbl(nn);
+}
+
+static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
{
unsigned int r;
int err;
- if (rx)
- nfp_net_rx_ring_set_swap(nn, rx);
- if (tx)
- nfp_net_tx_ring_set_swap(nn, tx);
-
- swap(*num_vecs, nn->num_r_vecs);
- swap(*stack_tx_rings, nn->num_stack_tx_rings);
- *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
+ nfp_net_dp_swap(nn, dp);
for (r = 0; r < nn->max_r_vecs; r++)
- nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
-
- if (!netif_is_rxfh_configured(nn->netdev))
- nfp_net_rss_init_itbl(nn);
+ nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
- err = netif_set_real_num_rx_queues(nn->netdev,
- nn->num_rx_rings);
+ err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
if (err)
return err;
- if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) {
- err = netif_set_real_num_tx_queues(nn->netdev,
- nn->num_stack_tx_rings);
+ if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
+ err = netif_set_real_num_tx_queues(nn->dp.netdev,
+ nn->dp.num_stack_tx_rings);
if (err)
return err;
}
- return __nfp_net_set_config_and_enable(nn);
+ return nfp_net_set_config_and_enable(nn);
+}
+
+struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
+{
+ struct nfp_net_dp *new;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ *new = nn->dp;
+
+ /* Clear things which need to be recomputed */
+ new->fl_bufsz = 0;
+ new->tx_rings = NULL;
+ new->rx_rings = NULL;
+ new->num_r_vecs = 0;
+ new->num_stack_tx_rings = 0;
+
+ return new;
}
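
Every reconfiguration path now follows the same clone, modify, commit shape; a hypothetical caller changing the RX descriptor count (name illustrative, mirroring the MTU path at the end of this hunk) would look like:

	static int nfp_net_set_rxd_cnt(struct nfp_net *nn, u32 rxd_cnt)
	{
		struct nfp_net_dp *dp;

		dp = nfp_net_clone_dp(nn);
		if (!dp)
			return -ENOMEM;

		/* Touch only the clone; nn->dp stays live until the swap. */
		dp->rxd_cnt = rxd_cnt;

		/* Consumes dp: nfp_net_ring_reconfig() frees it on both
		 * success and failure paths.
		 */
		return nfp_net_ring_reconfig(nn, dp, NULL);
	}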
static int
-nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct netlink_ext_ack *extack)
{
/* XDP-enabled tests */
- if (!xdp_prog)
+ if (!dp->xdp_prog)
return 0;
- if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) {
- nn_warn(nn, "MTU too large w/ XDP enabled\n");
+ if (dp->fl_bufsz > PAGE_SIZE) {
+ NL_MOD_TRY_SET_ERR_MSG(extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
- if (tx && tx->n_rings > nn->max_tx_rings) {
- nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
+ if (dp->num_tx_rings > nn->max_tx_rings) {
+ NL_MOD_TRY_SET_ERR_MSG(extack, "Insufficient number of TX rings w/ XDP enabled");
return -EINVAL;
}
return 0;
}
-static void
-nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx,
- struct nfp_net_ring_set *tx,
- unsigned int stack_tx_rings, unsigned int num_vecs)
-{
- nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
- nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
- nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
- nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
- nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
- nn->num_stack_tx_rings = stack_tx_rings;
- nn->num_r_vecs = num_vecs;
- *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
-
- if (!netif_is_rxfh_configured(nn->netdev))
- nfp_net_rss_init_itbl(nn);
-}
-
-int
-nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
- struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
+int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct netlink_ext_ack *extack)
{
- unsigned int stack_tx_rings, num_vecs, r;
- int err;
+ int r, err;
+
+ dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
- stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
- if (*xdp_prog)
- stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings;
+ dp->num_stack_tx_rings = dp->num_tx_rings;
+ if (dp->xdp_prog)
+ dp->num_stack_tx_rings -= dp->num_rx_rings;
- num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings);
+ dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
- err = nfp_net_check_config(nn, *xdp_prog, rx, tx);
+ err = nfp_net_check_config(nn, dp, extack);
if (err)
- return err;
+ goto exit_free_dp;
- if (!netif_running(nn->netdev)) {
- nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx,
- stack_tx_rings, num_vecs);
- return 0;
+ if (!netif_running(dp->netdev)) {
+ nfp_net_dp_swap(nn, dp);
+ err = 0;
+ goto exit_free_dp;
}
/* Prepare new rings */
- for (r = nn->num_r_vecs; r < num_vecs; r++) {
+ for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err) {
- num_vecs = r;
- goto err_cleanup_vecs;
- }
- }
- if (rx) {
- if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) {
- err = -ENOMEM;
+ dp->num_r_vecs = r;
goto err_cleanup_vecs;
}
}
- if (tx) {
- if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) {
- err = -ENOMEM;
- goto err_free_rx;
- }
- }
+
+ err = nfp_net_rx_rings_prepare(nn, dp);
+ if (err)
+ goto err_cleanup_vecs;
+
+ err = nfp_net_tx_rings_prepare(nn, dp);
+ if (err)
+ goto err_free_rx;
/* Stop device, swap in new rings, try to start the firmware */
nfp_net_close_stack(nn);
nfp_net_clear_config_and_disable(nn);
- err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
- xdp_prog, rx, tx);
+ err = nfp_net_dp_swap_enable(nn, dp);
if (err) {
int err2;
nfp_net_clear_config_and_disable(nn);
/* Try with old configuration and old rings */
- err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
- xdp_prog, rx, tx);
+ err2 = nfp_net_dp_swap_enable(nn, dp);
if (err2)
nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
err, err2);
}
- for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- if (rx)
- nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
- if (tx)
- nfp_net_tx_ring_set_free(nn, tx);
+ nfp_net_rx_rings_free(dp);
+ nfp_net_tx_rings_free(dp);
nfp_net_open_stack(nn);
+exit_free_dp:
+ kfree(dp);
return err;
err_free_rx:
- if (rx)
- nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
+ nfp_net_rx_rings_free(dp);
err_cleanup_vecs:
- for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
+ for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ kfree(dp);
return err;
}
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = new_mtu,
- .dcnt = nn->rxd_cnt,
- };
+ struct nfp_net_dp *dp;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->mtu = new_mtu;
+
+ return nfp_net_ring_reconfig(nn, dp, NULL);
}
static void nfp_net_stat64(struct net_device *netdev,
@@ -2652,7 +2640,7 @@ static void nfp_net_stat64(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
int r;
- for (r = 0; r < nn->num_r_vecs; r++) {
+ for (r = 0; r < nn->dp.num_r_vecs; r++) {
struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
u64 data[3];
unsigned int start;
@@ -2694,12 +2682,12 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct nfp_net *nn = netdev_priv(netdev);
if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (proto != htons(ETH_P_ALL))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
- if (!nn->bpf_offload_xdp)
+ if (!nn->dp.bpf_offload_xdp)
return nfp_net_bpf_offload(nn, tc->cls_bpf);
else
return -EBUSY;
@@ -2718,7 +2706,7 @@ static int nfp_net_set_features(struct net_device *netdev,
/* Assume this is not called with features we have not advertised */
- new_ctrl = nn->ctrl;
+ new_ctrl = nn->dp.ctrl;
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
@@ -2762,7 +2750,7 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
- if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
nn_err(nn, "Cannot disable HW TC offload while in use\n");
return -EBUSY;
}
@@ -2770,16 +2758,16 @@ static int nfp_net_set_features(struct net_device *netdev,
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
netdev->features, features, changed);
- if (new_ctrl == nn->ctrl)
+ if (new_ctrl == nn->dp.ctrl)
return 0;
- nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
+ nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
return err;
- nn->ctrl = new_ctrl;
+ nn->dp.ctrl = new_ctrl;
return 0;
}
@@ -2830,6 +2818,26 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static int
+nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err;
+
+ if (!nn->eth_port)
+ return -EOPNOTSUPP;
+
+ if (!nn->eth_port->is_split)
+ err = snprintf(name, len, "p%d", nn->eth_port->label_port);
+ else
+ err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
+ nn->eth_port->label_subport);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
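
For reference, the naming scheme implemented above follows the p<port>[s<subport>] convention; a couple of illustrative (made-up) label values:

    /* Illustration only, hypothetical label values:
     *   label_port 0, port not split         -> "p0"
     *   label_port 1, label_subport 2, split -> "p1s2"
     * snprintf() returns the length the full string would need, so the
     * err >= len check rejects truncated names with -EINVAL.
     */
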
/**
* nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
* @nn: NFP Net device to reconfigure
@@ -2842,7 +2850,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
nn->vxlan_ports[idx] = port;
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
+ if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
return;
BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
@@ -2921,8 +2929,8 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
- if (!nn->bpf_offload_xdp)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
+ if (!nn->dp.bpf_offload_xdp)
return prog ? -EBUSY : 0;
cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
} else {
@@ -2935,48 +2943,44 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
/* Stop offload if replace not possible */
if (ret && cmd.command == TC_CLSBPF_REPLACE)
nfp_net_xdp_offload(nn, NULL);
- nn->bpf_offload_xdp = prog && !ret;
+ nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
}
-static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp)
{
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = nn->txd_cnt,
- };
+ struct bpf_prog *old_prog = nn->dp.xdp_prog;
+ struct bpf_prog *prog = xdp->prog;
+ struct nfp_net_dp *dp;
int err;
- if (prog && prog->xdp_adjust_head) {
- nn_err(nn, "Does not support bpf_xdp_adjust_head()\n");
- return -EOPNOTSUPP;
- }
- if (!prog && !nn->xdp_prog)
+ if (!prog && !nn->dp.xdp_prog)
return 0;
- if (prog && nn->xdp_prog) {
- prog = xchg(&nn->xdp_prog, prog);
+ if (prog && nn->dp.xdp_prog) {
+ prog = xchg(&nn->dp.xdp_prog, prog);
bpf_prog_put(prog);
- nfp_net_xdp_offload(nn, nn->xdp_prog);
+ nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
return 0;
}
- tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings;
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->xdp_prog = prog;
+ dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
+ dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx);
+ err = nfp_net_ring_reconfig(nn, dp, xdp->extack);
if (err)
return err;
- /* @prog got swapped and is now the old one */
- if (prog)
- bpf_prog_put(prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
- nfp_net_xdp_offload(nn, nn->xdp_prog);
+ nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
return 0;
}
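
Attaching an XDP program doubles up the TX side: every RX ring gets a dedicated TX ring for XDP_TX, and those extra rings are excluded from num_stack_tx_rings. A worked example, assuming an adapter running 8 RX and 8 stack TX rings:

    /* Worked example (assumed counts):
     *   attach:  dp->num_tx_rings       = 8 + 8  = 16
     *   reconfig computes:
     *            dp->num_stack_tx_rings = 16 - 8 = 8
     *            dp->num_r_vecs         = max(8, 8) = 8
     *   detach:  dp->num_tx_rings       = 16 - 8 = 8 again
     */
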
@@ -2987,9 +2991,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
- return nfp_net_xdp_setup(nn, xdp->prog);
+ return nfp_net_xdp_setup(nn, xdp);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->xdp_prog;
+ xdp->prog_attached = !!nn->dp.xdp_prog;
return 0;
default:
return -EINVAL;
@@ -3008,6 +3012,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
+ .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
@@ -3020,9 +3025,9 @@ static const struct net_device_ops nfp_net_netdev_ops = {
void nfp_net_info(struct nfp_net *nn)
{
nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
- nn->is_vf ? "VF " : "",
- nn->num_tx_rings, nn->max_tx_rings,
- nn->num_rx_rings, nn->max_rx_rings);
+ nn->dp.is_vf ? "VF " : "",
+ nn->dp.num_tx_rings, nn->max_tx_rings,
+ nn->dp.num_rx_rings, nn->max_rx_rings);
nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
@@ -3074,21 +3079,24 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
nn = netdev_priv(netdev);
- nn->netdev = netdev;
+ nn->dp.netdev = netdev;
+ nn->dp.dev = &pdev->dev;
nn->pdev = pdev;
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
- nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus());
- nn->num_rx_rings = min_t(unsigned int, max_rx_rings,
+ nn->dp.num_tx_rings = min_t(unsigned int,
+ max_tx_rings, num_online_cpus());
+ nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
netif_get_num_default_rss_queues());
- nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings);
- nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus());
+ nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
+ nn->dp.num_r_vecs = min_t(unsigned int,
+ nn->dp.num_r_vecs, num_online_cpus());
- nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
- nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
+ nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
+ nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->rx_filter_lock);
@@ -3108,7 +3116,28 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
*/
void nfp_net_netdev_free(struct nfp_net *nn)
{
- free_netdev(nn->netdev);
+ free_netdev(nn->dp.netdev);
+}
+
+/**
+ * nfp_net_rss_key_sz() - Get current size of the RSS key
+ * @nn: NFP Net device instance
+ *
+ * Return: size of the RSS key for currently selected hash function.
+ */
+unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
+{
+ switch (nn->rss_hfunc) {
+ case ETH_RSS_HASH_TOP:
+ return NFP_NET_CFG_RSS_KEY_SZ;
+ case ETH_RSS_HASH_XOR:
+ return 0;
+ case ETH_RSS_HASH_CRC32:
+ return 4;
+ }
+
+ nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
+ return 0;
}
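
The key size now depends on the selected hash function — 40 bytes (NFP_NET_CFG_RSS_KEY_SZ) for Toeplitz, a 4-byte seed for CRC32, and no key at all for XOR — so callers should size copies accordingly. A minimal sketch:

    /* Sketch: copy out the RSS key for whatever hfunc is active. */
    u8 key[NFP_NET_CFG_RSS_KEY_SZ];	/* big enough for any hfunc */
    unsigned int sz = nfp_net_rss_key_sz(nn);

    if (sz)
    	memcpy(key, nn->rss_key, sz);
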
/**
@@ -3117,14 +3146,32 @@ void nfp_net_netdev_free(struct nfp_net *nn)
*/
static void nfp_net_rss_init(struct nfp_net *nn)
{
- netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
+ unsigned long func_bit, rss_cap_hfunc;
+ u32 reg;
+
+ /* Read the RSS function capability and select first supported func */
+ reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
+ rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
+ if (!rss_cap_hfunc)
+ rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
+ NFP_NET_CFG_RSS_TOEPLITZ);
+
+ func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
+ if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
+ dev_warn(nn->dp.dev,
+ "Bad RSS config, defaulting to Toeplitz hash\n");
+ func_bit = ETH_RSS_HASH_TOP_BIT;
+ }
+ nn->rss_hfunc = 1 << func_bit;
+
+ netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
nfp_net_rss_init_itbl(nn);
/* Enable IPv4/IPv6 TCP by default */
nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
NFP_NET_CFG_RSS_IPV6_TCP |
- NFP_NET_CFG_RSS_TOEPLITZ |
+ FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
NFP_NET_CFG_RSS_MASK;
}
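
The hash-function selection lives in the top byte of both the capability and control words, so FIELD_GET()/FIELD_PREP() translate between the hardware bit positions (24-26) and the ETH_RSS_HASH_* bit numbers (0-2). A worked example with the 0xff000000 masks from nfp_net_ctrl.h:

    /* Firmware advertising only Toeplitz sets bit 24 of RSS_CAP:
     *   FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, BIT(24)) == 0x1
     *   find_first_bit(&0x1, NFP_NET_CFG_RSS_HFUNCS)  == 0
     *                                  == ETH_RSS_HASH_TOP_BIT
     * and writing the selection back into the control word:
     *   FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, ETH_RSS_HASH_TOP) == BIT(24)
     *                                  == NFP_NET_CFG_RSS_TOEPLITZ
     */
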
@@ -3151,6 +3198,10 @@ int nfp_net_netdev_init(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int err;
+ nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3158,17 +3209,26 @@ int nfp_net_netdev_init(struct net_device *netdev)
nfp_net_write_mac_addr(nn);
/* Determine RX packet/metadata boundary offset */
- if (nn->fw_ver.major >= 2)
- nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
- else
- nn->rx_offset = NFP_NET_RX_OFFSET;
+ if (nn->fw_ver.major >= 2) {
+ u32 reg;
+
+ reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
+ if (reg > NFP_NET_MAX_PREPEND) {
+ nn_err(nn, "Invalid rx offset: %d\n", reg);
+ return -EINVAL;
+ }
+ nn->dp.rx_offset = reg;
+ } else {
+ nn->dp.rx_offset = NFP_NET_RX_OFFSET;
+ }
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
netdev->mtu = nn->max_mtu;
else
netdev->mtu = NFP_NET_DEFAULT_MTU;
- nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu);
+ nn->dp.mtu = netdev->mtu;
+ nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
/* Advertise/enable offloads based on capabilities
*
@@ -3179,31 +3239,31 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->hw_features = NETIF_F_HIGHDMA;
if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
netdev->hw_features |= NETIF_F_RXCSUM;
- nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
}
if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
netdev->hw_features |= NETIF_F_SG;
- nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
}
if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
}
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
netdev->hw_features |= NETIF_F_RXHASH;
nfp_net_rss_init(nn);
- nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
}
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO)
netdev->hw_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL;
- nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
netdev->hw_enc_features = netdev->hw_features;
}
@@ -3212,11 +3272,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
}
netdev->features = netdev->hw_features;
@@ -3229,14 +3289,14 @@ int nfp_net_netdev_init(struct net_device *netdev)
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
- nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
- nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
/* Allow IRQ moderation, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
nfp_net_irqmod_init(nn);
- nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
/* Stash the re-configuration queue away. First odd queue in TX Bar */
@@ -3275,10 +3335,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- unregister_netdev(nn->netdev);
+ unregister_netdev(nn->dp.netdev);
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
- if (nn->bpf_offload_xdp)
+ if (nn->dp.xdp_prog)
+ bpf_prog_put(nn->dp.xdp_prog);
+ if (nn->dp.bpf_offload_xdp)
nfp_net_xdp_offload(nn, NULL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 385ba355c965..d04ccc9f6116 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -177,6 +177,19 @@
#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0)
#define NFP_NET_CFG_STS 0x0034
#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */
+/* Link rate */
+#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1
+#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF
+#define NFP_NET_CFG_STS_LINK_RATE \
+ (NFP_NET_CFG_STS_LINK_RATE_MASK << NFP_NET_CFG_STS_LINK_RATE_SHIFT)
+#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0
+#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1
+#define NFP_NET_CFG_STS_LINK_RATE_1G 2
+#define NFP_NET_CFG_STS_LINK_RATE_10G 3
+#define NFP_NET_CFG_STS_LINK_RATE_25G 4
+#define NFP_NET_CFG_STS_LINK_RATE_40G 5
+#define NFP_NET_CFG_STS_LINK_RATE_50G 6
+#define NFP_NET_CFG_STS_LINK_RATE_100G 7
#define NFP_NET_CFG_CAP 0x0038
#define NFP_NET_CFG_MAX_TXRINGS 0x003c
#define NFP_NET_CFG_MAX_RXRINGS 0x0040
@@ -192,6 +205,14 @@
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
/**
+ * RSS capabilities
+ * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
+ * @NFP_NET_CFG_RSS_HFUNC)
+ */
+#define NFP_NET_CFG_RSS_CAP 0x0054
+#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
+
+/**
* VXLAN/UDP encap configuration
* @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
* @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
@@ -249,7 +270,11 @@
#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */
#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */
#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */
+#define NFP_NET_CFG_RSS_HFUNC 0xff000000
#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */
+#define NFP_NET_CFG_RSS_XOR (1 << 25) /* Use XOR as hash */
+#define NFP_NET_CFG_RSS_CRC32 (1 << 26) /* Use CRC32 as hash */
+#define NFP_NET_CFG_RSS_HFUNCS 3
#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4)
#define NFP_NET_CFG_RSS_KEY_SZ 0x28
#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 6e9372a18375..4077c59bf782 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -40,9 +40,9 @@ static struct dentry *nfp_dir;
static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
{
- int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_rx_ring *rx_ring;
+ int fl_rd_p, fl_wr_p, rxd_cnt;
struct nfp_net_rx_desc *rxd;
struct nfp_net *nn;
void *frag;
@@ -54,19 +54,18 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
goto out;
nn = r_vec->nfp_net;
rx_ring = r_vec->rx_ring;
- if (!netif_running(nn->netdev))
+ if (!netif_running(nn->dp.netdev))
goto out;
rxd_cnt = rx_ring->cnt;
fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);
fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);
- rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx);
- rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
- seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n",
- rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p,
- fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p);
+ seq_printf(file, "RX[%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d\n",
+ rx_ring->idx, rx_ring->fl_qcidx,
+ rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
+ rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);
for (i = 0; i < rxd_cnt; i++) {
rxd = &rx_ring->rxds[i];
@@ -89,10 +88,6 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
seq_puts(file, " FL_RD");
if (i == fl_wr_p % rxd_cnt)
seq_puts(file, " FL_WR");
- if (i == rx_rd_p % rxd_cnt)
- seq_puts(file, " RX_RD");
- if (i == rx_wr_p % rxd_cnt)
- seq_puts(file, " RX_WR");
seq_putc(file, '\n');
}
@@ -143,7 +138,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
- if (!netif_running(nn->netdev))
+ if (!netif_running(nn->dp.netdev))
goto out;
txd_cnt = tx_ring->cnt;
@@ -151,8 +146,11 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
- seq_printf(file, "TX[%02d]: H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
- tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
+ seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n",
+ tx_ring->idx, tx_ring->qcidx,
+ tx_ring == r_vec->tx_ring ? "" : "xdp",
+ tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
+ tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
for (i = 0; i < txd_cnt; i++) {
txd = &tx_ring->txds[i];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2649f7523c81..abbb47e60cc3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,6 +40,7 @@
* Brad Petrus <brad.petrus@netronome.com>
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -48,6 +49,7 @@
#include <linux/ethtool.h>
#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nsp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
@@ -126,9 +128,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
-#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
+#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
#define NN_ET_RVEC_GATHER_STATS 7
-#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
+#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
@@ -172,6 +174,119 @@ static void nfp_net_get_drvinfo(struct net_device *netdev,
drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ;
}
+/**
+ * nfp_net_get_link_ksettings - Get Link Speed settings
+ * @netdev: network interface device structure
+ * @cmd: ethtool command
+ *
+ * Reports speed settings based on info in the BAR provided by the fw.
+ */
+static int
+nfp_net_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ static const u32 ls_to_ethtool[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
+ };
+ struct nfp_net *nn = netdev_priv(netdev);
+ u32 sts, ls;
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ cmd->base.port = PORT_OTHER;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+
+ if (nn->eth_port)
+ cmd->base.autoneg = nn->eth_port->aneg != NFP_ANEG_DISABLED ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ if (!netif_carrier_ok(netdev))
+ return 0;
+
+ /* Use link speed from ETH table if available, otherwise try the BAR */
+ if (nn->eth_port) {
+ int err;
+
+ if (nfp_net_link_changed_read_clear(nn)) {
+ err = nfp_net_refresh_eth_port(nn);
+ if (err)
+ return err;
+ }
+
+ cmd->base.port = nn->eth_port->port_type;
+ cmd->base.speed = nn->eth_port->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ return 0;
+ }
+
+ sts = nn_readl(nn, NFP_NET_CFG_STS);
+
+ ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
+ if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
+ return -EOPNOTSUPP;
+
+ if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
+ ls >= ARRAY_SIZE(ls_to_ethtool))
+ return 0;
+
+ cmd->base.speed = ls_to_ethtool[ls];
+ cmd->base.duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static int
+nfp_net_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_nsp *nsp;
+ int err;
+
+ if (!nn->eth_port)
+ return -EOPNOTSUPP;
+
+ if (netif_running(netdev)) {
+ nn_warn(nn, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
+ return -EBUSY;
+ }
+
+ nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
+ NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
+ if (err)
+ goto err_bad_set;
+ if (cmd->base.speed != SPEED_UNKNOWN) {
+ u32 speed = cmd->base.speed / nn->eth_port->lanes;
+
+ err = __nfp_eth_set_speed(nsp, speed);
+ if (err)
+ goto err_bad_set;
+ }
+
+ err = nfp_eth_config_commit_end(nsp);
+ if (err > 0)
+ return 0; /* no change */
+
+ nfp_net_refresh_port_table(nn);
+
+ return err;
+
+err_bad_set:
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+}
+
static void nfp_net_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -179,30 +294,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
- ring->rx_pending = nn->rxd_cnt;
- ring->tx_pending = nn->txd_cnt;
+ ring->rx_pending = nn->dp.rxd_cnt;
+ ring->tx_pending = nn->dp.txd_cnt;
}
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
- struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
- struct nfp_net_ring_set rx = {
- .n_rings = nn->num_rx_rings,
- .mtu = nn->netdev->mtu,
- .dcnt = rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = nn->num_tx_rings,
- .dcnt = txd_cnt,
- };
+ struct nfp_net_dp *dp;
- if (nn->rxd_cnt != rxd_cnt)
- reconfig_rx = &rx;
- if (nn->txd_cnt != txd_cnt)
- reconfig_tx = &tx;
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
- reconfig_rx, reconfig_tx);
+ dp->rxd_cnt = rxd_cnt;
+ dp->txd_cnt = txd_cnt;
+
+ return nfp_net_ring_reconfig(nn, dp, NULL);
}
static int nfp_net_set_ringparam(struct net_device *netdev,
@@ -223,11 +330,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
return -EINVAL;
- if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+ if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
return 0;
nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
- nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+ nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
@@ -245,7 +352,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < nn->num_r_vecs; i++) {
+ for (i = 0; i < nn->dp.num_r_vecs; i++) {
sprintf(p, "rvec_%u_rx_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rvec_%u_tx_pkts", i);
@@ -267,13 +374,13 @@ static void nfp_net_get_strings(struct net_device *netdev,
p += ETH_GSTRING_LEN;
strncpy(p, "tx_lso", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
- for (i = 0; i < nn->num_tx_rings; i++) {
+ for (i = 0; i < nn->dp.num_tx_rings; i++) {
sprintf(p, "txq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "txq_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < nn->num_rx_rings; i++) {
+ for (i = 0; i < nn->dp.num_rx_rings; i++) {
sprintf(p, "rxq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rxq_%u_bytes", i);
@@ -306,12 +413,12 @@ static void nfp_net_get_stats(struct net_device *netdev,
break;
case NFP_NET_DEV_ET_STATS:
- io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
+ io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off;
data[i] = readq(io_p);
break;
}
}
- for (j = 0; j < nn->num_r_vecs; j++) {
+ for (j = 0; j < nn->dp.num_r_vecs; j++) {
unsigned int start;
do {
@@ -337,16 +444,16 @@ static void nfp_net_get_stats(struct net_device *netdev,
}
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
data[i++] = gathered_stats[j];
- for (j = 0; j < nn->num_tx_rings; j++) {
- io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
+ for (j = 0; j < nn->dp.num_tx_rings; j++) {
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
data[i++] = readq(io_p);
- io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
- for (j = 0; j < nn->num_rx_rings; j++) {
- io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
+ for (j = 0; j < nn->dp.num_rx_rings; j++) {
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
data[i++] = readq(io_p);
- io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
+ io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
}
@@ -410,7 +517,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = nn->num_rx_rings;
+ cmd->data = nn->dp.num_rx_rings;
return 0;
case ETHTOOL_GRXFH:
return nfp_net_get_rss_hash_opts(nn, cmd);
@@ -454,13 +561,13 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
return -EINVAL;
}
- new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
+ new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
new_rss_cfg |= NFP_NET_CFG_RSS_MASK;
if (new_rss_cfg == nn->rss_cfg)
return 0;
- writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
+ writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
if (err)
return err;
@@ -496,7 +603,12 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
- return NFP_NET_CFG_RSS_KEY_SZ;
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ return -EOPNOTSUPP;
+
+ return nfp_net_rss_key_sz(nn);
}
static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -512,9 +624,12 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
indir[i] = nn->rss_itbl[i];
if (key)
- memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
+ if (hfunc) {
+ *hfunc = nn->rss_hfunc;
+ if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ }
return 0;
}
@@ -527,14 +642,14 @@ static int nfp_net_set_rxfh(struct net_device *netdev,
int i;
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
- !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP))
+ !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
if (!key && !indir)
return 0;
if (key) {
- memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ);
+ memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
nfp_net_rss_write_key(nn);
}
if (indir) {
@@ -564,7 +679,7 @@ static void nfp_net_get_regs(struct net_device *netdev,
regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
- regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
+ regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}
static int nfp_net_get_coalesce(struct net_device *netdev,
@@ -676,7 +791,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
ec->tx_coalesce_usecs_high ||
ec->tx_max_coalesced_frames_high ||
ec->rate_sample_interval)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* Compute factor used to convert coalesce '_usecs' parameters to
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
@@ -736,16 +851,16 @@ static void nfp_net_get_channels(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
unsigned int num_tx_rings;
- num_tx_rings = nn->num_tx_rings;
- if (nn->xdp_prog)
- num_tx_rings -= nn->num_rx_rings;
+ num_tx_rings = nn->dp.num_tx_rings;
+ if (nn->dp.xdp_prog)
+ num_tx_rings -= nn->dp.num_rx_rings;
channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
channel->max_combined = min(channel->max_rx, channel->max_tx);
channel->max_other = NFP_NET_NON_Q_VECTORS;
- channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
- channel->rx_count = nn->num_rx_rings - channel->combined_count;
+ channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
+ channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
channel->tx_count = num_tx_rings - channel->combined_count;
channel->other_count = NFP_NET_NON_Q_VECTORS;
}
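
The ethtool channel report folds the hidden XDP TX rings back out, so only stack-visible rings are counted. A worked example, assuming 8 RX rings and 10 TX rings with an XDP program attached (8 of the TX rings belong to XDP):

    /* num_tx_rings   = 10 - 8    = 2  (stack TX rings only)
     * combined_count = min(8, 2) = 2
     * rx_count       = 8 - 2     = 6
     * tx_count       = 2 - 2     = 0
     */
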
@@ -753,29 +868,19 @@ static void nfp_net_get_channels(struct net_device *netdev,
static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
unsigned int total_tx)
{
- struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
- struct nfp_net_ring_set rx = {
- .n_rings = total_rx,
- .mtu = nn->netdev->mtu,
- .dcnt = nn->rxd_cnt,
- };
- struct nfp_net_ring_set tx = {
- .n_rings = total_tx,
- .dcnt = nn->txd_cnt,
- };
+ struct nfp_net_dp *dp;
- if (nn->num_rx_rings != total_rx)
- reconfig_rx = &rx;
- if (nn->num_stack_tx_rings != total_tx ||
- (nn->xdp_prog && reconfig_rx))
- reconfig_tx = &tx;
+ dp = nfp_net_clone_dp(nn);
+ if (!dp)
+ return -ENOMEM;
- /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
- if (nn->xdp_prog)
- tx.n_rings += total_rx;
+ dp->num_rx_rings = total_rx;
+ dp->num_tx_rings = total_tx;
+ /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
+ if (dp->xdp_prog)
+ dp->num_tx_rings += total_rx;
- return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
- reconfig_rx, reconfig_tx);
+ return nfp_net_ring_reconfig(nn, dp, NULL);
}
static int nfp_net_set_channels(struct net_device *netdev,
@@ -823,6 +928,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
.set_channels = nfp_net_set_channels,
+ .get_link_ksettings = nfp_net_get_link_ksettings,
+ .set_link_ksettings = nfp_net_set_link_ksettings,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 3afcdc11480c..8cb87cbe1120 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -47,11 +47,12 @@
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
+#include <linux/rtnetlink.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
-#include "nfpcore/nfp_nsp_eth.h"
+#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_net_ctrl.h"
@@ -129,61 +130,61 @@ err_area:
return (u8 __iomem *)ERR_PTR(err);
}
+/**
+ * nfp_net_get_mac_addr() - Get the MAC address.
+ * @nn: NFP Network structure
+ * @cpp: NFP CPP handle
+ * @id: NFP port id
+ *
+ * First try to get the MAC address from NSP ETH table. If that
+ * fails try HWInfo. As a last resort generate a random address.
+ */
static void
-nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
- unsigned int id)
+nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
{
+ struct nfp_net_dp *dp = &nn->dp;
u8 mac_addr[ETH_ALEN];
const char *mac_str;
char name[32];
+ if (nn->eth_port) {
+ ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
+ ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
+ return;
+ }
+
snprintf(name, sizeof(name), "eth%d.mac", id);
mac_str = nfp_hwinfo_lookup(cpp, name);
if (!mac_str) {
- dev_warn(&nn->pdev->dev,
- "Can't lookup MAC address. Generate\n");
- eth_hw_addr_random(nn->netdev);
+ dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
+ eth_hw_addr_random(dp->netdev);
return;
}
if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
&mac_addr[0], &mac_addr[1], &mac_addr[2],
&mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
- dev_warn(&nn->pdev->dev,
+ dev_warn(dp->dev,
"Can't parse MAC address (%s). Generate.\n", mac_str);
- eth_hw_addr_random(nn->netdev);
+ eth_hw_addr_random(dp->netdev);
return;
}
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+ ether_addr_copy(dp->netdev->dev_addr, mac_addr);
+ ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}
-/**
- * nfp_net_get_mac_addr() - Get the MAC address.
- * @nn: NFP Network structure
- * @pf: NFP PF device structure
- * @id: NFP port id
- *
- * First try to get the MAC address from NSP ETH table. If that
- * fails try HWInfo. As a last resort generate a random address.
- */
-static void
-nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
+static struct nfp_eth_table_port *
+nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
{
int i;
- for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
- if (pf->eth_tbl->ports[i].eth_index == id) {
- const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;
+ for (i = 0; eth_tbl && i < eth_tbl->count; i++)
+ if (eth_tbl->ports[i].eth_index == id)
+ return &eth_tbl->ports[i];
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
- return;
- }
-
- nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
+ return NULL;
}
static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
@@ -282,6 +283,7 @@ static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
while (!list_empty(&pf->ports)) {
nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
list_del(&nn->port_list);
+ pf->num_netdevs--;
nfp_net_netdev_free(nn);
}
@@ -290,7 +292,8 @@ static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
static struct nfp_net *
nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
void __iomem *tx_bar, void __iomem *rx_bar,
- int stride, struct nfp_net_fw_version *fw_ver)
+ int stride, struct nfp_net_fw_version *fw_ver,
+ struct nfp_eth_table_port *eth_port)
{
u32 n_tx_rings, n_rx_rings;
struct nfp_net *nn;
@@ -305,12 +308,13 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
nn->cpp = pf->cpp;
nn->fw_ver = *fw_ver;
- nn->ctrl_bar = ctrl_bar;
+ nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = tx_bar;
nn->rx_bar = rx_bar;
- nn->is_vf = 0;
+ nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
+ nn->eth_port = eth_port;
return nn;
}
@@ -322,7 +326,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
int err;
/* Get MAC address */
- nfp_net_get_mac_addr(nn, pf, id);
+ nfp_net_get_mac_addr(nn, pf->cpp, id);
/* Get ME clock frequency from ctrl BAR
* XXX for now frequency is hardcoded until we figure out how
@@ -330,7 +334,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->netdev);
+ err = nfp_net_netdev_init(nn->dp.netdev);
if (err)
return err;
@@ -347,6 +351,7 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
int stride, struct nfp_net_fw_version *fw_ver)
{
u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
+ struct nfp_eth_table_port *eth_port;
struct nfp_net *nn;
unsigned int i;
int err;
@@ -362,17 +367,27 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
prev_tx_base = tgt_tx_base;
prev_rx_base = tgt_rx_base;
- nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar,
- stride, fw_ver);
- if (IS_ERR(nn)) {
- err = PTR_ERR(nn);
- goto err_free_prev;
+ eth_port = nfp_net_find_port(pf->eth_tbl, i);
+ if (eth_port && eth_port->override_changed) {
+ nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
+ } else {
+ nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
+ rx_bar, stride,
+ fw_ver, eth_port);
+ if (IS_ERR(nn)) {
+ err = PTR_ERR(nn);
+ goto err_free_prev;
+ }
+ list_add_tail(&nn->port_list, &pf->ports);
+ pf->num_netdevs++;
}
- list_add_tail(&nn->port_list, &pf->ports);
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
}
+ if (list_empty(&pf->ports))
+ return -ENODEV;
+
return 0;
err_free_prev:
@@ -399,7 +414,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
/* Get MSI-X vectors */
wanted_irqs = 0;
list_for_each_entry(nn, &pf->ports, port_list)
- wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
+ wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL);
if (!pf->irq_entries) {
@@ -408,7 +423,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
}
num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
- NFP_NET_MIN_PORT_IRQS * pf->num_ports,
+ NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
wanted_irqs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
@@ -418,7 +433,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
/* Distribute IRQs to ports */
irqs_left = num_irqs;
- ports_left = pf->num_ports;
+ ports_left = pf->num_netdevs;
list_for_each_entry(nn, &pf->ports, port_list) {
unsigned int n;
@@ -444,7 +459,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
err_prev_deinit:
list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
}
nfp_net_irqs_disable(pf->pdev);
err_vec_free:
@@ -454,6 +469,108 @@ err_nn_free:
return err;
}
+static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
+{
+ nfp_net_debugfs_dir_clean(&pf->ddir);
+
+ nfp_net_irqs_disable(pf->pdev);
+ kfree(pf->irq_entries);
+
+ nfp_cpp_area_release_free(pf->rx_area);
+ nfp_cpp_area_release_free(pf->tx_area);
+ nfp_cpp_area_release_free(pf->ctrl_area);
+}
+
+static void nfp_net_refresh_netdevs(struct work_struct *work)
+{
+ struct nfp_pf *pf = container_of(work, struct nfp_pf,
+ port_refresh_work);
+ struct nfp_eth_table *eth_table;
+ struct nfp_net *nn, *next;
+
+ mutex_lock(&pf->port_lock);
+
+ /* Check for nfp_net_pci_remove() racing against us */
+ if (list_empty(&pf->ports))
+ goto out;
+
+ list_for_each_entry(nn, &pf->ports, port_list)
+ nfp_net_link_changed_read_clear(nn);
+
+ eth_table = nfp_eth_read_ports(pf->cpp);
+ if (!eth_table) {
+ nfp_err(pf->cpp, "Error refreshing port config!\n");
+ goto out;
+ }
+
+ rtnl_lock();
+ list_for_each_entry(nn, &pf->ports, port_list) {
+ if (!nn->eth_port)
+ continue;
+ nn->eth_port = nfp_net_find_port(eth_table,
+ nn->eth_port->eth_index);
+ }
+ rtnl_unlock();
+
+ kfree(pf->eth_tbl);
+ pf->eth_tbl = eth_table;
+
+ list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
+ if (!nn->eth_port) {
+ nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
+ continue;
+ }
+ if (!nn->eth_port->override_changed)
+ continue;
+
+ nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");
+
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_netdev_clean(nn->dp.netdev);
+
+ list_del(&nn->port_list);
+ pf->num_netdevs--;
+ nfp_net_netdev_free(nn);
+ }
+
+ if (list_empty(&pf->ports))
+ nfp_net_pci_remove_finish(pf);
+out:
+ mutex_unlock(&pf->port_lock);
+}
+
+void nfp_net_refresh_port_table(struct nfp_net *nn)
+{
+ struct nfp_pf *pf = pci_get_drvdata(nn->pdev);
+
+ schedule_work(&pf->port_refresh_work);
+}
+
+int nfp_net_refresh_eth_port(struct nfp_net *nn)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_eth_table *eth_table;
+
+ eth_table = nfp_eth_read_ports(nn->cpp);
+ if (!eth_table) {
+ nn_err(nn, "Error refreshing port state table!\n");
+ return -EIO;
+ }
+
+ eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index);
+ if (!eth_port) {
+ nn_err(nn, "Error finding state of the port!\n");
+ kfree(eth_table);
+ return -EIO;
+ }
+
+ memcpy(nn->eth_port, eth_port, sizeof(*eth_port));
+
+ kfree(eth_table);
+
+ return 0;
+}
+
/*
* PCI device functions
*/
@@ -467,17 +584,23 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
int stride;
int err;
+ INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
+ mutex_init(&pf->port_lock);
+
/* Verify that the board has completed initialization */
if (!nfp_is_ready(pf->cpp)) {
nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
return -EINVAL;
}
+ mutex_lock(&pf->port_lock);
pf->num_ports = nfp_net_pf_get_num_ports(pf);
ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
- if (!ctrl_bar)
- return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+ if (!ctrl_bar) {
+ err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+ goto err_unlock;
+ }
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
@@ -551,6 +674,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_clean_ddir;
+ mutex_unlock(&pf->port_lock);
+
return 0;
err_clean_ddir:
@@ -560,6 +685,8 @@ err_unmap_tx:
nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
nfp_cpp_area_release_free(pf->ctrl_area);
+err_unlock:
+ mutex_unlock(&pf->port_lock);
return err;
}
@@ -567,20 +694,21 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct nfp_net *nn;
+ mutex_lock(&pf->port_lock);
+ if (list_empty(&pf->ports))
+ goto out;
+
list_for_each_entry(nn, &pf->ports, port_list) {
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
}
nfp_net_pf_free_netdevs(pf);
- nfp_net_debugfs_dir_clean(&pf->ddir);
-
- nfp_net_irqs_disable(pf->pdev);
- kfree(pf->irq_entries);
+ nfp_net_pci_remove_finish(pf);
+out:
+ mutex_unlock(&pf->port_lock);
- nfp_cpp_area_release_free(pf->rx_area);
- nfp_cpp_area_release_free(pf->tx_area);
- nfp_cpp_area_release_free(pf->ctrl_area);
+ cancel_work_sync(&pf->port_refresh_work);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
index 18a851eb3508..cc823df12c8a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data)
spin_lock_bh(&nn->rx_filter_lock);
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
mod_timer(&nn->rx_filter_stats_timer,
jiffies + NFP_NET_STAT_POLL_IVL);
@@ -119,12 +119,12 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
if (tc_no_actions(cls_bpf->exts))
return NN_ACT_DIRECT;
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
/* TC legacy mode */
if (!tc_single_action(cls_bpf->exts))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
tcf_exts_to_list(cls_bpf->exts, &actions);
list_for_each_entry(a, &actions, list) {
@@ -132,11 +132,11 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return NN_ACT_TC_DROP;
if (is_tcf_mirred_egress_redirect(a) &&
- tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+ tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
return NN_ACT_TC_REDIR;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int
@@ -152,7 +152,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
int ret;
if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = nfp_net_bpf_get_act(nn, cls_bpf);
if (ret < 0)
@@ -160,16 +160,15 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
act = ret;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (max_mtu < nn->netdev->mtu) {
+ if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
- *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
- GFP_KERNEL);
+ *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
if (!*code)
return -ENOMEM;
@@ -181,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
return 0;
out:
- dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
+ dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
return ret;
}
@@ -194,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
u64 bpf_addr = dma_addr;
int err;
- nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+ nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
if (dense_mode)
bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
@@ -208,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
nn_err(nn, "FW command error while loading BPF: %d\n", err);
/* Enable passing packets through BPF function */
- nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
- nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
nn_err(nn, "FW command error while enabling BPF: %d\n", err);
- dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);
+ dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
nfp_net_bpf_stats_reset(nn);
mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
@@ -222,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
- if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+ if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
spin_lock_bh(&nn->rx_filter_lock);
- nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
spin_unlock_bh(&nn->rx_filter_lock);
- nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
del_timer_sync(&nn->rx_filter_stats_timer);
- nn->bpf_offload_skip_sw = 0;
+ nn->dp.bpf_offload_skip_sw = 0;
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
@@ -255,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
* frames which didn't have BPF applied in the hardware should
* be fine if software fallback is available, though.
*/
- if (nn->bpf_offload_skip_sw)
+ if (nn->dp.bpf_offload_skip_sw)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -270,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return 0;
case TC_CLSBPF_ADD:
- if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -290,6 +289,6 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return nfp_net_bpf_stats_update(nn, cls_bpf);
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 39407f7cc586..86e61be6f35c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
if (!is_valid_ether_addr(mac_addr)) {
- eth_hw_addr_random(nn->netdev);
+ eth_hw_addr_random(nn->dp.netdev);
return;
}
- ether_addr_copy(nn->netdev->dev_addr, mac_addr);
- ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+ ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
@@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
vf->nn = nn;
nn->fw_ver = fw_ver;
- nn->ctrl_bar = ctrl_bar;
- nn->is_vf = 1;
+ nn->dp.ctrl_bar = ctrl_bar;
+ nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
@@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
NFP_NET_MIN_PORT_IRQS,
- NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
+ NFP_NET_NON_Q_VECTORS +
+ nn->dp.num_r_vecs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
err = -EIO;
@@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->netdev);
+ err = nfp_net_netdev_init(nn->dp.netdev);
if (err)
goto err_irqs_disable;
@@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_debugfs_dir_clean(&vf->ddir);
- nfp_net_netdev_clean(nn->netdev);
+ nfp_net_netdev_clean(nn->dp.netdev);
nfp_net_irqs_disable(pdev);
@@ -337,7 +338,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
} else {
iounmap(vf->q_bar);
}
- iounmap(nn->ctrl_bar);
+ iounmap(nn->dp.ctrl_bar);
nfp_net_netdev_free(nn);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 42cb720b696d..4df2ce261b3f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -48,32 +48,26 @@
const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
-/* Implemented in nfp_nsp.c */
+/* Implemented in nfp_nsp.c, low level functions */
struct nfp_nsp;
-struct firmware;
-
-struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
-void nfp_nsp_close(struct nfp_nsp *state);
-u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
-u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
-int nfp_nsp_wait(struct nfp_nsp *state);
-int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
-int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+
+struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state);
+bool nfp_nsp_config_modified(struct nfp_nsp *state);
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified);
+void *nfp_nsp_config_entries(struct nfp_nsp *state);
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state);
+void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries,
+ unsigned int idx);
+void nfp_nsp_config_clear_state(struct nfp_nsp *state);
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
const void *buf, unsigned int size);
+int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
/* Implemented in nfp_resource.c */
-#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
-#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
-
-/* NFP Resource Table self-identifier */
-#define NFP_RESOURCE_TBL_NAME "nfp.res"
-#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
-
-/* All other keys are CRC32-POSIX of the 8-byte identification string */
+/* All keys are CRC32-POSIX of the 8-byte identification string */
/* ARM/PCI vNIC Interfaces 0..3 */
#define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0"
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 15cc3e77cf6a..43dc68e01274 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -217,7 +217,7 @@ static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
#define TARGET_WIDTH_64 8
static int
-compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
+compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
u32 *bar_config, u64 *bar_base,
int tgt, int act, int tok, u64 offset, size_t size, int width)
{
@@ -410,35 +410,36 @@ find_matching_bar(struct nfp6000_pcie *nfp,
/* Return EAGAIN if no resource is available */
static int
-find_unused_bar_noblock(struct nfp6000_pcie *nfp,
+find_unused_bar_noblock(const struct nfp6000_pcie *nfp,
int tgt, int act, int tok,
u64 offset, size_t size, int width)
{
- int n, invalid = 0;
+ int n, busy = 0;
for (n = 0; n < nfp->bars; n++) {
- struct nfp_bar *bar = &nfp->bar[n];
+ const struct nfp_bar *bar = &nfp->bar[n];
int err;
- if (bar->bitsize == 0) {
- invalid++;
- continue;
- }
-
- if (atomic_read(&bar->refcnt) != 0)
+ if (!bar->bitsize)
continue;
/* Just check to see if we can make it fit... */
err = compute_bar(nfp, bar, NULL, NULL,
tgt, act, tok, offset, size, width);
+ if (err)
+ continue;
- if (err < 0)
- invalid++;
- else
+ if (!atomic_read(&bar->refcnt))
return n;
+
+ busy++;
}
- return (n == invalid) ? -EINVAL : -EAGAIN;
+ if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n",
+ tgt, act, tok, offset, size, width))
+ return -EINVAL;
+
+ return -EAGAIN;
}
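The rewrite above changes the failure semantics: -EINVAL (with a one-time WARN) now means no BAR could ever satisfy the request, while -EAGAIN means a fitting BAR exists but is currently held. A minimal caller sketch of that contract, using a hypothetical retry helper that is not part of this driver:

/* Hypothetical wrapper, illustrating the -EAGAIN/-EINVAL contract only */
static int demo_find_bar(struct nfp6000_pcie *nfp, int tgt, int act, int tok,
			 u64 offset, size_t size, int width)
{
	int n;

	do {
		n = find_unused_bar_noblock(nfp, tgt, act, tok,
					    offset, size, width);
		if (n == -EAGAIN)	/* a fitting BAR exists but is busy */
			msleep(1);
	} while (n == -EAGAIN);

	return n;	/* BAR index, or -EINVAL: request can never fit */
}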
static int
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 40108e66c654..e2abba4c3a3f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -65,39 +65,49 @@ struct nfp_cpp_resource {
u64 end;
};
-struct nfp_cpp_mutex {
- struct list_head list;
- struct nfp_cpp *cpp;
- int target;
- u16 usage;
- u16 depth;
- unsigned long long address;
- u32 key;
-};
-
+/**
+ * struct nfp_cpp - main nfpcore device structure
+ * The following fields are read-only after probe() exits or netdevs are spawned.
+ * @dev: embedded device structure
+ * @op: low-level implementation ops
+ * @priv: private data of the low-level implementation
+ * @model: chip model
+ * @interface: chip interface id we are using to reach it
+ * @serial: chip serial number
+ * @imb_cat_table: CPP Mapping Table
+ *
+ * The following fields can be used only in probe() or with the rtnl lock held:
+ * @hwinfo: HWInfo database fetched from the device
+ * @rtsym: firmware run time symbols
+ *
+ * The following fields use explicit locking:
+ * @resource_list: NFP CPP resource list
+ * @resource_lock: protects @resource_list
+ *
+ * @area_cache_list: cached areas for cpp/xpb read/write speed up
+ * @area_cache_mutex: protects @area_cache_list
+ *
+ * @waitq: area wait queue
+ */
struct nfp_cpp {
struct device dev;
- void *priv; /* Private data of the low-level implementation */
+ void *priv;
u32 model;
u16 interface;
u8 serial[NFP_SERIAL_LEN];
const struct nfp_cpp_operations *op;
- struct list_head resource_list; /* NFP CPP resource list */
- struct list_head mutex_cache; /* Mutex cache */
+ struct list_head resource_list;
rwlock_t resource_lock;
wait_queue_head_t waitq;
- /* NFP6000 CPP Mapping Table */
u32 imb_cat_table[16];
- /* Cached areas for cpp/xpb readl/writel speedups */
- struct mutex area_cache_mutex; /* Lock for the area cache */
+ struct mutex area_cache_mutex;
struct list_head area_cache_list;
- /* Cached information */
void *hwinfo;
void *rtsym;
};
@@ -187,24 +197,6 @@ void nfp_cpp_free(struct nfp_cpp *cpp)
{
struct nfp_cpp_area_cache *cache, *ctmp;
struct nfp_cpp_resource *res, *rtmp;
- struct nfp_cpp_mutex *mutex, *mtmp;
-
- /* There should be no mutexes in the cache at this point. */
- WARN_ON(!list_empty(&cpp->mutex_cache));
- /* .. but if there are, unlock them and complain. */
- list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) {
- dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n",
- mutex->target, (unsigned long long)mutex->address,
- mutex->depth, mutex->usage);
-
- /* Forcing an unlock */
- mutex->depth = 1;
- nfp_cpp_mutex_unlock(mutex);
-
- /* Forcing a free */
- mutex->usage = 1;
- nfp_cpp_mutex_free(mutex);
- }
/* Remove all caches */
list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
@@ -419,9 +411,43 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
*/
void nfp_cpp_area_free(struct nfp_cpp_area *area)
{
+ if (atomic_read(&area->refcount))
+ nfp_warn(area->cpp, "Warning: freeing busy area\n");
nfp_cpp_area_put(area);
}
+static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
+{
+ *status = area->cpp->op->area_acquire(area);
+
+ return *status != -EAGAIN;
+}
+
+static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+ int err, status;
+
+ if (atomic_inc_return(&area->refcount) > 1)
+ return 0;
+
+ if (!area->cpp->op->area_acquire)
+ return 0;
+
+ err = wait_event_interruptible(area->cpp->waitq,
+ nfp_cpp_area_acquire_try(area, &status));
+ if (!err)
+ err = status;
+ if (err) {
+ nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
+ atomic_dec(&area->refcount);
+ return err;
+ }
+
+ nfp_cpp_area_get(area);
+
+ return 0;
+}
+
/**
* nfp_cpp_area_acquire() - lock down a CPP area for access
* @area: CPP area handle
@@ -433,27 +459,13 @@ void nfp_cpp_area_free(struct nfp_cpp_area *area)
*/
int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
{
- mutex_lock(&area->mutex);
- if (atomic_inc_return(&area->refcount) == 1) {
- int (*a_a)(struct nfp_cpp_area *);
-
- a_a = area->cpp->op->area_acquire;
- if (a_a) {
- int err;
+ int ret;
- wait_event_interruptible(area->cpp->waitq,
- (err = a_a(area)) != -EAGAIN);
- if (err < 0) {
- atomic_dec(&area->refcount);
- mutex_unlock(&area->mutex);
- return err;
- }
- }
- }
+ mutex_lock(&area->mutex);
+ ret = __nfp_cpp_area_acquire(area);
mutex_unlock(&area->mutex);
- nfp_cpp_area_get(area);
- return 0;
+ return ret;
}
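The refactor splits the locked body into __nfp_cpp_area_acquire() and introduces nfp_cpp_area_acquire_try(), so the wait_event_interruptible() condition no longer hides an assignment with side effects. A sketch of the intended calling pattern (error handling trimmed; assumes nfp_cpp_area_release() as the counterpart defined elsewhere in this file):

	struct nfp_cpp_area *area;
	u32 val;
	int err;

	area = nfp_cpp_area_alloc(cpp, dest, address, sizeof(val));
	if (!area)
		return -ENOMEM;

	err = nfp_cpp_area_acquire(area);	/* may sleep; fails on signal */
	if (err) {
		nfp_cpp_area_free(area);
		return err;
	}

	err = nfp_cpp_area_read(area, 0, &val, sizeof(val));

	nfp_cpp_area_release(area);		/* drop the hardware claim */
	nfp_cpp_area_free(area);		/* put the last reference */
	return err;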
/**
@@ -829,10 +841,7 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
* the need for special case code below when
* checking against available cache size.
*/
- if (length == 0)
- return NULL;
-
- if (list_empty(&cpp->area_cache_list) || id == 0)
+ if (length == 0 || id == 0)
return NULL;
/* Remap from cpp_island to cpp_target */
@@ -840,10 +849,15 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
if (err < 0)
return NULL;
- addr += *offset;
-
mutex_lock(&cpp->area_cache_mutex);
+ if (list_empty(&cpp->area_cache_list)) {
+ mutex_unlock(&cpp->area_cache_mutex);
+ return NULL;
+ }
+
+ addr += *offset;
+
/* See if we have a match */
list_for_each_entry(cache, &cpp->area_cache_list, entry) {
if (id == cache->id &&
@@ -937,12 +951,14 @@ int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
return -ENOMEM;
err = nfp_cpp_area_acquire(area);
- if (err)
- goto out;
+ if (err) {
+ nfp_cpp_area_free(area);
+ return err;
+ }
}
err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
-out:
+
if (cache)
area_cache_put(cpp, cache);
else
@@ -979,13 +995,14 @@ int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
return -ENOMEM;
err = nfp_cpp_area_acquire(area);
- if (err)
- goto out;
+ if (err) {
+ nfp_cpp_area_free(area);
+ return err;
+ }
}
err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
-out:
if (cache)
area_cache_put(cpp, cache);
else
@@ -1127,7 +1144,6 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
rwlock_init(&cpp->resource_lock);
init_waitqueue_head(&cpp->waitq);
lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
- INIT_LIST_HEAD(&cpp->mutex_cache);
INIT_LIST_HEAD(&cpp->resource_list);
INIT_LIST_HEAD(&cpp->area_cache_list);
mutex_init(&cpp->area_cache_mutex);
@@ -1425,322 +1441,3 @@ void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
{
return &cpp_explicit[1];
}
-
-/* THIS FUNCTION IS NOT EXPORTED */
-static u32 nfp_mutex_locked(u16 interface)
-{
- return (u32)interface << 16 | 0x000f;
-}
-
-static u32 nfp_mutex_unlocked(u16 interface)
-{
- return (u32)interface << 16 | 0x0000;
-}
-
-static bool nfp_mutex_is_locked(u32 val)
-{
- return (val & 0xffff) == 0x000f;
-}
-
-static bool nfp_mutex_is_unlocked(u32 val)
-{
- return (val & 0xffff) == 0000;
-}
-
-/* If you need more than 65536 recursive locks, please rethink your code. */
-#define MUTEX_DEPTH_MAX 0xffff
-
-static int
-nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
-{
- /* Not permitted on invalid interfaces */
- if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
- NFP_CPP_INTERFACE_TYPE_INVALID)
- return -EINVAL;
-
- /* Address must be 64-bit aligned */
- if (address & 7)
- return -EINVAL;
-
- if (*target != NFP_CPP_TARGET_MU)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * nfp_cpp_mutex_init() - Initialize a mutex location
- * @cpp: NFP CPP handle
- * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
- * @address: Offset into the address space of the NFP CPP target ID
- * @key: Unique 32-bit value for this mutex
- *
- * The CPP target:address must point to a 64-bit aligned location, and
- * will initialize 64 bits of data at the location.
- *
- * This creates the initial mutex state, as locked by this
- * nfp_cpp_interface().
- *
- * This function should only be called when setting up
- * the initial lock state upon boot-up of the system.
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
- int target, unsigned long long address, u32 key)
-{
- const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
- u16 interface = nfp_cpp_interface(cpp);
- int err;
-
- err = nfp_cpp_mutex_validate(interface, &target, address);
- if (err)
- return err;
-
- err = nfp_cpp_writel(cpp, muw, address + 4, key);
- if (err)
- return err;
-
- err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
- if (err)
- return err;
-
- return 0;
-}
-
-/**
- * nfp_cpp_mutex_alloc() - Create a mutex handle
- * @cpp: NFP CPP handle
- * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
- * @address: Offset into the address space of the NFP CPP target ID
- * @key: 32-bit unique key (must match the key at this location)
- *
- * The CPP target:address must point to a 64-bit aligned location, and
- * reserve 64 bits of data at the location for use by the handle.
- *
- * Only target/address pairs that point to entities that support the
- * MU Atomic Engine's CmpAndSwap32 command are supported.
- *
- * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
- */
-struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
- unsigned long long address, u32 key)
-{
- const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
- u16 interface = nfp_cpp_interface(cpp);
- struct nfp_cpp_mutex *mutex;
- int err;
- u32 tmp;
-
- err = nfp_cpp_mutex_validate(interface, &target, address);
- if (err)
- return NULL;
-
- /* Look for mutex on cache list */
- list_for_each_entry(mutex, &cpp->mutex_cache, list) {
- if (mutex->target == target && mutex->address == address) {
- mutex->usage++;
- return mutex;
- }
- }
-
- err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
- if (err < 0)
- return NULL;
-
- if (tmp != key)
- return NULL;
-
- mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
- if (!mutex)
- return NULL;
-
- mutex->cpp = cpp;
- mutex->target = target;
- mutex->address = address;
- mutex->key = key;
- mutex->depth = 0;
- mutex->usage = 1;
-
- /* Add mutex to cache list */
- list_add(&mutex->list, &cpp->mutex_cache);
-
- return mutex;
-}
-
-/**
- * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
- * @mutex: NFP CPP Mutex handle
- */
-void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
-{
- if (--mutex->usage)
- return;
-
- /* Remove mutex from cache */
- list_del(&mutex->list);
- kfree(mutex);
-}
-
-/**
- * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
- * @mutex: NFP CPP Mutex handle
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
-{
- unsigned long warn_at = jiffies + 15 * HZ;
- unsigned int timeout_ms = 1;
- int err;
-
- /* We can't use a waitqueue here, because the unlocker
- * might be on a separate CPU.
- *
- * So just wait for now.
- */
- for (;;) {
- err = nfp_cpp_mutex_trylock(mutex);
- if (err != -EBUSY)
- break;
-
- err = msleep_interruptible(timeout_ms);
- if (err != 0)
- return -ERESTARTSYS;
-
- if (time_is_before_eq_jiffies(warn_at)) {
- warn_at = jiffies + 60 * HZ;
- dev_warn(mutex->cpp->dev.parent,
- "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n",
- mutex->usage, mutex->depth,
- mutex->target, mutex->address, mutex->key);
- }
- }
-
- return err;
-}
-
-/**
- * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
- * @mutex: NFP CPP Mutex handle
- *
- * Return: 0 on success, or -errno on failure
- */
-int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
-{
- const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
- const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
- struct nfp_cpp *cpp = mutex->cpp;
- u32 key, value;
- u16 interface;
- int err;
-
- interface = nfp_cpp_interface(cpp);
-
- if (mutex->depth > 1) {
- mutex->depth--;
- return 0;
- }
-
- err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
- if (err < 0)
- return err;
-
- if (key != mutex->key)
- return -EPERM;
-
- err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
- if (err < 0)
- return err;
-
- if (value != nfp_mutex_locked(interface))
- return -EACCES;
-
- err = nfp_cpp_writel(cpp, muw, mutex->address,
- nfp_mutex_unlocked(interface));
- if (err < 0)
- return err;
-
- mutex->depth = 0;
- return 0;
-}
-
-/**
- * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
- * @mutex: NFP CPP Mutex handle
- *
- * Return: 0 if the lock succeeded, -errno on failure
- */
-int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
-{
- const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
- const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
- const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
- struct nfp_cpp *cpp = mutex->cpp;
- u32 key, value, tmp;
- int err;
-
- if (mutex->depth > 0) {
- if (mutex->depth == MUTEX_DEPTH_MAX)
- return -E2BIG;
- mutex->depth++;
- return 0;
- }
-
- /* Verify that the lock marker is not damaged */
- err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
- if (err < 0)
- return err;
-
- if (key != mutex->key)
- return -EPERM;
-
- /* Compare against the unlocked state, and if true,
- * write the interface id into the top 16 bits, and
- * mark as locked.
- */
- value = nfp_mutex_locked(nfp_cpp_interface(cpp));
-
- /* We use test_set_imm here, as it implies a read
- * of the current state, and sets the bits in the
- * bytemask of the command to 1s. Since the mutex
- * is guaranteed to be 64-bit aligned, the bytemask
- * of this 32-bit command is ensured to be 8'b00001111,
- * which implies that the lower 4 bits will be set to
- * ones regardless of the initial state.
- *
- * Since this is a 'Readback' operation, with no Pull
- * data, we can treat this as a normal Push (read)
- * atomic, which returns the original value.
- */
- err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
- if (err < 0)
- return err;
-
- /* Was it unlocked? */
- if (nfp_mutex_is_unlocked(tmp)) {
- /* The read value can only be 0x....0000 in the unlocked state.
- * If there was another contending for this lock, then
- * the lock state would be 0x....000f
- */
-
- /* Write our owner ID into the lock
- * While not strictly necessary, this helps with
- * debug and bookkeeping.
- */
- err = nfp_cpp_writel(cpp, muw, mutex->address, value);
- if (err < 0)
- return err;
-
- mutex->depth = 1;
- return 0;
- }
-
- /* Already locked by us? Success! */
- if (tmp == value) {
- mutex->depth = 1;
- return 0;
- }
-
- return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
-}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
new file mode 100644
index 000000000000..8a99c189efa8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+struct nfp_cpp_mutex {
+ struct nfp_cpp *cpp;
+ int target;
+ u16 depth;
+ unsigned long long address;
+ u32 key;
+};
+
+static u32 nfp_mutex_locked(u16 interface)
+{
+ return (u32)interface << 16 | 0x000f;
+}
+
+static u32 nfp_mutex_unlocked(u16 interface)
+{
+ return (u32)interface << 16 | 0x0000;
+}
+
+static bool nfp_mutex_is_locked(u32 val)
+{
+ return (val & 0xffff) == 0x000f;
+}
+
+static bool nfp_mutex_is_unlocked(u32 val)
+{
+ return (val & 0xffff) == 0x0000;
+}
+
+/* If you need more than 65536 recursive locks, please rethink your code. */
+#define NFP_MUTEX_DEPTH_MAX 0xffff
+
+static int
+nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
+{
+ /* Not permitted on invalid interfaces */
+ if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
+ NFP_CPP_INTERFACE_TYPE_INVALID)
+ return -EINVAL;
+
+ /* Address must be 64-bit aligned */
+ if (address & 7)
+ return -EINVAL;
+
+ if (*target != NFP_CPP_TARGET_MU)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_mutex_init() - Initialize a mutex location
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ * @key: Unique 32-bit value for this mutex
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * will initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this
+ * nfp_cpp_interface().
+ *
+ * This function should only be called when setting up
+ * the initial lock state upon boot-up of the system.
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
+ int target, unsigned long long address, u32 key)
+{
+ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ u16 interface = nfp_cpp_interface(cpp);
+ int err;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address + 4, key);
+ if (err)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * nfp_cpp_mutex_alloc() - Create a mutex handle
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ * @key: 32-bit unique key (must match the key at this location)
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * reserve 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the
+ * MU Atomic Engine's CmpAndSwap32 command are supported.
+ *
+ * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address, u32 key)
+{
+ const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ u16 interface = nfp_cpp_interface(cpp);
+ struct nfp_cpp_mutex *mutex;
+ int err;
+ u32 tmp;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return NULL;
+
+ err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+ if (err < 0)
+ return NULL;
+
+ if (tmp != key)
+ return NULL;
+
+ mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
+ if (!mutex)
+ return NULL;
+
+ mutex->cpp = cpp;
+ mutex->target = target;
+ mutex->address = address;
+ mutex->key = key;
+ mutex->depth = 0;
+
+ return mutex;
+}
+
+/**
+ * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
+ * @mutex: NFP CPP Mutex handle
+ */
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+ kfree(mutex);
+}
+
+/**
+ * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+ unsigned long warn_at = jiffies + 15 * HZ;
+ unsigned int timeout_ms = 1;
+ int err;
+
+ /* We can't use a waitqueue here, because the unlocker
+ * might be on a separate CPU.
+ *
+ * So just wait for now.
+ */
+ for (;;) {
+ err = nfp_cpp_mutex_trylock(mutex);
+ if (err != -EBUSY)
+ break;
+
+ err = msleep_interruptible(timeout_ms);
+ if (err != 0)
+ return -ERESTARTSYS;
+
+ if (time_is_before_eq_jiffies(warn_at)) {
+ warn_at = jiffies + 60 * HZ;
+ nfp_warn(mutex->cpp,
+ "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
+ mutex->depth,
+ mutex->target, mutex->address, mutex->key);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
+{
+ const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ u32 key, value;
+ u16 interface;
+ int err;
+
+ interface = nfp_cpp_interface(cpp);
+
+ if (mutex->depth > 1) {
+ mutex->depth--;
+ return 0;
+ }
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return -EPERM;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ return err;
+
+ if (value != nfp_mutex_locked(interface))
+ return -EACCES;
+
+ err = nfp_cpp_writel(cpp, muw, mutex->address,
+ nfp_mutex_unlocked(interface));
+ if (err < 0)
+ return err;
+
+ mutex->depth = 0;
+ return 0;
+}
+
+/**
+ * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
+ * @mutex: NFP CPP Mutex handle
+ *
+ * Return: 0 if the lock succeeded, -errno on failure
+ */
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
+{
+ const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
+ const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ u32 key, value, tmp;
+ int err;
+
+ if (mutex->depth > 0) {
+ if (mutex->depth == NFP_MUTEX_DEPTH_MAX)
+ return -E2BIG;
+ mutex->depth++;
+ return 0;
+ }
+
+ /* Verify that the lock marker is not damaged */
+ err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return -EPERM;
+
+ /* Compare against the unlocked state, and if true,
+ * write the interface id into the top 16 bits, and
+ * mark as locked.
+ */
+ value = nfp_mutex_locked(nfp_cpp_interface(cpp));
+
+ /* We use test_set_imm here, as it implies a read
+ * of the current state, and sets the bits in the
+ * bytemask of the command to 1s. Since the mutex
+ * is guaranteed to be 64-bit aligned, the bytemask
+ * of this 32-bit command is ensured to be 8'b00001111,
+ * which implies that the lower 4 bits will be set to
+ * ones regardless of the initial state.
+ *
+ * Since this is a 'Readback' operation, with no Pull
+ * data, we can treat this as a normal Push (read)
+ * atomic, which returns the original value.
+ */
+ err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
+ if (err < 0)
+ return err;
+
+ /* Was it unlocked? */
+ if (nfp_mutex_is_unlocked(tmp)) {
+ /* The read value can only be 0x....0000 in the unlocked state.
+ * If another party were contending for this lock, then
+ * the lock state would be 0x....000f
+ */
+
+ /* Write our owner ID into the lock
+ * While not strictly necessary, this helps with
+ * debug and bookkeeping.
+ */
+ err = nfp_cpp_writel(cpp, muw, mutex->address, value);
+ if (err < 0)
+ return err;
+
+ mutex->depth = 1;
+ return 0;
+ }
+
+ return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
+}
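The lock word manipulated by the helpers above packs the owner's interface id into the high 16 bits and a locked/unlocked sentinel into the low 16 bits. A self-test sketch (not part of the driver) making the encoding concrete:

static void demo_mutex_encoding(void)
{
	u16 ifc = 0x0102;	/* illustrative interface id */

	WARN_ON(nfp_mutex_locked(ifc) != 0x0102000f);
	WARN_ON(nfp_mutex_unlocked(ifc) != 0x01020000);
	WARN_ON(!nfp_mutex_is_locked(nfp_mutex_locked(ifc)));
	WARN_ON(!nfp_mutex_is_unlocked(nfp_mutex_unlocked(ifc)));
}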
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 34c50987c377..2fa9247bb23d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -49,6 +49,7 @@
#include "nfp.h"
#include "nfp_cpp.h"
+#include "nfp_nsp.h"
/* Offsets relative to the CSR base */
#define NSP_STATUS 0x00
@@ -77,7 +78,7 @@
#define NSP_MAGIC 0xab10
#define NSP_MAJOR 0
-#define NSP_MINOR (__MAX_SPCODE - 1)
+#define NSP_MINOR 8
#define NSP_CODE_MAJOR GENMASK(15, 12)
#define NSP_CODE_MINOR GENMASK(11, 0)
@@ -92,8 +93,18 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
+};
- __MAX_SPCODE,
+static const struct {
+ int code;
+ const char *msg;
+} nsp_errors[] = {
+ { 6010, "could not map to phy for port" },
+ { 6011, "not an allowed rate/lanes for port" },
+ { 6012, "not an allowed rate/lanes for port" },
+ { 6013, "high/low error, change other port first" },
+ { 6014, "config not found in flash" },
};
struct nfp_nsp {
@@ -103,8 +114,63 @@ struct nfp_nsp {
u16 major;
u16 minor;
} ver;
+
+ /* Eth table config state */
+ bool modified;
+ unsigned int idx;
+ void *entries;
};
+struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state)
+{
+ return state->cpp;
+}
+
+bool nfp_nsp_config_modified(struct nfp_nsp *state)
+{
+ return state->modified;
+}
+
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified)
+{
+ state->modified = modified;
+}
+
+void *nfp_nsp_config_entries(struct nfp_nsp *state)
+{
+ return state->entries;
+}
+
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state)
+{
+ return state->idx;
+}
+
+void
+nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx)
+{
+ state->entries = entries;
+ state->idx = idx;
+}
+
+void nfp_nsp_config_clear_state(struct nfp_nsp *state)
+{
+ state->entries = NULL;
+ state->idx = 0;
+}
+
+static void nfp_nsp_print_extended_error(struct nfp_nsp *state, u32 ret_val)
+{
+ int i;
+
+ if (!ret_val)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(nsp_errors); i++)
+ if (ret_val == nsp_errors[i].code)
+ nfp_err(state->cpp, "err msg: %s\n", nsp_errors[i].msg);
+}
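With the table above, a failed command that carries an extended code produces two log lines; for example, code 6013 would yield (placeholders for the err/command values, format strings taken from nfp_nsp_command() below):

	Result (error) code set: -<err> (6013) command: <code>
	err msg: high/low error, change other port first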
+
static int nfp_nsp_check(struct nfp_nsp *state)
{
struct nfp_cpp *cpp = state->cpp;
@@ -209,9 +275,8 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
if ((*reg & mask) == val)
return 0;
- err = msleep_interruptible(100);
- if (err)
- return err;
+ if (msleep_interruptible(25))
+ return -ERESTARTSYS;
if (time_after(start_time, wait_until))
return -ETIMEDOUT;
@@ -228,7 +293,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
*
* Return: 0 for success with no result
*
- * 1..255 for NSP completion with a result code
+ * positive value for NSP completion with a result code
*
* -EAGAIN if the NSP is not yet present
* -ENODEV if the NSP is not a supported model
@@ -239,7 +304,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
u32 buff_cpp, u64 buff_addr)
{
- u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command;
+ u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
struct nfp_cpp *cpp = state->cpp;
u32 nsp_cpp;
int err;
@@ -292,18 +357,20 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
return err;
}
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val);
+ if (err < 0)
+ return err;
+ ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val);
+
err = FIELD_GET(NSP_STATUS_RESULT, reg);
if (err) {
- nfp_warn(cpp, "Result (error) code set: %d command: %d\n",
- -err, code);
+ nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n",
+ -err, (int)ret_val, code);
+ nfp_nsp_print_extended_error(state, ret_val);
return -err;
}
- err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &reg);
- if (err < 0)
- return err;
-
- return FIELD_GET(NSP_COMMAND_OPTION, reg);
+ return ret_val;
}
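nfp_nsp_command() now reads the command register back and extracts the option field into ret_val before checking NSP_STATUS_RESULT, so the extended error value is available even when the command failed. A sketch of the FIELD_GET() extraction with a hypothetical mask (the real NSP_COMMAND_OPTION layout comes from the CSR defines at the top of this file):

	#include <linux/bitfield.h>

	#define DEMO_COMMAND_OPTION	GENMASK_ULL(63, 32)	/* hypothetical */

	u64 reg = 0x0000177dULL << 32;			/* option field holds 6013 */
	u32 opt = FIELD_GET(DEMO_COMMAND_OPTION, reg);	/* -> 0x177d == 6013 */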
static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
@@ -380,9 +447,10 @@ int nfp_nsp_wait(struct nfp_nsp *state)
if (err != -EAGAIN)
break;
- err = msleep_interruptible(100);
- if (err)
+ if (msleep_interruptible(25)) {
+ err = -ERESTARTSYS;
break;
+ }
if (time_after(start_time, wait_until)) {
err = -ETIMEDOUT;
@@ -424,3 +492,9 @@ int nfp_nsp_write_eth_table(struct nfp_nsp *state,
return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
NULL, 0);
}
+
+int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
+ buf, size);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
new file mode 100644
index 000000000000..36b21e4dc56d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NSP_NSP_H
+#define NSP_NSP_H 1
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+struct firmware;
+struct nfp_cpp;
+struct nfp_nsp;
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
+u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+
+enum nfp_eth_interface {
+ NFP_INTERFACE_NONE = 0,
+ NFP_INTERFACE_SFP = 1,
+ NFP_INTERFACE_SFPP = 10,
+ NFP_INTERFACE_SFP28 = 28,
+ NFP_INTERFACE_QSFP = 40,
+ NFP_INTERFACE_CXP = 100,
+ NFP_INTERFACE_QSFP28 = 112,
+};
+
+enum nfp_eth_media {
+ NFP_MEDIA_DAC_PASSIVE = 0,
+ NFP_MEDIA_DAC_ACTIVE,
+ NFP_MEDIA_FIBRE,
+};
+
+enum nfp_eth_aneg {
+ NFP_ANEG_AUTO = 0,
+ NFP_ANEG_SEARCH,
+ NFP_ANEG_25G_CONSORTIUM,
+ NFP_ANEG_25G_IEEE,
+ NFP_ANEG_DISABLED,
+};
+
+/**
+ * struct nfp_eth_table - ETH table information
+ * @count: number of table entries
+ * @ports: table of ports
+ *
+ * @eth_index: port index according to legacy ethX numbering
+ * @index: chip-wide first channel index
+ * @nbi: NBI index
+ * @base: first channel index (within NBI)
+ * @lanes: number of channels
+ * @speed: interface speed (in Mbps)
+ * @interface: interface (module) plugged in
+ * @media: media type of the @interface
+ * @aneg: auto negotiation mode
+ * @mac_addr: interface MAC address
+ * @label_port: port id
+ * @label_subport: id of interface within port (for split ports)
+ * @enabled: is enabled?
+ * @tx_enabled: is TX enabled?
+ * @rx_enabled: is RX enabled?
+ * @override_changed: is media reconfig pending?
+ *
+ * @port_type: one of %PORT_* defines for ethtool
+ * @is_split: is interface part of a split port
+ */
+struct nfp_eth_table {
+ unsigned int count;
+ struct nfp_eth_table_port {
+ unsigned int eth_index;
+ unsigned int index;
+ unsigned int nbi;
+ unsigned int base;
+ unsigned int lanes;
+ unsigned int speed;
+
+ unsigned int interface;
+ enum nfp_eth_media media;
+
+ enum nfp_eth_aneg aneg;
+
+ u8 mac_addr[ETH_ALEN];
+
+ u8 label_port;
+ u8 label_subport;
+
+ bool enabled;
+ bool tx_enabled;
+ bool rx_enabled;
+
+ bool override_changed;
+
+ /* Computed fields */
+ u8 port_type;
+
+ bool is_split;
+ } ports[0];
+};
+
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
+struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
+
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
+ bool configed);
+
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp);
+
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
+
+/**
+ * struct nfp_nsp_identify - NSP static information
+ * @version: opaque version string
+ * @flags: version flags
+ * @br_primary: branch id of primary bootloader
+ * @br_secondary: branch id of secondary bootloader
+ * @br_nsp: branch id of NSP
+ * @primary: version id of primary bootloader
+ * @secondary: version id of secondary bootloader
+ * @nsp: version id of NSP
+ */
+struct nfp_nsp_identify {
+ char version[40];
+ u8 flags;
+ u8 br_primary;
+ u8 br_secondary;
+ u8 br_nsp;
+ u16 primary;
+ u16 secondary;
+ u16 nsp;
+};
+
+struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
+
+#endif
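struct nfp_eth_table ends in a zero-length array sized at allocation time, so callers iterate ports[] up to @count and free the whole blob with a single kfree(). A minimal consumer sketch (demo function, not part of the driver):

static void demo_dump_ports(struct nfp_cpp *cpp)
{
	struct nfp_eth_table *table;
	unsigned int i;

	table = nfp_eth_read_ports(cpp);
	if (!table)
		return;

	for (i = 0; i < table->count; i++)
		pr_info("port %d.%d: %u Mbps%s\n",
			table->ports[i].label_port,
			table->ports[i].label_subport,
			table->ports[i].speed,
			table->ports[i].is_split ? " (split)" : "");

	kfree(table);
}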
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
index edf703d319c8..e7a263de3731 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ * Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,51 +31,59 @@
* SOFTWARE.
*/
-#ifndef NSP_NSP_ETH_H
-#define NSP_NSP_ETH_H 1
+#include <linux/kernel.h>
+#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/if_ether.h>
+#include "nfp.h"
+#include "nfp_nsp.h"
-/**
- * struct nfp_eth_table - ETH table information
- * @count: number of table entries
- * @ports: table of ports
- *
- * @eth_index: port index according to legacy ethX numbering
- * @index: chip-wide first channel index
- * @nbi: NBI index
- * @base: first channel index (within NBI)
- * @lanes: number of channels
- * @speed: interface speed (in Mbps)
- * @mac_addr: interface MAC address
- * @label: interface id string
- * @enabled: is enabled?
- * @tx_enabled: is TX enabled?
- * @rx_enabled: is RX enabled?
- */
-struct nfp_eth_table {
- unsigned int count;
- struct nfp_eth_table_port {
- unsigned int eth_index;
- unsigned int index;
- unsigned int nbi;
- unsigned int base;
- unsigned int lanes;
- unsigned int speed;
+struct nsp_identify {
+ u8 version[40];
+ u8 flags;
+ u8 br_primary;
+ u8 br_secondary;
+ u8 br_nsp;
+ __le16 primary;
+ __le16 secondary;
+ __le16 nsp;
+ __le16 reserved;
+};
- u8 mac_addr[ETH_ALEN];
- char label[8];
+struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp)
+{
+ struct nfp_nsp_identify *nspi = NULL;
+ struct nsp_identify *ni;
+ int ret;
- bool enabled;
- bool tx_enabled;
- bool rx_enabled;
- } ports[0];
-};
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
+ return NULL;
+
+ ni = kzalloc(sizeof(*ni), GFP_KERNEL);
+ if (!ni)
+ return NULL;
+
+ ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
+ if (ret < 0) {
+ nfp_err(nfp_nsp_cpp(nsp), "reading bsp version failed %d\n",
+ ret);
+ goto exit_free;
+ }
+
+ nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
+ if (!nspi)
+ goto exit_free;
-struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
-struct nfp_eth_table *
-__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
-int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
+ memcpy(nspi->version, ni->version, sizeof(nspi->version));
+ nspi->version[sizeof(nspi->version) - 1] = '\0';
+ nspi->flags = ni->flags;
+ nspi->br_primary = ni->br_primary;
+ nspi->br_secondary = ni->br_secondary;
+ nspi->br_nsp = ni->br_nsp;
+ nspi->primary = le16_to_cpu(ni->primary);
+ nspi->secondary = le16_to_cpu(ni->secondary);
+ nspi->nsp = le16_to_cpu(ni->nsp);
-#endif
+exit_free:
+ kfree(ni);
+ return nspi;
+}
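The on-wire struct nsp_identify above keeps multi-byte fields as __le16, and the copy loop converts them with le16_to_cpu(), so the returned struct nfp_nsp_identify is host-endian. A caller sketch, assuming the nfp_info() logging helper used elsewhere in the driver:

	struct nfp_nsp_identify *nspi;

	nspi = __nfp_nsp_identify(nsp);
	if (nspi) {
		nfp_info(nfp_nsp_cpp(nsp), "BSP: %s (nsp 0x%04x)\n",
			 nspi->version, nspi->nsp);
		kfree(nspi);
	}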
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 1ece1f8ae4b3..639438d8313a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -43,13 +43,13 @@
#include <linux/module.h>
#include "nfp.h"
-#include "nfp_nsp_eth.h"
+#include "nfp_nsp.h"
#include "nfp6000/nfp6000.h"
#define NSP_ETH_NBI_PORT_COUNT 24
#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \
- sizeof(struct eth_table_entry))
+ sizeof(union eth_table_entry))
#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0)
#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
@@ -58,14 +58,32 @@
#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
+#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0)
#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8)
+#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12)
+#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20)
+#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
+#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
+#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
+#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
+#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
+
+enum nfp_eth_raw {
+ NSP_ETH_RAW_PORT = 0,
+ NSP_ETH_RAW_STATE,
+ NSP_ETH_RAW_MAC,
+ NSP_ETH_RAW_CONTROL,
+
+ NSP_ETH_NUM_RAW
+};
enum nfp_eth_rate {
RATE_INVALID = 0,
@@ -76,29 +94,49 @@ enum nfp_eth_rate {
RATE_25G,
};
-struct eth_table_entry {
- __le64 port;
- __le64 state;
- u8 mac_addr[6];
- u8 resv[2];
- __le64 control;
+union eth_table_entry {
+ struct {
+ __le64 port;
+ __le64 state;
+ u8 mac_addr[6];
+ u8 resv[2];
+ __le64 control;
+ };
+ __le64 raw[NSP_ETH_NUM_RAW];
+};
+
+static const struct {
+ enum nfp_eth_rate rate;
+ unsigned int speed;
+} nsp_eth_rate_tbl[] = {
+ { RATE_INVALID, 0, },
+ { RATE_10M, SPEED_10, },
+ { RATE_100M, SPEED_100, },
+ { RATE_1G, SPEED_1000, },
+ { RATE_10G, SPEED_10000, },
+ { RATE_25G, SPEED_25000, },
};
-static unsigned int nfp_eth_rate(enum nfp_eth_rate rate)
+static unsigned int nfp_eth_rate2speed(enum nfp_eth_rate rate)
{
- unsigned int rate_xlate[] = {
- [RATE_INVALID] = 0,
- [RATE_10M] = SPEED_10,
- [RATE_100M] = SPEED_100,
- [RATE_1G] = SPEED_1000,
- [RATE_10G] = SPEED_10000,
- [RATE_25G] = SPEED_25000,
- };
+ int i;
- if (rate >= ARRAY_SIZE(rate_xlate))
- return 0;
+ for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].rate == rate)
+ return nsp_eth_rate_tbl[i].speed;
+
+ return 0;
+}
+
+static unsigned int nfp_eth_speed2rate(unsigned int speed)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].speed == speed)
+ return nsp_eth_rate_tbl[i].rate;
- return rate_xlate[rate];
+ return RATE_INVALID;
}
static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
@@ -110,8 +148,8 @@ static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
}
static void
-nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
- struct nfp_eth_table_port *dst)
+nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
+ unsigned int index, struct nfp_eth_table_port *dst)
{
unsigned int rate;
u64 port, state;
@@ -129,14 +167,60 @@ nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
- rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state));
+ rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state));
dst->speed = dst->lanes * rate;
+ dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state);
+ dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state);
+
nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
- snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu",
- FIELD_GET(NSP_ETH_PORT_PHYLABEL, port),
- FIELD_GET(NSP_ETH_PORT_LABEL, port));
+ dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port);
+ dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17)
+ return;
+
+ dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state);
+ dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state);
+}
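Since @speed is derived as the lane count times the per-lane rate, a worked example under the mapping above:

/* A 40G port is reported by the NSP as RATE_10G over four lanes:
 *
 *	rate       = nfp_eth_rate2speed(RATE_10G);	// 10000 Mbps per lane
 *	dst->speed = dst->lanes * rate;			// 4 * 10000 = 40000
 */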
+
+static void
+nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < table->count; i++)
+ for (j = 0; j < table->count; j++) {
+ if (i == j)
+ continue;
+ if (table->ports[i].label_port !=
+ table->ports[j].label_port)
+ continue;
+ if (table->ports[i].label_subport ==
+ table->ports[j].label_subport)
+ nfp_warn(cpp,
+ "Port %d subport %d is a duplicate\n",
+ table->ports[i].label_port,
+ table->ports[i].label_subport);
+
+ table->ports[i].is_split = true;
+ break;
+ }
+}
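/* Example: splitting one 40G interface into 4x10G produces four entries
 *	1.0  1.1  1.2  1.3
 * all sharing label_port == 1 with distinct label_subport values, so each
 * gets is_split = true; equal subports on one port are flagged as duplicates.
 */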
+
+static void
+nfp_eth_calc_port_type(struct nfp_cpp *cpp, struct nfp_eth_table_port *entry)
+{
+ if (entry->interface == NFP_INTERFACE_NONE) {
+ entry->port_type = PORT_NONE;
+ return;
+ }
+
+ if (entry->media == NFP_MEDIA_FIBRE)
+ entry->port_type = PORT_FIBRE;
+ else
+ entry->port_type = PORT_DA;
}
/**
@@ -166,10 +250,9 @@ struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp)
struct nfp_eth_table *
__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
{
- struct eth_table_entry *entries;
+ union eth_table_entry *entries;
struct nfp_eth_table *table;
- unsigned int cnt;
- int i, j, ret;
+ int i, j, ret, cnt = 0;
entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
if (!entries)
@@ -178,93 +261,288 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
if (ret < 0) {
nfp_err(cpp, "reading port table failed %d\n", ret);
- kfree(entries);
- return NULL;
+ goto err;
}
- /* Some versions of flash will give us 0 instead of port count */
- cnt = ret;
- if (!cnt) {
- for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
- if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
- cnt++;
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ cnt++;
+
+ /* Some versions of flash will give us 0 instead of port count.
+ * For those that give a port count, verify it against the value
+ * calculated above.
+ */
+ if (ret && ret != cnt) {
+ nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",
+ ret, cnt);
+ goto err;
}
table = kzalloc(sizeof(*table) +
sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
- if (!table) {
- kfree(entries);
- return NULL;
- }
+ if (!table)
+ goto err;
table->count = cnt;
for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
- nfp_eth_port_translate(&entries[i], i,
+ nfp_eth_port_translate(nsp, &entries[i], i,
&table->ports[j++]);
+ nfp_eth_mark_split_ports(cpp, table);
+ for (i = 0; i < table->count; i++)
+ nfp_eth_calc_port_type(cpp, &table->ports[i]);
+
kfree(entries);
return table;
+
+err:
+ kfree(entries);
+ return NULL;
}
-/**
- * nfp_eth_set_mod_enable() - set PHY module enable control bit
- * @cpp: NFP CPP handle
- * @idx: NFP chip-wide port index
- * @enable: Desired state
- *
- * Enable or disable PHY module (this usually means setting the TX lanes
- * disable bits).
- *
- * Return: 0 or -ERRNO.
- */
-int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx)
{
- struct eth_table_entry *entries;
+ union eth_table_entry *entries;
struct nfp_nsp *nsp;
- u64 reg;
int ret;
entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
if (!entries)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
nsp = nfp_nsp_open(cpp);
if (IS_ERR(nsp)) {
kfree(entries);
- return PTR_ERR(nsp);
+ return nsp;
}
ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
if (ret < 0) {
nfp_err(cpp, "reading port table failed %d\n", ret);
- goto exit_close_nsp;
+ goto err;
}
if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
nfp_warn(cpp, "trying to set port state on disabled port %d\n",
idx);
- ret = -EINVAL;
- goto exit_close_nsp;
+ goto err;
+ }
+
+ nfp_nsp_config_set_state(nsp, entries, idx);
+ return nsp;
+
+err:
+ nfp_nsp_close(nsp);
+ kfree(entries);
+ return ERR_PTR(-EIO);
+}
+
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+
+ nfp_nsp_config_set_modified(nsp, false);
+ nfp_nsp_config_clear_state(nsp);
+ nfp_nsp_close(nsp);
+ kfree(entries);
+}
+
+/**
+ * nfp_eth_config_commit_end() - perform recorded configuration changes
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ *
+ * Perform the configuration which was requested with __nfp_eth_set_*()
+ * helpers and recorded in @nsp state. If device was already configured
+ * as requested or no __nfp_eth_set_*() operations were made no NSP command
+ * will be performed.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ int ret = 1;
+
+ if (nfp_nsp_config_modified(nsp)) {
+ ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ ret = ret < 0 ? ret : 0;
+ }
+
+ nfp_eth_config_cleanup_end(nsp);
+
+ return ret;
+}
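A sketch of the intended start/set/commit flow (a hypothetical caller; nfp_eth_set_mod_enable() below follows the same shape):

static int demo_set_lane_speed(struct nfp_cpp *cpp, unsigned int idx,
			       unsigned int speed)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_eth_config_start(cpp, idx);	/* reads table, records idx */
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_speed(nsp, speed);	/* stage the change */
	if (err) {
		nfp_eth_config_cleanup_end(nsp);
		return err;
	}

	return nfp_eth_config_commit_end(nsp);	/* 0, 1 (no change) or -ERRNO */
}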
+
+/**
+ * nfp_eth_set_mod_enable() - set PHY module enable control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @enable: Desired state
+ *
+ * Enable or disable PHY module (this usually means setting the TX lanes
+ * disable bits).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ /* Check if we are already in requested state */
+ reg = le64_to_cpu(entries[idx].state);
+ if (enable != FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_ENABLED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
+ entries[idx].control = cpu_to_le64(reg);
+
+ nfp_nsp_config_set_modified(nsp, true);
}
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/**
+ * nfp_eth_set_configured() - set PHY module configured control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @configed: Desired state
+ *
+ * Set the ifup/ifdown state on the PHY.
+ *
+ * Return: 0 or -ERRNO.
+ */
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ u64 reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ entries = nfp_nsp_config_entries(nsp);
+
/* Check if we are already in requested state */
reg = le64_to_cpu(entries[idx].state);
- if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
- ret = 0;
- goto exit_close_nsp;
+ if (configed != FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) {
+ reg = le64_to_cpu(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_CONFIGURED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed);
+ entries[idx].control = cpu_to_le64(reg);
+
+ nfp_nsp_config_set_modified(nsp, true);
}
- reg = le64_to_cpu(entries[idx].control);
- reg &= ~NSP_ETH_CTRL_ENABLED;
- reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
- entries[idx].control = cpu_to_le64(reg);
+ return nfp_eth_config_commit_end(nsp);
+}
- ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
-exit_close_nsp:
- nfp_nsp_close(nsp);
- kfree(entries);
+/* Force inline, FIELD_* macros require masks to be known at compile time */
+static __always_inline int
+nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
+ const u64 mask, unsigned int val, const u64 ctrl_bit)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ unsigned int idx = nfp_nsp_config_idx(nsp);
+ u64 reg;
+
+ /* Note: set features were added in ABI 0.14 but the error
+ * codes were initially not populated correctly.
+ */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set operations not supported, please update flash\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are already in requested state */
+ reg = le64_to_cpu(entries[idx].raw[raw_idx]);
+ if (val == FIELD_GET(mask, reg))
+ return 0;
- return ret < 0 ? ret : 0;
+ reg &= ~mask;
+ reg |= FIELD_PREP(mask, val);
+ entries[idx].raw[raw_idx] = cpu_to_le64(reg);
+
+ entries[idx].control |= cpu_to_le64(ctrl_bit);
+
+ nfp_nsp_config_set_modified(nsp, true);
+
+ return 0;
+}
+
+/**
+ * __nfp_eth_set_aneg() - set PHY autonegotiation control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired autonegotiation mode
+ *
+ * Allow/disallow PHY module to advertise/perform autonegotiation.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode)
+{
+ return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_ANEG, mode,
+ NSP_ETH_CTRL_SET_ANEG);
+}
+
+/**
+ * __nfp_eth_set_speed() - set interface speed/rate
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @speed: Desired speed (per lane)
+ *
+ * Set lane speed. The provided @speed value should be the subport speed divided
+ * by the number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for
+ * 50G, etc.)
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
+{
+ enum nfp_eth_rate rate;
+
+ rate = nfp_eth_speed2rate(speed);
+ if (rate == RATE_INVALID) {
+ nfp_warn(nfp_nsp_cpp(nsp),
+ "could not find matching lane rate for speed %u\n",
+ speed);
+ return -EINVAL;
+ }
+
+ return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_RATE, rate,
+ NSP_ETH_CTRL_SET_RATE);
+}
+
+/**
+ * __nfp_eth_set_split() - set interface lane split
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @lanes: Desired lanes per port
+ *
+ * Set number of lanes in the port.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
+{
+ return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ lanes, NSP_ETH_CTRL_SET_LANES);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index a2850344f8b4..2d15a7c9d0de 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -45,6 +45,13 @@
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
+#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME "nfp.res"
+#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
+
#define NFP_RESOURCE_ENTRY_NAME_SZ 8
/**
@@ -100,9 +107,11 @@ static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
strncpy(name_pad, res->name, sizeof(name_pad));
/* Search for a matching entry */
- key = NFP_RESOURCE_TBL_KEY;
- if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8))
- key = crc32_posix(name_pad, sizeof(name_pad));
+ if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+ nfp_err(cpp, "Grabbing device lock not supported\n");
+ return -EOPNOTSUPP;
+ }
+ key = crc32_posix(name_pad, sizeof(name_pad));
for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
u64 addr = NFP_RESOURCE_TBL_BASE +
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 9709c8ca0774..159564d8dcdb 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -152,7 +152,6 @@ struct w90p910_ether {
struct tran_pdesc *tdesc;
dma_addr_t rdesc_phys;
dma_addr_t tdesc_phys;
- struct net_device_stats stats;
struct platform_device *pdev;
struct resource *res;
struct sk_buff *skb;
@@ -584,15 +583,6 @@ static int w90p910_ether_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
-{
- struct w90p910_ether *ether;
-
- ether = netdev_priv(dev);
-
- return &ether->stats;
-}
-
static int w90p910_send_frame(struct net_device *dev,
unsigned char *data, int length)
{
@@ -671,10 +661,10 @@ static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
ether->finish_tx = 0;
if (txbd->sl & TXDS_TXCP) {
- ether->stats.tx_packets++;
- ether->stats.tx_bytes += txbd->sl & 0xFFFF;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += txbd->sl & 0xFFFF;
} else {
- ether->stats.tx_errors++;
+ dev->stats.tx_errors++;
}
txbd->sl = 0x0;
@@ -730,7 +720,7 @@ static void netdev_rx(struct net_device *dev)
data = ether->rdesc->recv_buf[ether->cur_rx];
skb = netdev_alloc_skb(dev, length + 2);
if (!skb) {
- ether->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
@@ -738,24 +728,24 @@ static void netdev_rx(struct net_device *dev)
skb_put(skb, length);
skb_copy_to_linear_data(skb, data, length);
skb->protocol = eth_type_trans(skb, dev);
- ether->stats.rx_packets++;
- ether->stats.rx_bytes += length;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
netif_rx(skb);
} else {
- ether->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (status & RXDS_RP) {
dev_err(&pdev->dev, "rx runt err\n");
- ether->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
} else if (status & RXDS_CRCE) {
dev_err(&pdev->dev, "rx crc err\n");
- ether->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
} else if (status & RXDS_ALIE) {
dev_err(&pdev->dev, "rx alignment err\n");
- ether->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
} else if (status & RXDS_PTLE) {
dev_err(&pdev->dev, "rx longer err\n");
- ether->stats.rx_over_errors++;
+ dev->stats.rx_over_errors++;
}
}
@@ -912,7 +902,6 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
.ndo_open = w90p910_ether_open,
.ndo_stop = w90p910_ether_close,
.ndo_start_xmit = w90p910_ether_start_xmit,
- .ndo_get_stats = w90p910_ether_stats,
.ndo_set_rx_mode = w90p910_ether_set_multicast_list,
.ndo_set_mac_address = w90p910_set_mac_address,
.ndo_do_ioctl = w90p910_ether_ioctl,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7b43a3b4abdc..3dd973475125 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1375,13 +1375,8 @@ netxen_receive_peg_ready(struct netxen_adapter *adapter)
} while (--retries);
- if (!retries) {
- printk(KERN_ERR "Receive Peg initialization not "
- "complete, state: 0x%x.\n", val);
- return -EIO;
- }
-
- return 0;
+ pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val);
+ return -EIO;
}
int netxen_init_firmware(struct netxen_adapter *adapter)
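The refactor assumes the success path already returns 0 from inside the do/while loop, so the code after the loop can unconditionally report the timeout, dropping the redundant !retries test. The resulting poll-loop shape, sketched with hypothetical names:

/* Sketch of the polling pattern; FOO_STATE_READY is hypothetical */
static int foo_wait_ready(void __iomem *reg)
{
	int retries = 2000;
	u32 val;

	do {
		val = readl(reg);
		if (val == FOO_STATE_READY)
			return 0;	/* success exits inside the loop */
		msleep(10);
	} while (--retries);

	pr_err("initialization not complete, state: 0x%x.\n", val);
	return -EIO;
}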
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00c17fa6545b..2ab1aab7c3fe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,19 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+
+#define QED_MAJOR_VERSION 8
+#define QED_MINOR_VERSION 10
+#define QED_REVISION_VERSION 10
+#define QED_ENGINEERING_VERSION 21
+
+#define QED_VERSION \
+ ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
+ (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
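Each version component occupies one byte of the packed u32, so driver and firmware versions can be compared numerically; with the values above:

/* Worked example:
 *   QED_VERSION = (8 << 24) | (10 << 16) | (10 << 8) | 21 = 0x080a0a15
 */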
@@ -59,9 +71,8 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
-#define ISCSI_BDQ_ID(_port_id) (_port_id)
-#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2)
#define QED_WID_SIZE (1024)
+#define QED_MIN_WIDS (4)
#define QED_PF_DEMS_SIZE (4)
/* cau states */
@@ -76,6 +87,15 @@ union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
/* helpers */
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define QED_MFW_SET_FIELD(name, field, value) \
+ do { \
+ (name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
+ } while (0)
+
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
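The MFW helpers rely on a <FIELD>_MASK/<FIELD>_SHIFT naming convention, with the mask defined in place (already shifted to the field's position). A usage sketch with a hypothetical field definition:

/* Hypothetical in-place field layout, for illustration only */
#define FOO_MASK	0x000000f0
#define FOO_SHIFT	4

u32 flags = 0xa3;
u32 v = QED_MFW_GET_FIELD(flags, FOO);	/* (0xa3 & 0xf0) >> 4 == 0xa */

QED_MFW_SET_FIELD(flags, FOO, 0x5);	/* flags becomes 0x53 */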
@@ -130,9 +150,35 @@ enum qed_tunn_clss {
QED_TUNN_CLSS_MAC_VNI,
QED_TUNN_CLSS_INNER_MAC_VLAN,
QED_TUNN_CLSS_INNER_MAC_VNI,
+ QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
MAX_QED_TUNN_CLSS,
};
+struct qed_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum qed_tunn_clss tun_cls;
+};
+
+struct qed_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
+};
+
+struct qed_tunnel_info {
+ struct qed_tunn_update_type vxlan;
+ struct qed_tunn_update_type l2_geneve;
+ struct qed_tunn_update_type ip_geneve;
+ struct qed_tunn_update_type l2_gre;
+ struct qed_tunn_update_type ip_gre;
+
+ struct qed_tunn_update_udp_port vxlan_port;
+ struct qed_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
+};
+
struct qed_tunn_start_params {
unsigned long tunn_mode;
u16 vxlan_udp_port;
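Each b_update_* flag lets a caller change a single tunnel attribute while leaving the rest untouched. A sketch of requesting only a VXLAN UDP port update:

/* Sketch: update the VXLAN UDP port and nothing else */
struct qed_tunnel_info tunn = {};

tunn.vxlan_port.b_update_port = true;
tunn.vxlan_port.port = 4789;	/* IANA-assigned VXLAN port */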
@@ -198,6 +244,7 @@ enum qed_resources {
QED_LL2_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
+ QED_BDQ,
QED_MAX_RESC,
};
@@ -205,8 +252,9 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
- QED_VF_L2_QUE,
+ QED_ISCSI_CQ,
QED_FCOE_CQ,
+ QED_VF_L2_QUE,
QED_MAX_FEATURES,
};
@@ -219,7 +267,9 @@ enum QED_PORT_MODE {
QED_PORT_MODE_DE_4X20G,
QED_PORT_MODE_DE_1X40G,
QED_PORT_MODE_DE_2X25G,
- QED_PORT_MODE_DE_1X25G
+ QED_PORT_MODE_DE_1X25G,
+ QED_PORT_MODE_DE_4X25G,
+ QED_PORT_MODE_DE_2X10G,
};
enum qed_dev_cap {
@@ -249,9 +299,14 @@ struct qed_hw_info {
RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
- u8 num_tc;
+ /* Number of traffic classes the HW supports */
+ u8 num_hw_tc;
+
+ /* Number of TCs that should be active according to DCBx or the
+ * upper-layer driver configuration.
+ */
+ u8 num_active_tc;
u8 offload_tc;
- u8 non_offload_tc;
u32 concrete_fid;
u16 opaque_fid;
@@ -314,15 +369,19 @@ struct qed_qm_info {
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
- u8 pure_lb_pq;
- u8 offload_pq;
- u8 pure_ack_pq;
- u8 ooo_pq;
- u8 vf_queues_offset;
+ u16 pure_lb_pq;
+ u16 offload_pq;
+ u16 low_latency_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
@@ -353,6 +412,12 @@ struct qed_fw_data {
u32 init_ops_size;
};
+#define DRV_MODULE_VERSION \
+ __stringify(QED_MAJOR_VERSION) "." \
+ __stringify(QED_MINOR_VERSION) "." \
+ __stringify(QED_REVISION_VERSION) "." \
+ __stringify(QED_ENGINEERING_VERSION)
+
struct qed_simd_fp_handler {
void *token;
void (*func)(void *);
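Because __stringify() expands each numeric macro before pasting, the human-readable version string always tracks the packed QED_VERSION:

/* With the component values above this expands to:
 *   DRV_MODULE_VERSION == "8" "." "10" "." "10" "." "21" == "8.10.10.21"
 */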
@@ -364,7 +429,8 @@ struct qed_hwfn {
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
-#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
+#define QED_PATH_ID(_p_hwfn) \
+ (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
u8 port_id;
bool b_active;
@@ -409,6 +475,11 @@ struct qed_hwfn {
struct qed_ptt *p_main_ptt;
struct qed_ptt *p_dpc_ptt;
+ /* PTP will be used only by the leading function.
+ * As a result, usage of all PTP APIs must be synchronized.
+ */
+ struct qed_ptt *p_ptp_ptt;
+
struct qed_sb_sp_info *p_sp_sb;
struct qed_sb_attn_info *p_sb_attn;
@@ -455,6 +526,7 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
/* PWM region specific data */
+ u16 wid_count;
u32 dpi_size;
u32 dpi_count;
@@ -465,8 +537,8 @@ struct qed_hwfn {
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
- /* p_ptp_ptt is valid for leading HWFN only */
- struct qed_ptt *p_ptp_ptt;
+ struct qed_ptt *p_arfs_ptt;
+
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -523,9 +595,7 @@ struct qed_dev {
u8 dp_level;
char name[NAME_SIZE];
- u8 type;
-#define QED_DEV_TYPE_BB (0 << 0)
-#define QED_DEV_TYPE_AH BIT(0)
+ enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
@@ -540,6 +610,9 @@ struct qed_dev {
u16 vendor_id;
u16 device_id;
+#define QED_DEV_ID_MASK 0xff00
+#define QED_DEV_ID_MASK_BB 0x1600
+#define QED_DEV_ID_MASK_AH 0x8000
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@@ -606,9 +679,7 @@ struct qed_dev {
/* SRIOV */
struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
-
- unsigned long tunn_mode;
-
+ struct qed_tunnel_info tunnel;
bool b_is_vf;
u32 drv_type;
struct qed_eth_stats *reset_stats;
@@ -652,12 +723,19 @@ struct qed_dev {
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
+ u16 tunn_feature_mask;
};
-#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
-#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
-#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
-#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
+#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
+ : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+ : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+ : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+ : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
+ : MAX_NUM_PFS_K2)
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
@@ -693,6 +771,26 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
u32 min_pf_rate);
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_device_num_engines(struct qed_dev *cdev);
+int qed_device_get_port_id(struct qed_dev *cdev);
+
+#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (BIT(0))
+#define PQ_FLAGS_MCOS (BIT(1))
+#define PQ_FLAGS_LB (BIT(2))
+#define PQ_FLAGS_OOO (BIT(3))
+#define PQ_FLAGS_ACK (BIT(4))
+#define PQ_FLAGS_OFLD (BIT(5))
+#define PQ_FLAGS_VFS (BIT(6))
+#define PQ_FLAGS_LLT (BIT(7))
+
+/* physical queue index for cm context initialization */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */
@@ -721,5 +819,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
#endif /* _QED_H */
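Consumers now name a queue class by flag (or by TC/VF index) instead of filling a per-call union of QM parameters. A sketch of the three accessors, matching the XCM pure-LB usage later in this series:

/* Sketch: resolving physical-queue indices via the new accessors */
u16 lb_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
u16 tc_pq = qed_get_cm_pq_idx_mcos(p_hwfn, 0);	/* traffic class 0 */
u16 vf_pq = qed_get_cm_pq_idx_vf(p_hwfn, 0);	/* relative VF 0 */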
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7e3a6fed3da6..b3aaa985956e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -71,8 +71,7 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -220,9 +219,6 @@ struct qed_cxt_mngr {
*/
u32 vf_count;
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
@@ -238,12 +234,17 @@ struct qed_cxt_mngr {
u32 t2_num_pages;
u64 first_free;
u64 last_free;
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
};
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
- type == PROTOCOLID_FCOE ||
- type == PROTOCOLID_ROCE;
+ type == PROTOCOLID_FCOE;
}
static bool tm_cid_proto(enum protocol_type type)
@@ -293,6 +294,9 @@ static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
+
+ /* Additionally, account for the L2 steering (arfs) filters */
+ iids->pf_cids += p_mngr->arfs_count;
}
/* counts the iids for the Timers block configuration */
@@ -304,16 +308,34 @@ struct qed_tm_iids {
u32 per_vf_tids;
};
-static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_mngr *p_mngr,
struct qed_tm_iids *iids)
{
- u32 i, j;
-
- for (i = 0; i < MAX_CONN_TYPES; i++) {
+ bool tm_vf_required = false;
+ bool tm_required = false;
+ int i, j;
+
+ /* Timers are a special case: we don't count how many cids require
+ * timers, but rather what the maximal cid used by the timer block
+ * will be. Therefore we traverse in reverse order, and once we hit a
+ * protocol that requires the timers memory, we sum all the protocols
+ * up to that one.
+ */
+ for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
- if (tm_cid_proto(i)) {
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
iids->pf_cids += p_cfg->cid_count;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
iids->per_vf_cids += p_cfg->cids_per_vf;
}
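For example, if protocols 0, 1 and 2 hold 300, 50 and 100 cids respectively and only protocol 2 uses timers, the reverse scan flips tm_required at index 2 and accumulates 100 + 50 + 300 = 450 (the highest cid the timer block can reference, since cids are allocated contiguously per protocol) rather than just the 100 timer-owning cids.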
@@ -527,7 +549,22 @@ static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
return lines_to_skip;
}
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
+ *p_cli)
+{
+ p_cli->active = false;
+ p_cli->first.val = 0;
+ p_cli->last.val = 0;
+ return p_cli;
+}
+
+static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
+{
+ p_blk->total_size = 0;
+ return p_blk;
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 curr_line, total, i, task_size, line;
@@ -551,7 +588,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* CDUC */
- p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
+
curr_line = p_mngr->pf_start_line;
/* CDUC PF */
@@ -560,7 +598,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* get the counters for the CDUC and QM clients */
qed_cxt_cdu_iids(p_mngr, &cdu_iids);
- p_blk = &p_cli->pf_blks[CDUC_BLK];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
@@ -574,7 +612,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUC);
/* CDUC VF */
- p_blk = &p_cli->vf_blks[CDUC_BLK];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
@@ -588,7 +626,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUC);
/* CDUT PF */
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
p_cli->first.val = curr_line;
/* first the 'working' task memory */
@@ -597,7 +635,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
p_mngr->task_type_size[p_seg->type]);
@@ -612,7 +650,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ p_blk =
+ qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
if (!p_seg->has_fl_mem) {
/* The segment is active (total size pf 'working'
@@ -657,7 +696,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* 'working' memory */
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
- p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total,
p_mngr->task_type_size[p_seg->type]);
@@ -666,7 +705,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUT);
/* 'init' memory */
- p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ p_blk =
+ qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
if (!p_seg->has_fl_mem) {
/* see comment above */
line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
@@ -694,8 +734,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
}
/* QM */
- p_cli = &p_mngr->clients[ILT_CLI_QM];
- p_blk = &p_cli->pf_blks[0];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
@@ -719,7 +759,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_cli->pf_total_lines = curr_line - p_blk->start_line;
/* SRC */
- p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
qed_cxt_src_iids(p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
@@ -733,7 +773,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
total = roundup_pow_of_two(local_max);
- p_blk = &p_cli->pf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * sizeof(struct src_ent),
sizeof(struct src_ent));
@@ -744,11 +784,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
}
/* TM PF */
- p_cli = &p_mngr->clients[ILT_CLI_TM];
- qed_cxt_tm_iids(p_mngr, &tm_iids);
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
if (total) {
- p_blk = &p_cli->pf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
@@ -760,14 +800,14 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* TM VF */
total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
if (total) {
- p_blk = &p_cli->vf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
- p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
@@ -777,8 +817,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
total = qed_cxt_get_srq_count(p_hwfn);
if (total) {
- p_cli = &p_mngr->clients[ILT_CLI_TSDM];
- p_blk = &p_cli->pf_blks[SRQ_BLK];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
@@ -787,13 +827,50 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_cli->pf_total_lines = curr_line - p_blk->start_line;
}
+ *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
- RESC_NUM(p_hwfn, QED_ILT)) {
- DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
- curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ RESC_NUM(p_hwfn, QED_ILT))
return -EINVAL;
+
+ return 0;
+}
+
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 excess_lines, available_lines;
+ struct qed_cxt_mngr *p_mngr;
+ u32 ilt_page_size, elem_size;
+ struct qed_tid_seg *p_seg;
+ int i;
+
+ available_lines = RESC_NUM(p_hwfn, QED_ILT);
+ excess_lines = used_lines - available_lines;
+
+ if (!excess_lines)
+ return 0;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ return 0;
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ elem_size = p_mngr->task_type_size[p_seg->type];
+ if (!elem_size)
+ continue;
+
+ return (ilt_page_size / elem_size) * excess_lines;
}
+ DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
return 0;
}
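As a worked example with the default 64K ILT pages: an elem_size of 128 bytes (illustrative) means each line maps 65536 / 128 = 512 tasks, so 3 excess lines ask the RDMA layer to shed 3 * 512 = 1536 tasks.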
@@ -1127,7 +1204,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
- /* default ILT page size for all clients is 32K */
+ /* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
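With ILT_DEFAULT_HW_P_SIZE now fixed at 4, ILT_PAGE_IN_BYTES(4) = 1 << (4 + 12) = 65536, i.e. the former RoCE-conditional 32K/64K split collapses to 64K pages for every client.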
@@ -1367,7 +1444,7 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
}
}
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
@@ -1393,22 +1470,15 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
- qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+ qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
}
/* CM PF */
-static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
- union qed_qm_pq_params pq_params;
- u16 pq;
-
/* XCM pure-LB queue */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
- return 0;
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}
/* DQ PF */
@@ -1640,7 +1710,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
u8 i;
memset(&tm_iids, 0, sizeof(tm_iids));
- qed_cxt_tm_iids(p_mngr, &tm_iids);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
/* @@@TBD No pre-scan for now */
@@ -1758,9 +1828,9 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
qed_prs_init_common(p_hwfn);
}
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- qed_qm_init_pf(p_hwfn);
+ qed_qm_init_pf(p_hwfn, p_ptt);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
qed_cdu_init_pf(p_hwfn);
@@ -1884,13 +1954,12 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
}
static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
- struct qed_rdma_pf_params *p_params)
+ struct qed_rdma_pf_params *p_params,
+ u32 num_tasks)
{
- u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+ u32 num_cons, num_qps, num_srqs;
enum protocol_type proto;
- num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
- num_tasks = num_mrs; /* each mr uses a single task id */
num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
switch (p_hwfn->hw_info.personality) {
@@ -1919,7 +1988,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
}
}
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
{
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
@@ -1931,9 +2000,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
switch (p_hwfn->hw_info.personality) {
case QED_PCI_ETH_ROCE:
{
- qed_rdma_set_pf_params(p_hwfn,
- &p_hwfn->
- pf_params.rdma_pf_params);
+ qed_rdma_set_pf_params(p_hwfn,
+ &p_hwfn->
+ pf_params.rdma_pf_params,
+ rdma_tasks);
/* no need for break since RoCE coexist with Ethernet */
}
case QED_PCI_ETH:
@@ -1943,6 +2013,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons, 1);
+ p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
break;
}
case QED_PCI_FCOE:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8b010324268a..53ad532dc212 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -105,19 +105,28 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*
* @param p_hwfn
- *
+ * @param rdma_tasks - requested maximum number of RDMA tasks
* @return int
*/
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
/**
* @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
*
* @param p_hwfn
+ * @param last_line
*
* @return int
*/
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute_excess - how many RDMA tasks must be
+ *        shed when used_lines exceeds the available ILT lines
+ *
+ * @param p_hwfn
+ * @param used_lines
+ */
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
/**
* @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
@@ -163,19 +172,18 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
- *
- *
* @param p_hwfn
+ * @param p_ptt
*/
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief qed_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
+ * @param p_ptt
*/
-
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index cfdadb658ade..d883ad5bec6d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -183,7 +183,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
"%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
qed_dcbx_app_update[i].name, p_data->arr[id].update,
p_data->arr[id].enable, p_data->arr[id].priority,
- p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc);
}
}
@@ -204,12 +204,8 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
p_data->arr[type].tc = tc;
/* QM reconf data */
- if (p_info->personality == personality) {
- if (personality == QED_PCI_ETH)
- p_info->non_offload_tc = tc;
- else
- p_info->offload_tc = tc;
- }
+ if (p_info->personality == personality)
+ p_info->offload_tc = tc;
}
/* Update app protocol data and hw_info fields with the TLV info */
@@ -275,8 +271,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, u8 dcbx_version)
{
- u8 tc, priority_map;
enum dcbx_protocol_type type;
+ u8 tc, priority_map;
bool enable, ieee;
u16 protocol_id;
int priority;
@@ -376,7 +372,9 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
if (rc)
return rc;
- p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@@ -558,8 +556,9 @@ qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
- "PFC params: willing %d, pfc_bitmap %d\n",
- p_params->pfc.willing, pfc_map);
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
}
static void
@@ -578,10 +577,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
- "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
- p_params->ets_willing,
- p_params->ets_cbs,
- p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
if (p_params->ets_enabled && !p_params->max_ets_tc) {
p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
@@ -622,8 +621,7 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
}
static void
-qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -635,8 +633,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
}
static void
-qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -649,7 +646,6 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct qed_dcbx_operational_params *p_operational;
@@ -670,6 +666,7 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
if (!enabled) {
p_operational->enabled = enabled;
p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx is disabled\n");
return;
}
@@ -683,8 +680,14 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
DCBX_CONFIG_VERSION_CEE);
p_operational->cee = val;
- DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
- p_operational->ieee, p_operational->cee);
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Version support: ieee %d, cee %d, static %d\n",
+ p_operational->ieee, p_operational->cee,
+ p_operational->local);
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
@@ -699,7 +702,6 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct lldp_config_params_s *p_local;
@@ -714,7 +716,6 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct lldp_status_params_s *p_remote;
@@ -728,25 +729,24 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
}
static int
-qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- struct qed_dcbx_get *p_params,
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_params,
enum qed_mib_read_type type)
{
switch (type) {
case QED_DCBX_REMOTE_MIB:
- qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_remote_params(p_hwfn, p_params);
break;
case QED_DCBX_LOCAL_MIB:
- qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_local_params(p_hwfn, p_params);
break;
case QED_DCBX_OPERATIONAL_MIB:
- qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_operational_params(p_hwfn, p_params);
break;
case QED_DCBX_REMOTE_LLDP_MIB:
- qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_remote_lldp_params(p_hwfn, p_params);
break;
case QED_DCBX_LOCAL_LLDP_MIB:
- qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_local_lldp_params(p_hwfn, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
@@ -904,7 +904,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
qed_sp_pf_update(p_hwfn);
}
}
- qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+
+ qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
qed_dcbx_aen(p_hwfn, type);
return rc;
@@ -912,17 +913,14 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
{
- int rc = 0;
-
p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
if (!p_hwfn->p_dcbx_info)
- rc = -ENOMEM;
+ return -ENOMEM;
- return rc;
+ return 0;
}
-void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
- struct qed_dcbx_info *p_dcbx_info)
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_dcbx_info);
}
@@ -961,14 +959,9 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
p_dcb_data = &p_dest->fcoe_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
p_dcb_data = &p_dest->roce_dcb_data;
-
- if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
- qed_dcbx_update_protocol_data(p_dcb_data, p_src,
- DCBX_PROTOCOL_ROCE);
- if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
- qed_dcbx_update_protocol_data(p_dcb_data, p_src,
- DCBX_PROTOCOL_ROCE_V2);
-
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE);
+ p_dcb_data = &p_dest->rroce_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE_V2);
p_dcb_data = &p_dest->iscsi_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
p_dcb_data = &p_dest->eth_dcb_data;
@@ -994,7 +987,7 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
if (rc)
goto out;
- rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+ rc = qed_dcbx_get_params(p_hwfn, p_get, type);
out:
qed_ptt_release(p_hwfn, p_ptt);
@@ -1169,6 +1162,9 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
}
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx version = %d\n",
+ local_admin->config);
+
if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
&params->config.params);
@@ -1245,6 +1241,8 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
if (dcbx_info->operational.ieee)
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ if (dcbx_info->operational.local)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
@@ -1794,8 +1792,9 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
- if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
- DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n");
+ if (!(mode & DCB_CAP_DCBX_VER_IEEE) &&
+ !(mode & DCB_CAP_DCBX_VER_CEE) && !(mode & DCB_CAP_DCBX_STATIC)) {
+ DP_INFO(hwfn, "Allowed modes are cee, ieee or static\n");
return 1;
}
@@ -1815,6 +1814,11 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
dcbx_set.enabled = true;
}
+ if (mode & DCB_CAP_DCBX_STATIC) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+ dcbx_set.enabled = true;
+ }
+
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return 1;
@@ -1823,7 +1827,7 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
qed_ptt_release(hwfn, ptt);
- return 0;
+ return rc;
}
static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
@@ -2202,15 +2206,46 @@ qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
}
+static int qed_get_sf_ieee_value(u8 selector, u8 *sf_ieee)
+{
+ switch (selector) {
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ *sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case IEEE_8021QAZ_APP_SEL_STREAM:
+ *sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case IEEE_8021QAZ_APP_SEL_DGRAM:
+ *sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case IEEE_8021QAZ_APP_SEL_ANY:
+ *sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
struct qed_app_entry *entry;
- bool ethtype;
u8 prio = 0;
+ u8 sf_ieee;
int i;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d\n",
+ app->selector, app->protocol);
+
+ if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) {
+ DP_INFO(cdev, "Invalid selector field value %d\n",
+ app->selector);
+ return -EINVAL;
+ }
+
dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
if (!dcbx_info)
return -EINVAL;
@@ -2221,11 +2256,9 @@ static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
return -EINVAL;
}
- /* ieee defines the selector field value for ethertype to be 1 */
- ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
entry = &dcbx_info->operational.params.app_entry[i];
- if ((entry->ethtype == ethtype) &&
+ if ((entry->sf_ieee == sf_ieee) &&
(entry->proto_id == app->protocol)) {
prio = entry->prio;
break;
@@ -2253,14 +2286,22 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
struct qed_dcbx_set dcbx_set;
struct qed_app_entry *entry;
struct qed_ptt *ptt;
- bool ethtype;
+ u8 sf_ieee;
int rc, i;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n",
+ app->selector, app->protocol, app->priority);
if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
return -EINVAL;
}
+ if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) {
+ DP_INFO(cdev, "Invalid selector field value %d\n",
+ app->selector);
+ return -EINVAL;
+ }
+
dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
if (!dcbx_info)
return -EINVAL;
@@ -2278,11 +2319,9 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
if (rc)
return -EINVAL;
- /* ieee defines the selector field value for ethertype to be 1 */
- ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
entry = &dcbx_set.config.params.app_entry[i];
- if ((entry->ethtype == ethtype) &&
+ if ((entry->sf_ieee == sf_ieee) &&
(entry->proto_id == app->protocol))
break;
/* First empty slot */
@@ -2298,7 +2337,7 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
}
dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
- dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].sf_ieee = sf_ieee;
dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 0fabe97f998d..414e26268f3a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
enum qed_pci_personality personality;
};
-#define QED_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
struct qed_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -122,7 +119,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *,
struct qed_ptt *, enum qed_mib_read_type);
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
-void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 68f19ca57f96..483241b4b05d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -17,7 +17,6 @@
/* Chip IDs enum */
enum chip_ids {
- CHIP_RESERVED,
CHIP_BB_B0,
CHIP_K2,
MAX_CHIP_IDS
@@ -40,6 +39,7 @@ enum mem_groups {
MEM_GROUP_BTB_RAM,
MEM_GROUP_RDIF_CTX,
MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
@@ -72,6 +72,7 @@ static const char * const s_mem_group_names[] = {
"BTB_RAM",
"RDIF_CTX",
"TDIF_CTX",
+ "CFC_MEM",
"CONN_CFC_MEM",
"TASK_CFC_MEM",
"CAU_PI",
@@ -185,13 +186,16 @@ struct dbg_array {
u32 size_in_dwords;
};
+struct chip_platform_defs {
+ u8 num_ports;
+ u8 num_pfs;
+ u8 num_vfs;
+};
+
/* Chip constant definitions */
struct chip_defs {
const char *name;
- struct {
- u8 num_ports;
- u8 num_pfs;
- } per_platform[MAX_PLATFORM_IDS];
+ struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};
/* Platform constant definitions */
@@ -405,22 +409,23 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
{ "bb_b0",
- { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
- { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+ { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
+ {0, 0, 0}, {0, 0, 0} } },
+ { "k2",
+ { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
+ {0, 0, 0}, {0, 0, 0} } }
};
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCT}, true,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
@@ -432,8 +437,7 @@ static struct storm_defs s_storm_defs[] = {
4, TCM_REG_SM_TASK_CTX},
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCM}, false,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
@@ -445,8 +449,7 @@ static struct storm_defs s_storm_defs[] = {
7, MCM_REG_SM_TASK_CTX},
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCU}, false,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
@@ -458,8 +461,7 @@ static struct storm_defs s_storm_defs[] = {
3, UCM_REG_SM_TASK_CTX},
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCX}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
@@ -471,8 +473,7 @@ static struct storm_defs s_storm_defs[] = {
0, 0},
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCY}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
@@ -484,8 +485,7 @@ static struct storm_defs s_storm_defs[] = {
12, YCM_REG_SM_TASK_CTX},
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCS}, true,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
@@ -499,8 +499,9 @@ static struct storm_defs s_storm_defs[] = {
/* Block definitions array */
static struct block_defs block_grc_defs = {
- "grc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ "grc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
GRC_REG_DBG_FORCE_FRAME,
@@ -508,29 +509,30 @@ static struct block_defs block_grc_defs = {
};
static struct block_defs block_miscs_defs = {
- "miscs", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "miscs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_defs = {
- "misc", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbu_defs = {
- "dbu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_pglue_b_defs = {
- "pglue_b", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ "pglue_b",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -538,8 +540,9 @@ static struct block_defs block_pglue_b_defs = {
};
static struct block_defs block_cnig_defs = {
- "cnig", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ "cnig",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
CNIG_REG_DBG_FORCE_FRAME_K2,
@@ -547,15 +550,16 @@ static struct block_defs block_cnig_defs = {
};
static struct block_defs block_cpmu_defs = {
- "cpmu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "cpmu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};
static struct block_defs block_ncsi_defs = {
- "ncsi", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ "ncsi",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
NCSI_REG_DBG_FORCE_FRAME,
@@ -563,15 +567,16 @@ static struct block_defs block_ncsi_defs = {
};
static struct block_defs block_opte_defs = {
- "opte", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "opte", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};
static struct block_defs block_bmb_defs = {
- "bmb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ "bmb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
BMB_REG_DBG_FORCE_FRAME,
@@ -579,8 +584,9 @@ static struct block_defs block_bmb_defs = {
};
static struct block_defs block_pcie_defs = {
- "pcie", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "pcie",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -588,15 +594,16 @@ static struct block_defs block_pcie_defs = {
};
static struct block_defs block_mcp_defs = {
- "mcp", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "mcp", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp2_defs = {
- "mcp2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ "mcp2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
MCP2_REG_DBG_FORCE_FRAME,
@@ -604,8 +611,9 @@ static struct block_defs block_mcp2_defs = {
};
static struct block_defs block_pswhst_defs = {
- "pswhst", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswhst",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
PSWHST_REG_DBG_FORCE_FRAME,
@@ -613,8 +621,9 @@ static struct block_defs block_pswhst_defs = {
};
static struct block_defs block_pswhst2_defs = {
- "pswhst2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswhst2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
PSWHST2_REG_DBG_FORCE_FRAME,
@@ -622,8 +631,9 @@ static struct block_defs block_pswhst2_defs = {
};
static struct block_defs block_pswrd_defs = {
- "pswrd", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrd",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
PSWRD_REG_DBG_FORCE_FRAME,
@@ -631,8 +641,9 @@ static struct block_defs block_pswrd_defs = {
};
static struct block_defs block_pswrd2_defs = {
- "pswrd2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrd2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
PSWRD2_REG_DBG_FORCE_FRAME,
@@ -640,8 +651,9 @@ static struct block_defs block_pswrd2_defs = {
};
static struct block_defs block_pswwr_defs = {
- "pswwr", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswwr",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
PSWWR_REG_DBG_FORCE_FRAME,
@@ -649,15 +661,16 @@ static struct block_defs block_pswwr_defs = {
};
static struct block_defs block_pswwr2_defs = {
- "pswwr2", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "pswwr2", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISC_PL_HV, 3
};
static struct block_defs block_pswrq_defs = {
- "pswrq", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrq",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
PSWRQ_REG_DBG_FORCE_FRAME,
@@ -665,8 +678,9 @@ static struct block_defs block_pswrq_defs = {
};
static struct block_defs block_pswrq2_defs = {
- "pswrq2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrq2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -674,8 +688,9 @@ static struct block_defs block_pswrq2_defs = {
};
static struct block_defs block_pglcs_defs = {
- "pglcs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "pglcs",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
PGLCS_REG_DBG_FORCE_FRAME,
@@ -683,8 +698,9 @@ static struct block_defs block_pglcs_defs = {
};
static struct block_defs block_ptu_defs = {
- "ptu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "ptu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
PTU_REG_DBG_FORCE_FRAME,
@@ -692,8 +708,9 @@ static struct block_defs block_ptu_defs = {
};
static struct block_defs block_dmae_defs = {
- "dmae", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "dmae",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
DMAE_REG_DBG_FORCE_FRAME,
@@ -701,8 +718,9 @@ static struct block_defs block_dmae_defs = {
};
static struct block_defs block_tcm_defs = {
- "tcm", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tcm",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
TCM_REG_DBG_FORCE_FRAME,
@@ -710,8 +728,9 @@ static struct block_defs block_tcm_defs = {
};
static struct block_defs block_mcm_defs = {
- "mcm", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "mcm",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
MCM_REG_DBG_FORCE_FRAME,
@@ -719,8 +738,9 @@ static struct block_defs block_mcm_defs = {
};
static struct block_defs block_ucm_defs = {
- "ucm", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "ucm",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
UCM_REG_DBG_FORCE_FRAME,
@@ -728,8 +748,9 @@ static struct block_defs block_ucm_defs = {
};
static struct block_defs block_xcm_defs = {
- "xcm", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xcm",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
XCM_REG_DBG_FORCE_FRAME,
@@ -737,8 +758,9 @@ static struct block_defs block_xcm_defs = {
};
static struct block_defs block_ycm_defs = {
- "ycm", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ycm",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
YCM_REG_DBG_FORCE_FRAME,
@@ -746,8 +768,9 @@ static struct block_defs block_ycm_defs = {
};
static struct block_defs block_pcm_defs = {
- "pcm", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "pcm",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
PCM_REG_DBG_FORCE_FRAME,
@@ -755,8 +778,9 @@ static struct block_defs block_pcm_defs = {
};
static struct block_defs block_qm_defs = {
- "qm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ "qm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
QM_REG_DBG_FORCE_FRAME,
@@ -764,8 +788,9 @@ static struct block_defs block_qm_defs = {
};
static struct block_defs block_tm_defs = {
- "tm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "tm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
TM_REG_DBG_FORCE_FRAME,
@@ -773,8 +798,9 @@ static struct block_defs block_tm_defs = {
};
static struct block_defs block_dorq_defs = {
- "dorq", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "dorq",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
DORQ_REG_DBG_FORCE_FRAME,
@@ -782,8 +808,9 @@ static struct block_defs block_dorq_defs = {
};
static struct block_defs block_brb_defs = {
- "brb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ "brb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
BRB_REG_DBG_FORCE_FRAME,
@@ -791,8 +818,9 @@ static struct block_defs block_brb_defs = {
};
static struct block_defs block_src_defs = {
- "src", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "src",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
SRC_REG_DBG_FORCE_FRAME,
@@ -800,8 +828,9 @@ static struct block_defs block_src_defs = {
};
static struct block_defs block_prs_defs = {
- "prs", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ "prs",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
PRS_REG_DBG_FORCE_FRAME,
@@ -809,8 +838,9 @@ static struct block_defs block_prs_defs = {
};
static struct block_defs block_tsdm_defs = {
- "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tsdm",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
TSDM_REG_DBG_FORCE_FRAME,
@@ -818,8 +848,9 @@ static struct block_defs block_tsdm_defs = {
};
static struct block_defs block_msdm_defs = {
- "msdm", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "msdm",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
MSDM_REG_DBG_FORCE_FRAME,
@@ -827,8 +858,9 @@ static struct block_defs block_msdm_defs = {
};
static struct block_defs block_usdm_defs = {
- "usdm", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "usdm",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
USDM_REG_DBG_FORCE_FRAME,
@@ -836,8 +868,9 @@ static struct block_defs block_usdm_defs = {
};
static struct block_defs block_xsdm_defs = {
- "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xsdm",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
XSDM_REG_DBG_FORCE_FRAME,
@@ -845,8 +878,9 @@ static struct block_defs block_xsdm_defs = {
};
static struct block_defs block_ysdm_defs = {
- "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ysdm",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
YSDM_REG_DBG_FORCE_FRAME,
@@ -854,8 +888,9 @@ static struct block_defs block_ysdm_defs = {
};
static struct block_defs block_psdm_defs = {
- "psdm", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "psdm",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
PSDM_REG_DBG_FORCE_FRAME,
@@ -863,8 +898,9 @@ static struct block_defs block_psdm_defs = {
};
static struct block_defs block_tsem_defs = {
- "tsem", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tsem",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
TSEM_REG_DBG_FORCE_FRAME,
@@ -872,8 +908,9 @@ static struct block_defs block_tsem_defs = {
};
static struct block_defs block_msem_defs = {
- "msem", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "msem",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
MSEM_REG_DBG_FORCE_FRAME,
@@ -881,8 +918,9 @@ static struct block_defs block_msem_defs = {
};
static struct block_defs block_usem_defs = {
- "usem", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "usem",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
USEM_REG_DBG_FORCE_FRAME,
@@ -890,8 +928,9 @@ static struct block_defs block_usem_defs = {
};
static struct block_defs block_xsem_defs = {
- "xsem", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xsem",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
XSEM_REG_DBG_FORCE_FRAME,
@@ -899,8 +938,9 @@ static struct block_defs block_xsem_defs = {
};
static struct block_defs block_ysem_defs = {
- "ysem", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ysem",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
YSEM_REG_DBG_FORCE_FRAME,
@@ -908,8 +948,9 @@ static struct block_defs block_ysem_defs = {
};
static struct block_defs block_psem_defs = {
- "psem", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "psem",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
PSEM_REG_DBG_FORCE_FRAME,
@@ -917,8 +958,9 @@ static struct block_defs block_psem_defs = {
};
static struct block_defs block_rss_defs = {
- "rss", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "rss",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
RSS_REG_DBG_FORCE_FRAME,
@@ -926,8 +968,9 @@ static struct block_defs block_rss_defs = {
};
static struct block_defs block_tmld_defs = {
- "tmld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "tmld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
TMLD_REG_DBG_FORCE_FRAME,
@@ -935,8 +978,9 @@ static struct block_defs block_tmld_defs = {
};
static struct block_defs block_muld_defs = {
- "muld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "muld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
MULD_REG_DBG_FORCE_FRAME,
@@ -944,8 +988,9 @@ static struct block_defs block_muld_defs = {
};
static struct block_defs block_yuld_defs = {
- "yuld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "yuld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
YULD_REG_DBG_FORCE_FRAME,
@@ -953,8 +998,9 @@ static struct block_defs block_yuld_defs = {
};
static struct block_defs block_xyld_defs = {
- "xyld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xyld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
XYLD_REG_DBG_FORCE_FRAME,
@@ -962,8 +1008,9 @@ static struct block_defs block_xyld_defs = {
};
static struct block_defs block_prm_defs = {
- "prm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "prm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
PRM_REG_DBG_FORCE_FRAME,
@@ -971,8 +1018,9 @@ static struct block_defs block_prm_defs = {
};
static struct block_defs block_pbf_pb1_defs = {
- "pbf_pb1", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf_pb1",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -981,8 +1029,9 @@ static struct block_defs block_pbf_pb1_defs = {
};
static struct block_defs block_pbf_pb2_defs = {
- "pbf_pb2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf_pb2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -991,8 +1040,9 @@ static struct block_defs block_pbf_pb2_defs = {
};
static struct block_defs block_rpb_defs = {
- "rpb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "rpb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
RPB_REG_DBG_FORCE_FRAME,
@@ -1000,8 +1050,9 @@ static struct block_defs block_rpb_defs = {
};
static struct block_defs block_btb_defs = {
- "btb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ "btb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
BTB_REG_DBG_FORCE_FRAME,
@@ -1009,8 +1060,9 @@ static struct block_defs block_btb_defs = {
};
static struct block_defs block_pbf_defs = {
- "pbf", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
PBF_REG_DBG_FORCE_FRAME,
@@ -1018,8 +1070,9 @@ static struct block_defs block_pbf_defs = {
};
static struct block_defs block_rdif_defs = {
- "rdif", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "rdif",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
RDIF_REG_DBG_FORCE_FRAME,
@@ -1027,8 +1080,9 @@ static struct block_defs block_rdif_defs = {
};
static struct block_defs block_tdif_defs = {
- "tdif", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "tdif",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
TDIF_REG_DBG_FORCE_FRAME,
@@ -1036,8 +1090,9 @@ static struct block_defs block_tdif_defs = {
};
static struct block_defs block_cdu_defs = {
- "cdu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "cdu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
CDU_REG_DBG_FORCE_FRAME,
@@ -1045,8 +1100,9 @@ static struct block_defs block_cdu_defs = {
};
static struct block_defs block_ccfc_defs = {
- "ccfc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "ccfc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
CCFC_REG_DBG_FORCE_FRAME,
@@ -1054,8 +1110,9 @@ static struct block_defs block_ccfc_defs = {
};
static struct block_defs block_tcfc_defs = {
- "tcfc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "tcfc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
TCFC_REG_DBG_FORCE_FRAME,
@@ -1063,8 +1120,9 @@ static struct block_defs block_tcfc_defs = {
};
static struct block_defs block_igu_defs = {
- "igu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "igu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
IGU_REG_DBG_FORCE_FRAME,
@@ -1072,8 +1130,9 @@ static struct block_defs block_igu_defs = {
};
static struct block_defs block_cau_defs = {
- "cau", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "cau",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
CAU_REG_DBG_FORCE_FRAME,
@@ -1081,8 +1140,9 @@ static struct block_defs block_cau_defs = {
};
static struct block_defs block_umac_defs = {
- "umac", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ "umac",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
UMAC_REG_DBG_FORCE_FRAME,
@@ -1090,22 +1150,23 @@ static struct block_defs block_umac_defs = {
};
static struct block_defs block_xmac_defs = {
- "xmac", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "xmac", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbg_defs = {
- "dbg", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbg", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};
static struct block_defs block_nig_defs = {
- "nig", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ "nig",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
NIG_REG_DBG_FORCE_FRAME,
@@ -1113,8 +1174,9 @@ static struct block_defs block_nig_defs = {
};
static struct block_defs block_wol_defs = {
- "wol", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ "wol",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
WOL_REG_DBG_FORCE_FRAME,
@@ -1122,8 +1184,9 @@ static struct block_defs block_wol_defs = {
};
static struct block_defs block_bmbn_defs = {
- "bmbn", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+ "bmbn",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
BMBN_REG_DBG_FORCE_FRAME,
@@ -1131,15 +1194,16 @@ static struct block_defs block_bmbn_defs = {
};
static struct block_defs block_ipc_defs = {
- "ipc", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "ipc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};
static struct block_defs block_nwm_defs = {
- "nwm", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ "nwm",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
NWM_REG_DBG_FORCE_FRAME,
@@ -1147,22 +1211,29 @@ static struct block_defs block_nwm_defs = {
};
static struct block_defs block_nws_defs = {
- "nws", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
+ "nws",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
+ NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
+ NWS_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
static struct block_defs block_ms_defs = {
- "ms", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
+ "ms",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
+ MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
+ MS_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
static struct block_defs block_phy_pcie_defs = {
- "phy_pcie", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "phy_pcie",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -1170,22 +1241,57 @@ static struct block_defs block_phy_pcie_defs = {
};
static struct block_defs block_led_defs = {
- "led", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "led", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_avs_wrap_defs = {
+ "avs_wrap", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 11
+};
+
+static struct block_defs block_rgfs_defs = {
+ "rgfs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_tgfs_defs = {
+ "tgfs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ptld_defs = {
+ "ptld", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ypld_defs = {
+ "ypld", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc_aeu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_bar0_map_defs = {
- "bar0_map", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "bar0_map", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1269,6 +1375,11 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_ms_defs,
&block_phy_pcie_defs,
&block_led_defs,
+ &block_avs_wrap_defs,
+ &block_rgfs_defs,
+ &block_tgfs_defs,
+ &block_ptld_defs,
+ &block_ypld_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
@@ -1281,65 +1392,67 @@ static struct platform_defs s_platform_defs[] = {
};
static struct grc_param_defs s_grc_param_defs[] = {
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
- {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
- {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
- {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
- {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
+ {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
+ {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
};
static struct rss_mem_defs s_rss_mem_defs[] = {
{ "rss_mem_cid", "rss_cid", 0,
- {256, 256, 320},
- {32, 32, 32} },
+ {256, 320},
+ {32, 32} },
{ "rss_mem_key_msb", "rss_key", 1024,
- {128, 128, 208},
- {256, 256, 256} },
+ {128, 208},
+ {256, 256} },
{ "rss_mem_key_lsb", "rss_key", 2048,
- {128, 128, 208},
- {64, 64, 64} },
+ {128, 208},
+ {64, 64} },
{ "rss_mem_info", "rss_info", 3072,
- {128, 128, 208},
- {16, 16, 16} },
+ {128, 208},
+ {16, 16} },
{ "rss_mem_ind", "rss_ind", 4096,
- {(128 * 128), (128 * 128), (128 * 208)},
- {16, 16, 16} }
+ {(128 * 128), (128 * 208)},
+ {16, 16} }
};
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1352,32 +1465,32 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- {4800, 4800, 5632} },
+ {4800, 5632} },
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- {2880, 2880, 3680} },
+ {2880, 3680} },
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- {1152, 1152, 1152} }
+ {1152, 1152} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
{ MISCS_REG_RESET_PL_UA, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
{ MISCS_REG_RESET_PL_HV, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
+ {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
{ MISCS_REG_RESET_PL_HV_2, 0x0,
- {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
{ MISC_REG_RESET_PL_UA, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
{ MISC_REG_RESET_PL_HV, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
};
static struct phy_defs s_phy_defs[] = {
@@ -1410,6 +1523,26 @@ static u32 qed_read_unaligned_dword(u8 *buf)
return dword;
}
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ if (!dev_data->grc.params_initialized) {
+ qed_dbg_grc_set_params_default(p_hwfn);
+ dev_data->grc.params_initialized = 1;
+ }
+}
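
The params_initialized guard makes the helper idempotent, so qed_dbg_dev_init() below can call it unconditionally while user overrides set earlier survive. A hedged sketch of what such an override looks like — the setter name is hypothetical; only the two grc fields appear in the patch:

	static void example_grc_set_param(struct qed_hwfn *p_hwfn,
					  enum dbg_grc_params grc_param,
					  u32 val)
	{
		struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

		/* Pin the value and mark it user-set so
		 * qed_dbg_grc_set_params_default() leaves it alone.
		 */
		dev_data->grc.param_val[grc_param] = val;
		dev_data->grc.param_set_by_user[grc_param] = 1;
	}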
+
/* Initializes debug data for the specified device */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
@@ -1424,13 +1557,17 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
dev_data->mode_enable[MODE_K2] = 1;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_BB_B0;
- dev_data->mode_enable[MODE_BB_B0] = 1;
+ dev_data->mode_enable[MODE_BB] = 1;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
}
dev_data->platform_id = PLATFORM_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
+
+ /* Initializes the GRC parameters */
+ qed_dbg_grc_init_params(p_hwfn);
+
dev_data->initialized = true;
return DBG_STATUS_OK;
}
@@ -1561,7 +1698,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
int printed_chars;
u32 offset = 0;
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
/* Read FW image/version from PRAM in a non-reset SEMI */
bool found = false;
u8 storm_id;
@@ -1622,7 +1759,7 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
{
char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
u32 global_section_offsize, global_section_addr, mfw_ver;
u32 public_data_addr, global_section_offsize_addr;
int printed_chars;
@@ -1683,15 +1820,13 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
bool dump,
u8 num_specific_global_params)
{
+ u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0;
/* Find platform string and dump global params section header */
offset += qed_dump_section_hdr(dump_buf + offset,
- dump,
- "global_params",
- NUM_COMMON_GLOBAL_PARAMS +
- num_specific_global_params);
+ dump, "global_params", num_params);
/* Store params */
offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
@@ -1815,37 +1950,6 @@ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
}
}
-/* Returns the value of the specified GRC param */
-static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
- enum dbg_grc_params grc_param)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-
- return dev_data->grc.param_val[grc_param];
-}
-
-/* Clear all GRC params */
-static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i;
-
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- dev_data->grc.param_set_by_user[i] = 0;
-}
-
-/* Assign default GRC param values */
-static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i;
-
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- if (!dev_data->grc.param_set_by_user[i])
- dev_data->grc.param_val[i] =
- s_grc_param_defs[i].default_val[dev_data->chip_id];
-}
-
/* Returns true if the specified entity (indicated by GRC param) should be
* included in the dump, false otherwise.
*/
@@ -1971,7 +2075,7 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
}
}
-/* Returns the attention name offsets of the specified block */
+/* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
{
@@ -2040,7 +2144,7 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
* The following parameters are dumped:
* - 'count' = num_dumped_entries
* - 'split' = split_type
- * - 'id'i = split_id (dumped only if split_id >= 0)
+ * - 'id' = split_id (dumped only if split_id >= 0)
* - 'param_name' = param_val (user param, dumped only if param_name != NULL and
* param_val != NULL)
*/
@@ -2069,21 +2173,81 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
return offset;
}
-/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+
+ if (dump)
+ for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ else
+ offset += len;
+ return offset;
+}
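
Like the other dump helpers, dump=false never touches the buffer and only returns the length, which enables the usual size-then-dump flow. A sketch under that assumption (the function name and the vzalloc choice are the editor's, not taken from the patch):

	static u32 *example_dump_range(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u32 addr, u32 len)
	{
		/* Pass 1: dump=false, buffer may be NULL; returns the
		 * required size in dwords.
		 */
		u32 words = qed_grc_dump_addr_range(p_hwfn, p_ptt, NULL,
						    false, addr, len);
		u32 *buf = vzalloc(DWORDS_TO_BYTES(words));

		/* Pass 2: identical arguments, dump=true does the GRC reads */
		if (buf)
			qed_grc_dump_addr_range(p_hwfn, p_ptt, buf, true,
						addr, len);
		return buf;
	}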
+
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
+ u32 len)
+{
+ if (dump)
+ *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+ return 1;
+}
+
+/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf,
bool dump, u32 addr, u32 len)
{
- u32 offset = 0, i;
+ u32 offset = 0;
+
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, addr, len);
+ return offset;
+}
+
+/* Dumps GRC registers sequence with skip cycle.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 total_len,
+ u32 read_len, u32 skip_len)
+{
+ u32 offset = 0, reg_offset = 0;
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
if (dump) {
- *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
- for (i = 0; i < len; i++, addr++, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt,
- DWORDS_TO_BYTES(addr));
+ while (reg_offset < total_len) {
+ u32 curr_len = min_t(u32,
+ read_len,
+ total_len - reg_offset);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, curr_len);
+ reg_offset += curr_len;
+ addr += curr_len;
+ if (reg_offset < total_len) {
+ curr_len = min_t(u32,
+ skip_len,
+ total_len - reg_offset);
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(curr_len));
+ offset += curr_len;
+ reg_offset += curr_len;
+ addr += curr_len;
+ }
+ }
} else {
- offset += len + 1;
+ offset += total_len;
}
return offset;
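
A worked pass through the read/skip loop, using the 7-read/1-skip geometry the DIF error-info dump below feeds it (total_len = 16 here for brevity):

	/*
	 * iteration 1: read dwords 0..6    (reg_offset 0  -> 7)
	 *              zero dword  7       (reg_offset 7  -> 8)
	 * iteration 2: read dwords 8..14   (reg_offset 8  -> 15)
	 *              zero dword  15      (reg_offset 15 -> 16)
	 *
	 * Every 8th dword is zero-filled instead of read, while the dump
	 * header still advertises the full total_len.
	 */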
@@ -2124,14 +2288,17 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
const struct dbg_dump_reg *reg =
(const struct dbg_dump_reg *)
&input_regs_arr.ptr[input_offset];
+ u32 addr, len;
+ addr = GET_FIELD(reg->data,
+ DBG_DUMP_REG_ADDRESS);
+ len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
offset +=
- qed_grc_dump_reg_entry(p_hwfn, p_ptt,
- dump_buf + offset, dump,
- GET_FIELD(reg->data,
- DBG_DUMP_REG_ADDRESS),
- GET_FIELD(reg->data,
- DBG_DUMP_REG_LENGTH));
+ qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
(*num_dumped_reg_entries)++;
}
} else {
@@ -2194,8 +2361,14 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct chip_platform_defs *p_platform_defs;
u32 offset = 0, input_offset = 0;
- u8 port_id, pf_id;
+ struct chip_defs *p_chip_defs;
+ u8 port_id, pf_id, vf_id;
+ u16 fid;
+
+ p_chip_defs = &s_chip_defs[dev_data->chip_id];
+ p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
if (dump)
DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
@@ -2214,7 +2387,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
switch (split_type_id) {
case SPLIT_TYPE_NONE:
- case SPLIT_TYPE_VF:
offset += qed_grc_dump_split_data(p_hwfn,
p_ptt,
curr_input_regs_arr,
@@ -2227,10 +2399,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_val);
break;
case SPLIT_TYPE_PORT:
- for (port_id = 0;
- port_id <
- s_chip_defs[dev_data->chip_id].
- per_platform[dev_data->platform_id].num_ports;
+ for (port_id = 0; port_id < p_platform_defs->num_ports;
port_id++) {
if (dump)
qed_port_pretend(p_hwfn, p_ptt,
@@ -2247,20 +2416,48 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
break;
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
- for (pf_id = 0;
- pf_id <
- s_chip_defs[dev_data->chip_id].
- per_platform[dev_data->platform_id].num_pfs;
+ for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
pf_id++) {
- if (dump)
- qed_fid_pretend(p_hwfn, p_ptt, pf_id);
- offset += qed_grc_dump_split_data(p_hwfn,
- p_ptt,
- curr_input_regs_arr,
- dump_buf + offset,
- dump, block_enable,
- "pf", pf_id, param_name,
- param_val);
+ u8 pfid_shift =
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+
+ if (dump) {
+ fid = pf_id << pfid_shift;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "pf", pf_id,
+ param_name,
+ param_val);
+ }
+ break;
+ case SPLIT_TYPE_VF:
+ for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+ vf_id++) {
+ u8 vfvalid_shift =
+ PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+ u8 vfid_shift =
+ PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+
+ if (dump) {
+ fid = BIT(vfvalid_shift) |
+ (vf_id << vfid_shift);
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "vf", vf_id,
+ param_name,
+ param_val);
}
break;
default:
@@ -2271,8 +2468,11 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
}
/* Pretend to original PF */
- if (dump)
- qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ if (dump) {
+ fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
return offset;
}
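
For reference, the two pretend encodings the loops above build, side by side; the shift macros come from the PXP headers and their numeric values are not shown in the patch:

	u16 pf_fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
	u16 vf_fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
		     (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);

Setting VFVALID is what distinguishes the two: without it the FID still resolves to a PF view, which is also why the function ends by pretending back to p_hwfn->rel_pf_id rather than to FID 0.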
@@ -2291,13 +2491,14 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (s_reset_regs_defs
- [i].addr), 1);
+ addr,
+ 1);
num_regs++;
}
}
@@ -2339,6 +2540,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
&attn_reg_arr[reg_idx];
u16 modes_buf_offset;
bool eval_mode;
+ u32 addr;
/* Check mode */
eval_mode = GET_FIELD(reg_data->mode.data,
@@ -2349,19 +2551,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
/* Mode match - read and dump registers */
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- reg_data->mask_address,
- 1);
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- GET_FIELD(reg_data->data,
- DBG_ATTN_REG_STS_ADDRESS),
- 1);
+ addr = reg_data->mask_address;
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
+ addr = GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS);
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
num_reg_entries += 2;
}
}
@@ -2369,18 +2575,21 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
/* Write storm stall status registers */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 addr;
+
if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
dump)
continue;
+ addr =
+ BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED);
offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- BYTES_TO_DWORDS(s_storm_defs[storm_id].
- sem_fast_mem_addr +
- SEM_FAST_REG_STALLED),
- 1);
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
num_reg_entries++;
}
@@ -2392,11 +2601,47 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, addr;
+
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ dump, 2, "eng", -1, NULL, NULL);
+
+ /* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is
+ * skipped).
+ */
+ addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+ offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+ addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+ offset +=
+ qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+
+ return offset;
+}
+
/* Dumps a GRC memory header (section and params).
* The following parameters are dumped:
* name - name is dumped only if it's not NULL.
- * addr - byte_addr is dumped only if name is NULL.
- * len - dword_len is always dumped.
+ * addr - addr is dumped only if name is NULL.
+ * len - len is always dumped.
* width - bit_width is dumped if it's not zero.
* packed - packed=1 is dumped if it's not false.
* mem_group - mem_group is always dumped.
@@ -2408,8 +2653,8 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
const char *name,
- u32 byte_addr,
- u32 dword_len,
+ u32 addr,
+ u32 len,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2419,7 +2664,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 offset = 0;
char buf[64];
- if (!dword_len)
+ if (!len)
DP_NOTICE(p_hwfn,
"Unexpected GRC Dump error: dumped memory size must be non-zero\n");
if (bit_width)
@@ -2446,20 +2691,21 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from %s...\n",
- dword_len, buf);
+ len, buf);
} else {
/* Dump address */
offset += qed_dump_num_param(dump_buf + offset,
- dump, "addr", byte_addr);
- if (dump && dword_len > 64)
+ dump, "addr",
+ DWORDS_TO_BYTES(addr));
+ if (dump && len > 64)
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from address 0x%x...\n",
- dword_len, byte_addr);
+ len, (u32)DWORDS_TO_BYTES(addr));
}
/* Dump len */
- offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
/* Dump bit width */
if (bit_width)
@@ -2492,8 +2738,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
const char *name,
- u32 byte_addr,
- u32 dword_len,
+ u32 addr,
+ u32 len,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2505,21 +2751,14 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
name,
- byte_addr,
- dword_len,
+ addr,
+ len,
bit_width,
packed,
mem_group, is_storm, storm_letter);
- if (dump) {
- u32 i;
-
- for (i = 0; i < dword_len;
- i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
- } else {
- offset += dword_len;
- }
-
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, addr, len);
return offset;
}
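
The byte_addr/dword_len parameters become a uniform dword-based (addr, len) pair here and in the helpers above. The conversion macros the call sites lean on are presumably the driver's existing ones, shown only to make the unit convention explicit:

	#define BYTES_IN_DWORD		4
	#define DWORDS_TO_BYTES(x)	((x) * BYTES_IN_DWORD)
	#define BYTES_TO_DWORDS(x)	((x) / BYTES_IN_DWORD)

Callers that start from a byte-based register define (MCP_REG_SCRATCH, RDIF_REG_DEBUG_ERROR_INFO, ...) now wrap it in BYTES_TO_DWORDS() at the call site, as the later hunks show.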
@@ -2575,25 +2814,41 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
if (qed_grc_is_mem_included(p_hwfn,
(enum block_id)cond_hdr->block_id,
mem_group_id)) {
- u32 mem_byte_addr =
- DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_ADDRESS));
+ u32 mem_addr = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_ADDRESS);
u32 mem_len = GET_FIELD(mem->dword1,
DBG_DUMP_MEM_LENGTH);
+ enum dbg_grc_params grc_param;
char storm_letter = 'a';
bool is_storm = false;
/* Update memory length for CCFC/TCFC memories
* according to number of LCIDs/LTIDs.
*/
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+ if (mem_len % MAX_LCIDS != 0) {
+ DP_NOTICE(p_hwfn,
+ "Invalid CCFC connection memory size\n");
+ return 0;
+ }
+
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
mem_len = qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS)
- * (mem_len / MAX_LCIDS);
- else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ grc_param) *
+ (mem_len / MAX_LCIDS);
+ } else if (mem_group_id ==
+ MEM_GROUP_TASK_CFC_MEM) {
+ if (mem_len % MAX_LTIDS != 0) {
+ DP_NOTICE(p_hwfn,
+ "Invalid TCFC task memory size\n");
+ return 0;
+ }
+
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
mem_len = qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS)
- * (mem_len / MAX_LTIDS);
+ grc_param) *
+ (mem_len / MAX_LTIDS);
+ }
/* If memory is associated with Storm, update
* Storm details.
@@ -2610,7 +2865,7 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
/* Dump memory */
offset += qed_grc_dump_mem(p_hwfn, p_ptt,
dump_buf + offset, dump, NULL,
- mem_byte_addr, mem_len, 0,
+ mem_addr, mem_len, 0,
false,
s_mem_group_names[mem_group_id],
is_storm, storm_letter);
@@ -2799,29 +3054,31 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id)) {
- for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- u32 addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE +
- DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+ struct storm_defs *storm = &s_storm_defs[storm_id];
- buf[strlen(buf) - 1] = '0' + set_id;
- offset += qed_grc_dump_mem(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- buf,
- addr,
- IORS_PER_SET,
- 32,
- false,
- "ior",
- true,
- s_storm_defs
- [storm_id].letter);
- }
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+ u32 dwords, addr;
+
+ dwords = storm->sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE;
+ addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+ buf[strlen(buf) - 1] = '0' + set_id;
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ buf,
+ addr,
+ IORS_PER_SET,
+ 32,
+ false,
+ "ior",
+ true,
+ storm->letter);
}
}
@@ -2990,34 +3247,39 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
- u32 total_size = (num_entries * entry_width) / 32;
+ u32 total_dwords = (num_entries * entry_width) / 32;
+ u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
bool packed = (entry_width == 16);
- u32 addr = rss_defs->addr;
- u32 i, j;
+ u32 rss_addr = rss_defs->addr;
+ u32 i, addr;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
rss_defs->mem_name,
- addr,
- total_size,
+ 0,
+ total_dwords,
entry_width,
packed,
rss_defs->type_name, false, 0);
if (!dump) {
- offset += total_size;
+ offset += total_dwords;
continue;
}
/* Dump RSS data */
- for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
- qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
- for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
- *(dump_buf + offset) =
- qed_rd(p_hwfn, p_ptt,
- RSS_REG_RSS_RAM_DATA +
- DWORDS_TO_BYTES(j));
+ for (i = 0; i < total_dwords;
+ i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
+ addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ addr,
+ size);
}
}
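
The RSS loop is an instance of the driver's indirect address/data access: write a row index to the ADDR register, then read the row back through the fixed DATA window. A generic sketch of that pattern (the function name is illustrative):

	static void example_indirect_read(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u32 addr_reg, u32 data_reg,
					  u32 row, u32 row_dwords, u32 *out)
	{
		u32 i;

		qed_wr(p_hwfn, p_ptt, addr_reg, row);	/* select the row */
		for (i = 0; i < row_dwords; i++)	/* read it back */
			out[i] = qed_rd(p_hwfn, p_ptt,
					data_reg + DWORDS_TO_BYTES(i));
	}

The Big RAM dump in the next hunk follows the same shape with addr_reg_addr/data_reg_addr.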
@@ -3030,19 +3292,19 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 big_ram_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 total_blocks, ram_size, offset = 0, i;
char mem_name[12] = "???_BIG_RAM";
char type_name[8] = "???_RAM";
- u32 ram_size, total_blocks;
- u32 offset = 0, i, j;
+ struct big_ram_defs *big_ram;
- total_blocks =
- s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+ big_ram = &s_big_ram_defs[big_ram_id];
+ total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
- strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
- strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strncpy(type_name, big_ram->instance_name,
+ strlen(big_ram->instance_name));
+ strncpy(mem_name, big_ram->instance_name,
+ strlen(big_ram->instance_name));
/* Dump memory header */
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3059,13 +3321,17 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
/* Read and dump Big RAM data */
for (i = 0; i < total_blocks / 2; i++) {
- qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
- i);
- for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
- s_big_ram_defs[big_ram_id].
- data_reg_addr +
- DWORDS_TO_BYTES(j));
+ u32 addr, len;
+
+ qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+ addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+ len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
}
return offset;
@@ -3075,11 +3341,11 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
bool block_enable[MAX_BLOCK_ID] = { 0 };
+ u32 offset = 0, addr;
bool halted = false;
- u32 offset = 0;
/* Halt MCP */
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -3091,7 +3357,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
NULL,
- MCP_REG_SCRATCH,
+ BYTES_TO_DWORDS(MCP_REG_SCRATCH),
MCP_REG_SCRATCH_SIZE,
0, false, "MCP", false, 0);
@@ -3101,7 +3367,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
NULL,
- MCP_REG_CPU_REG_FILE,
+ BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
0, false, "MCP", false, 0);
@@ -3115,12 +3381,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
dump, 1, "eng", -1, "block", "MCP");
+ addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (MISC_REG_SHARED_MEM_ADDR), 1);
+ addr,
+ 1);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3212,7 +3479,7 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
{
u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, block_id, line_id, addr, i;
+ u32 offset = 0, block_id, line_id;
struct block_defs *p_block_defs;
if (dump) {
@@ -3255,6 +3522,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
if (dump && !dev_data->block_in_reset[block_id]) {
u8 dbg_client_id =
p_block_defs->dbg_client_id[dev_data->chip_id];
+ u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+ u32 len = STATIC_DEBUG_LINE_DWORDS;
/* Enable block's client */
qed_bus_enable_clients(p_hwfn, p_ptt,
@@ -3270,11 +3539,13 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
0xf, 0, 0, 0);
/* Read debug line info */
- for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
- i < STATIC_DEBUG_LINE_DWORDS;
- i++, offset++, addr += BYTES_IN_DWORD)
- dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
- addr);
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
}
/* Disable block's client and debug output */
@@ -3311,14 +3582,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
u8 i, port_mode = 0;
u32 offset = 0;
- /* Check if emulation platform */
*num_dumped_dwords = 0;
- /* Fill GRC parameters that were not set by the user with their default
- * value.
- */
- qed_dbg_grc_set_params_default(p_hwfn);
-
/* Find port mode */
if (dump) {
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
@@ -3370,15 +3635,14 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
}
/* Disable all parities using MFW command */
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
if (!parities_masked) {
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
if (qed_grc_get_param
(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
- else
- DP_NOTICE(p_hwfn,
- "Failed to mask parities using MFW\n");
}
}
@@ -3409,6 +3673,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset,
dump,
block_enable, NULL, NULL);
+
+ /* Dump special registers */
+ offset += qed_grc_dump_special_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
}
/* Dump memories */
@@ -3583,9 +3852,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
}
if (mode_match) {
- u32 grc_addr =
- DWORDS_TO_BYTES(GET_FIELD(reg->data,
- DBG_IDLE_CHK_INFO_REG_ADDRESS));
+ u32 addr =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS);
/* Write register header */
struct dbg_idle_chk_result_reg_hdr *reg_hdr =
@@ -3597,16 +3866,19 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
memset(reg_hdr, 0, sizeof(*reg_hdr));
reg_hdr->size = reg->size;
SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
- rule->num_cond_regs + reg_id);
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
/* Write register values */
- for (i = 0; i < reg->size;
- i++, offset++, grc_addr += 4)
- dump_buf[offset] =
- qed_rd(p_hwfn, p_ptt, grc_addr);
- }
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ reg->size);
}
+ }
}
return offset;
@@ -3621,7 +3893,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
- u32 i, j, offset = 0;
+ u32 i, offset = 0;
u16 entry_id;
u8 reg_id;
@@ -3664,73 +3936,83 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
+ if (!dump) {
+ u32 entry_dump_size =
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ 0,
+ NULL);
+
+ offset += num_reg_entries * entry_dump_size;
+ (*num_failing_rules) += num_reg_entries;
+ continue;
+ }
+
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
/* Read current entry of all condition registers */
- if (dump) {
- u32 next_reg_offset = 0;
-
- for (reg_id = 0;
- reg_id < rule->num_cond_regs;
- reg_id++) {
- const struct dbg_idle_chk_cond_reg
- *reg = &cond_regs[reg_id];
-
- /* Find GRC address (if it's a memory,
- * the address of the specific entry is
- * calculated).
- */
- u32 grc_addr =
- DWORDS_TO_BYTES(
- GET_FIELD(reg->data,
- DBG_IDLE_CHK_COND_REG_ADDRESS));
-
- if (reg->num_entries > 1 ||
- reg->start_entry > 0) {
- u32 padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two
- (reg->entry_size) : 1;
-
- grc_addr +=
- DWORDS_TO_BYTES(
- (reg->start_entry +
- entry_id)
- * padded_entry_size);
- }
+ u32 next_reg_offset = 0;
- /* Read registers */
- if (next_reg_offset + reg->entry_size >=
- IDLE_CHK_MAX_ENTRIES_SIZE) {
- DP_NOTICE(p_hwfn,
- "idle check registers entry is too large\n");
- return 0;
- }
+ for (reg_id = 0; reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg =
+ &cond_regs[reg_id];
- for (j = 0; j < reg->entry_size;
- j++, next_reg_offset++,
- grc_addr += 4)
- cond_reg_values[next_reg_offset] =
- qed_rd(p_hwfn, p_ptt, grc_addr);
+ /* Find GRC address (if it's a memory, the
+ * address of the specific entry is calculated).
+ */
+ u32 addr =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS);
+
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ u32 padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) :
+ 1;
+
+ addr += (reg->start_entry + entry_id) *
+ padded_entry_size;
}
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ next_reg_offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ cond_reg_values +
+ next_reg_offset,
+ dump, addr,
+ reg->entry_size);
}
/* Call rule's condition function - a return value of
* true indicates failure.
*/
if ((*cond_arr[rule->cond_id])(cond_reg_values,
- imm_values) || !dump) {
+ imm_values)) {
offset +=
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- rule->rule_id,
- rule,
- entry_id,
- cond_reg_values);
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
(*num_failing_rules)++;
break;
}
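
The new dump == false fast path above prices the buffer without touching hardware: one dry-run call to qed_idle_chk_dump_failure() yields the size of a single failure entry, and the worst case assumes every entry fails:

	/* worst-case sizing, as computed on the dry-run path */
	offset += num_reg_entries * entry_dump_size;

On the real pass only entries whose condition function returns true are dumped, so a buffer sized this way cannot overflow.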
@@ -3818,13 +4100,18 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct mcp_file_att file_att;
/* Call NVRAM get file command */
- if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type, &ret_mcp_resp, &ret_mcp_param,
- &ret_txn_size, (u32 *)&file_att) != 0)
- return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+ int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att);
/* Check response */
- if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ if (nvm_result ||
+ (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Update return values */
@@ -3944,7 +4231,6 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
u32 running_mfw_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
- enum dbg_status status;
u32 nvram_image_type;
*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
@@ -3955,30 +4241,12 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
nvram_image_type =
(*running_bundle_id ==
DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
- status = qed_find_nvram_image(p_hwfn,
- p_ptt,
- nvram_image_type,
- trace_meta_offset_bytes,
- trace_meta_size_bytes);
-
- return status;
-}
-
-/* Reads the MCP Trace data from the specified GRC address into the specified
- * buffer.
- */
-static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 grc_addr, u32 size_in_dwords, u32 *buf)
-{
- u32 i;
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
- size_in_dwords, grc_addr);
- for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
- buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+ return qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes);
}
/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
@@ -4034,11 +4302,14 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
- u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
- u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+ u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
enum dbg_status status;
+ bool mcp_access;
int halted = 0;
+ mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
*num_dumped_dwords = 0;
/* Get trace data info */
@@ -4060,7 +4331,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
* consistent if halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
- if (dump) {
+ if (dump && mcp_access) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4078,13 +4349,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "size", trace_data_size_dwords);
/* Read trace data from scratchpad into dump buffer */
- if (dump)
- qed_mcp_trace_read_data(p_hwfn,
- p_ptt,
- trace_data_grc_addr,
- trace_data_size_dwords,
- dump_buf + offset);
- offset += trace_data_size_dwords;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(trace_data_grc_addr),
+ trace_data_size_dwords);
/* Resume MCP (only if halt succeeded) */
if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
@@ -4095,38 +4365,38 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "mcp_trace_meta", 1);
/* Read trace meta info */
- status = qed_mcp_trace_get_meta_info(p_hwfn,
- p_ptt,
- trace_data_size_bytes,
- &running_bundle_id,
- &trace_meta_offset_bytes,
- &trace_meta_size_bytes);
- if (status != DBG_STATUS_OK)
- return status;
+ if (mcp_access) {
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status == DBG_STATUS_OK)
+ trace_meta_size_dwords =
+ BYTES_TO_DWORDS(trace_meta_size_bytes);
+ }
- /* Dump trace meta size param (trace_meta_size_bytes is always
- * dword-aligned).
- */
- trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
- offset += qed_dump_num_param(dump_buf + offset, dump, "size",
- trace_meta_size_dwords);
+ /* Dump trace meta size param */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_meta_size_dwords);
/* Read trace meta image into dump buffer */
- if (dump) {
+ if (dump && trace_meta_size_dwords)
status = qed_mcp_trace_read_meta(p_hwfn,
- p_ptt,
- trace_meta_offset_bytes,
- trace_meta_size_bytes,
- dump_buf + offset);
- if (status != DBG_STATUS_OK)
- return status;
- }
-
- offset += trace_meta_size_dwords;
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status == DBG_STATUS_OK)
+ offset += trace_meta_size_dwords;
*num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ /* If no mcp access, indicate that the dump doesn't contain the meta
+ * data from NVRAM.
+ */
+ return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
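/* With no MCP access the function now returns
 * DBG_STATUS_NVRAM_GET_IMAGE_FAILED even though a meta-less dump was
 * produced; the callers changed later in this patch treat that status as
 * non-fatal. A minimal sketch of the tolerant check (enum values here are
 * illustrative stand-ins, not the driver's):
 */
enum dbg_status_sketch {
	SKETCH_STATUS_OK,
	SKETCH_STATUS_NVRAM_GET_IMAGE_FAILED,
};

static int dump_usable(enum dbg_status_sketch rc)
{
	/* a missing NVRAM meta image still yields a usable, smaller dump */
	return rc == SKETCH_STATUS_OK ||
	       rc == SKETCH_STATUS_NVRAM_GET_IMAGE_FAILED;
}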
/* Dump GRC FIFO */
@@ -4311,9 +4581,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct fw_asserts_ram_section *asserts;
char storm_letter_str[2] = "?";
struct fw_info fw_info;
- u32 offset = 0, i;
+ u32 offset = 0;
u8 storm_id;
/* Dump global params */
@@ -4323,8 +4594,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "fw-asserts");
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
- last_list_idx, element_addr;
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+ u32 last_list_idx, addr;
if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
continue;
@@ -4332,6 +4603,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
/* Read FW info for the current Storm */
qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+ asserts = &fw_info.fw_asserts_section;
+
/* Dump FW Asserts section header and params */
storm_letter_str[0] = s_storm_defs[storm_id].letter;
offset += qed_dump_section_hdr(dump_buf + offset, dump,
@@ -4339,12 +4612,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
storm_letter_str);
offset += qed_dump_num_param(dump_buf + offset, dump, "size",
- fw_info.fw_asserts_section.
- list_element_dword_size);
+ asserts->list_element_dword_size);
if (!dump) {
- offset += fw_info.fw_asserts_section.
- list_element_dword_size;
+ offset += asserts->list_element_dword_size;
continue;
}
@@ -4352,28 +4623,22 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
fw_asserts_section_addr =
s_storm_defs[storm_id].sem_fast_mem_addr +
SEM_FAST_REG_INT_RAM +
- RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
- section_ram_line_offset);
+ RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
next_list_idx_addr =
fw_asserts_section_addr +
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_next_index_dword_offset);
+ DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
last_list_idx = (next_list_idx > 0
? next_list_idx
- : fw_info.fw_asserts_section.list_num_elements)
- - 1;
- element_addr =
- fw_asserts_section_addr +
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_dword_offset) +
- last_list_idx *
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_element_dword_size);
- for (i = 0;
- i < fw_info.fw_asserts_section.list_element_dword_size;
- i++, offset++, element_addr += BYTES_IN_DWORD)
- dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+ : asserts->list_num_elements) - 1;
+ addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+ asserts->list_dword_offset +
+ last_list_idx * asserts->list_element_dword_size;
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump, addr,
+ asserts->list_element_dword_size);
}
/* Dump last section */
@@ -4386,13 +4651,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
{
/* Convert binary data to debug arrays */
- u32 num_of_buffers = *(u32 *)bin_ptr;
- struct bin_buffer_hdr *buf_array;
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
- for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
s_dbg_arrays[buf_id].size_in_dwords =
@@ -4402,6 +4664,17 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
return DBG_STATUS_OK;
}
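/* The new layout assumed above drops the leading buffer-count dword: the
 * blob now begins directly with a fixed table of MAX_BIN_DBG_BUFFER_TYPE
 * {offset, length} headers. A userspace sketch of parsing that layout
 * (struct name and the buffer-type count are assumptions for
 * illustration):
 */
#include <stddef.h>
#include <stdint.h>

#define NUM_BUF_TYPES 4		/* stands in for MAX_BIN_DBG_BUFFER_TYPE */

struct hdr_sketch {
	uint32_t offset;	/* byte offset of the buffer in the blob */
	uint32_t length;	/* buffer length in bytes */
};

static const uint32_t *get_buf(const uint8_t *blob, unsigned int type,
			       uint32_t *size_in_dwords)
{
	const struct hdr_sketch *hdrs = (const struct hdr_sketch *)blob;

	if (type >= NUM_BUF_TYPES)
		return NULL;

	*size_in_dwords = hdrs[type].length / 4;
	return (const uint32_t *)(blob + hdrs[type].offset);
}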
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
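/* This helper anchors a pattern repeated through the rest of the patch:
 * every dump entry point now reverts the user-tunable GRC params on its
 * way out, so one dump's overrides cannot leak into the next. A
 * compile-ready userspace sketch of the shape (all names illustrative):
 */
#include <stdio.h>

static int grc_param = 7;		/* stands in for one GRC param */

static void set_params_default(void)
{
	grc_param = 0;
}

static int do_dump(void)
{
	int status = 0;			/* the dump itself may still fail */

	/* ... perform the dump using grc_param ... */
	set_params_default();		/* revert regardless of status */
	return status;
}

int main(void)
{
	grc_param = 42;			/* user override for one dump */
	do_dump();
	printf("param after dump: %d\n", grc_param);	/* back to default */
	return 0;
}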
+
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
@@ -4441,8 +4714,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
/* GRC Dump */
status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
- /* Clear all GRC params */
- qed_dbg_grc_clear_params(p_hwfn);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return status;
}
@@ -4495,6 +4769,10 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
/* Idle Check Dump */
*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return DBG_STATUS_OK;
}
@@ -4519,11 +4797,15 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+ /* validate buffer size */
+ status =
+ qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
&needed_buf_size_in_dwords);
- if (status != DBG_STATUS_OK)
+ if (status != DBG_STATUS_OK &&
+ status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4531,8 +4813,13 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
qed_update_blocks_reset_state(p_hwfn, p_ptt);
/* Perform dump */
- return qed_mcp_trace_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+ status = qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4567,8 +4854,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_reg_fifo_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+
+ status = qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4603,8 +4896,13 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_igu_fifo_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+
+ status = qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status
@@ -4641,9 +4939,16 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_protection_override_dump(p_hwfn,
- p_ptt,
- dump_buf, true, num_dumped_dwords);
+
+ status = qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf,
+ true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -5045,13 +5350,10 @@ static char s_temp_buf[MAX_MSG_LEN];
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
{
/* Convert binary data to debug arrays */
- u32 num_of_buffers = *(u32 *)bin_ptr;
- struct bin_buffer_hdr *buf_array;
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
- for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
s_dbg_arrays[buf_id].size_in_dwords =
@@ -5874,16 +6176,16 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
elements[i].data,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ADDRESS) *
REG_FIFO_ELEMENT_ADDR_FACTOR,
s_access_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ACCESS)],
- GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PF), vf_str,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF), vf_str,
+ (u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PORT),
s_privilege_strs[GET_FIELD(elements[i].
data,
@@ -6189,13 +6491,13 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+ "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
i, address,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_READ),
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_WRITE),
s_protection_strs[GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
@@ -6508,7 +6810,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
*/
rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
&buf_size_dwords);
- if (rc != DBG_STATUS_OK)
+ if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return rc;
feature->buf_size = buf_size_dwords * sizeof(u32);
feature->dump_buf = vmalloc(feature->buf_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e518f914eab1..5f31140d0b77 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -75,7 +75,8 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -84,7 +85,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
if (IS_VF(p_hwfn->cdev))
return 1 << 17;
- val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ val = qed_rd(p_hwfn, p_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -182,199 +183,573 @@ void qed_resc_free(struct qed_dev *cdev)
}
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
- qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+ qed_dcbx_info_free(p_hwfn);
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
+/******************** QM initialization *******************/
+#define ACTIVE_TCS_BMAP 0x9f
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
{
- u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
- struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- struct init_qm_port_params *p_qm_port;
- bool init_rdma_offload_pq = false;
- bool init_pure_ack_pq = false;
- bool init_ooo_pq = false;
- u16 num_pqs, multi_cos_tcs = 1;
- u8 pf_wfq = qm_info->pf_wfq;
- u32 pf_rl = qm_info->pf_rl;
- u16 num_pf_rls = 0;
- u16 num_vfs = 0;
-
-#ifdef CONFIG_QED_SRIOV
- if (p_hwfn->cdev->p_iov_info)
- num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
-#endif
- memset(qm_info, 0, sizeof(*qm_info));
+ u32 flags;
- num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
- num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+ /* common flags */
+ flags = PQ_FLAGS_LB;
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
- num_pqs++; /* for RoCE queue */
- init_rdma_offload_pq = true;
- /* we subtract num_vfs because each require a rate limiter,
- * and one default rate limiter
- */
- if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
- num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+ /* feature flags */
+ if (IS_QED_SRIOV(p_hwfn->cdev))
+ flags |= PQ_FLAGS_VFS;
- num_pqs += num_pf_rls;
- qm_info->num_pf_rls = (u8) num_pf_rls;
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case QED_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case QED_PCI_ISCSI:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case QED_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ break;
+ default:
+ DP_ERR(p_hwfn,
+ "unknown personality %d\n", p_hwfn->hw_info.personality);
+ return 0;
}
- if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+ return flags;
+}
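/* A worked example of the flag derivation above for an iSCSI function
 * with SR-IOV enabled (the bit values here are illustrative; only the
 * combinations mirror the driver logic):
 */
#include <stdio.h>

enum {
	SK_PQ_FLAGS_RLS  = 1 << 0,
	SK_PQ_FLAGS_MCOS = 1 << 1,
	SK_PQ_FLAGS_LB   = 1 << 2,
	SK_PQ_FLAGS_OOO  = 1 << 3,
	SK_PQ_FLAGS_ACK  = 1 << 4,
	SK_PQ_FLAGS_OFLD = 1 << 5,
	SK_PQ_FLAGS_LLT  = 1 << 6,
	SK_PQ_FLAGS_VFS  = 1 << 7,
};

int main(void)
{
	unsigned int flags = SK_PQ_FLAGS_LB;		/* common */

	flags |= SK_PQ_FLAGS_VFS;			/* SR-IOV enabled */
	flags |= SK_PQ_FLAGS_ACK | SK_PQ_FLAGS_OOO |
		 SK_PQ_FLAGS_OFLD;			/* QED_PCI_ISCSI */
	printf("pq_flags = 0x%x\n", flags);
	return 0;
}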
- /* Sanity checking that setup requires legal number of resources */
- if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
- DP_ERR(p_hwfn,
- "Need too many Physical queues - 0x%04x when only %04x are available\n",
- num_pqs, RESC_NUM(p_hwfn, QED_PQ));
- return -EINVAL;
- }
+/* Getters for resource amounts necessary for qm initialization */
+u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
- /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
- */
- qm_info->qm_pq_params = kcalloc(num_pqs,
- sizeof(struct init_qm_pq_params),
- b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
- if (!qm_info->qm_pq_params)
- goto alloc_err;
+u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+{
+ return IS_QED_SRIOV(p_hwfn->cdev) ?
+ p_hwfn->cdev->p_iov_info->total_vfs : 0;
+}
- qm_info->qm_vport_params = kcalloc(num_vports,
- sizeof(struct init_qm_vport_params),
- b_sleepable ? GFP_KERNEL
- : GFP_ATOMIC);
- if (!qm_info->qm_vport_params)
- goto alloc_err;
+#define NUM_DEFAULT_RLS 1
- qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
- sizeof(struct init_qm_port_params),
- b_sleepable ? GFP_KERNEL
- : GFP_ATOMIC);
- if (!qm_info->qm_port_params)
- goto alloc_err;
+u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
- qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
- b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
- if (!qm_info->wfq_data)
- goto alloc_err;
+ /* num RLs can't exceed resource amount of rls or vports */
+ num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+ RESC_NUM(p_hwfn, QED_VPORT));
+
+ /* Make sure after we reserve there's something left */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
+ return 0;
- vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
- /* First init rate limited queues */
- for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.non_offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- }
+ return num_pf_rls;
+}
- /* First init per-TC PQs */
- for (i = 0; i < multi_cos_tcs; i++) {
- struct init_qm_pq_params *params =
- &qm_info->qm_pq_params[curr_queue++];
+u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == QED_PCI_ETH) {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.non_offload_tc;
- params->wrr_group = 1;
- } else {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.offload_tc;
- params->wrr_group = 1;
- }
- }
+ /* all pqs share the same vport, except for vfs and pf_rl pqs */
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ qed_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ qed_init_qm_get_num_vfs(p_hwfn) + 1;
+}
- /* Then init pure-LB PQ */
- qm_info->pure_lb_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id =
- (u8) RESC_START(p_hwfn, QED_VPORT);
- qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
-
- qm_info->offload_pq = 0;
- if (init_rdma_offload_pq) {
- qm_info->offload_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_pure_ack_pq) {
- qm_info->pure_ack_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_ooo_pq) {
- qm_info->ooo_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- /* Then init per-VF PQs */
- vf_offset = curr_queue;
- for (i = 0; i < num_vfs; i++) {
- /* First vport is used by the PF */
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.non_offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- curr_queue++;
- }
-
- qm_info->vf_queues_offset = vf_offset;
- qm_info->num_pqs = num_pqs;
- qm_info->num_vports = num_vports;
+/* calc amount of PQs according to the requested flags */
+u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ qed_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ qed_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
+ (!!(PQ_FLAGS_LLT & pq_flags)) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
+}
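/* The !!(flag) terms turn each feature bit into 0 or 1, so the sum counts
 * one PQ per single-queue feature plus one per TC and one per VF. For a
 * hypothetical RoCE PF (MCOS | LB | OFLD | LLT) with 4 TCs and 8 VFs:
 * 4 + 1 + 1 + 1 + 8 = 15 PQs. Recomputed standalone:
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_tcs = 4, num_vfs = 8;
	unsigned int has_mcos = 1, has_lb = 1, has_ooo = 0, has_ack = 0,
		     has_ofld = 1, has_llt = 1, has_vfs = 1;
	unsigned int num_pqs = has_mcos * num_tcs + has_lb + has_ooo +
			       has_ack + has_ofld + has_llt +
			       has_vfs * num_vfs;

	printf("num_pqs = %u\n", num_pqs);	/* prints 15 */
	return 0;
}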
+
+/* initialize the top level QM params */
+static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
+
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
+ qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
+
+ /* unless MFW indicated otherwise, ooo_tc == 3 for
+ * AH 4-port and 4 otherwise.
+ */
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
+
+/* initialize qm vport params */
+static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
+{
/* Initialize qm port parameters */
- num_ports = p_hwfn->cdev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 :
+ ACTIVE_TCS_BMAP;
+
for (i = 0; i < num_ports; i++) {
- p_qm_port = &qm_info->qm_port_params[i];
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+
p_qm_port->active = 1;
- if (num_ports == 4)
- p_qm_port->active_phys_tcs = 0x7;
- else
- p_qm_port->active_phys_tcs = 0x9f;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
+}
+
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d, qed_init_qm_get_num_vports() %d\n",
+ qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF) and whether a new vport is allocated to the pq or not
+ * (i.e. vport will be shared).
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT (1 << 0)
+#define PQ_INIT_PF_RL (1 << 1)
+#define PQ_INIT_VF_RL (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
+ struct qed_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);
+
+ if (pq_idx > max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d, qed_init_qm_get_num_vports() %d\n",
+ qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn,
+ "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
+ "rl overflow! qm_info->num_pf_rls %d, qed_init_qm_get_num_pf_rls() %d\n",
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+ u32 pq_flags)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+ if (bitmap_weight((unsigned long *)&pq_flags,
+ sizeof(pq_flags) * BITS_PER_BYTE) > 1)
+ goto err;
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->offload_pq;
+ case PQ_FLAGS_LLT:
+ return &qm_info->low_latency_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+ return NULL;
+}
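/* The bitmap_weight() guard above is a population-count test: exactly one
 * flag may be set. For a plain 32-bit word the classic x & (x - 1) idiom
 * is an equivalent check, sketched here for reference:
 */
static int exactly_one_flag(unsigned int pq_flags)
{
	/* non-zero, and clearing the lowest set bit leaves nothing */
	return pq_flags && !(pq_flags & (pq_flags - 1));
}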
+
+/* save pq index in qm info */
+static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
+
+ if (tc > max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
+
+ if (vf > max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+{
+ u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
+
+ if (rl > max_rl)
+ DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
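/* Callers elsewhere in the driver (not shown in this hunk) are expected
 * to resolve their physical queue through these getters rather than
 * compute indices by hand; for example, an L2 Tx queue on traffic class 2
 * would use:
 *
 *	pq_id = qed_get_cm_pq_idx_mcos(p_hwfn, 2);
 *
 * and a VF queue would use qed_get_cm_pq_idx_vf(p_hwfn, vf_id).
 */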
+
+/* Functions for creating specific types of pqs */
+static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
+ return;
- qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
- qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
qm_info->num_vf_pqs = num_vfs;
- qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ qed_init_qm_pq(p_hwfn,
+ qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
+}
- for (i = 0; i < qm_info->num_vports; i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->vport_rl_en = 1;
- qm_info->vport_wfq_en = 1;
- qm_info->pf_rl = pf_rl;
- qm_info->pf_wfq = pf_wfq;
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
+}
+
+static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
+{
+ /* rate limited pqs must come first (FW assumption) */
+ qed_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ qed_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ qed_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ qed_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ qed_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ qed_init_qm_offload_pq(p_hwfn);
+
+ /* low latency pq */
+ qed_init_qm_low_latency_pq(p_hwfn);
+
+ /* done sharing vports */
+ qed_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ qed_init_qm_vf_pqs(p_hwfn);
+}
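/* For the hypothetical RoCE PF used above (4 TCs, 8 VFs, no PF rate
 * limiters), the ordering enforced here yields the following relative PQ
 * layout:
 *
 *	0..3   mcos (one per TC)
 *	4      pure loopback
 *	5      offload
 *	6      low latency
 *	7..14  VF queues (one per VF)
 *
 * with the rate-limited PQs, when present, taking the lowest indices, as
 * the firmware assumption requires.
 */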
+
+/* compare values of getters against resources amounts */
+static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
+{
+ if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return -EINVAL;
+ }
+
+ if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return -EINVAL;
+ }
return 0;
+}
-alloc_err:
- qed_qm_info_free(p_hwfn);
- return -ENOMEM;
+static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq,
+ qm_info->start_vport,
+ qm_info->pure_lb_pq,
+ qm_info->offload_pq, qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq,
+ qm_info->first_vf_pq,
+ qm_info->num_pqs,
+ qm_info->num_vf_pqs,
+ qm_info->num_vports, qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en,
+ qm_info->pf_wfq_en,
+ qm_info->vport_rl_en,
+ qm_info->vport_wfq_en,
+ qm_info->pf_wfq,
+ qm_info->pf_rl,
+ qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+ port = &(qm_info->qm_port_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
+ i,
+ port->active,
+ port->active_phys_tcs,
+ port->num_pbf_cmd_lines,
+ port->num_btb_blocks, port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &(qm_info->qm_vport_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
+ qm_info->start_vport + i,
+ vport->vport_rl, vport->vport_wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "%d ", vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &(qm_info->qm_pq_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ qm_info->start_pq + i,
+ pq->vport_id,
+ pq->tc_id, pq->wrr_group, pq->rl_valid);
+ }
+}
+
+static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ qed_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ qed_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ qed_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ qed_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ qed_init_qm_pq_params(p_hwfn);
+
+ /* display all that init */
+ qed_dp_init_qm_params(p_hwfn);
}
/* This function reconfigures the QM pf on the fly.
@@ -391,17 +766,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
bool b_rc;
int rc;
- /* qm_info is allocated in qed_init_qm_info() which is already called
- * from qed_resc_alloc() or previous call of qed_qm_reconf().
- * The allocated size may change each init, so we free it before next
- * allocation.
- */
- qed_qm_info_free(p_hwfn);
-
/* initialize qed's qm data structure */
- rc = qed_init_qm_info(p_hwfn, false);
- if (rc)
- return rc;
+ qed_init_qm_info(p_hwfn);
/* stop PF's qm queues */
spin_lock_bh(&qm_lock);
@@ -415,7 +781,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- qed_qm_init_pf(p_hwfn);
+ qed_qm_init_pf(p_hwfn, p_ptt);
/* activate init tool on runtime array */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -434,6 +800,47 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ int rc;
+
+ rc = qed_init_qm_sanity(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+ qed_init_qm_get_num_pqs(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+ qed_init_qm_get_num_vports(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+ p_hwfn->cdev->num_ports_in_engines,
+ GFP_KERNEL);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) *
+ qed_init_qm_get_num_vports(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return 0;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+ qed_qm_info_free(p_hwfn);
+ return -ENOMEM;
+}
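/* The open-coded sizeof(*p) * n multiplications above can overflow in
 * principle; kcalloc() performs the same zeroed allocation with a
 * built-in overflow check. A userspace sketch of the safer form (struct
 * fields illustrative):
 */
#include <stdlib.h>

struct pq_params_sketch { int vport_id, tc_id, wrr_group, rl_valid; };

static struct pq_params_sketch *alloc_pq_params(size_t num_pqs)
{
	/* calloc(n, size) fails cleanly if n * size would overflow */
	return calloc(num_pqs, sizeof(struct pq_params_sketch));
}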
+
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_iscsi_info *p_iscsi_info;
@@ -442,8 +849,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
#ifdef CONFIG_QED_LL2
struct qed_ll2_info *p_ll2_info;
#endif
+ u32 rdma_tasks, excess_tasks;
struct qed_consq *p_consq;
struct qed_eq *p_eq;
+ u32 line_count;
int i, rc = 0;
if (IS_VF(cdev))
@@ -465,19 +874,44 @@ int qed_resc_alloc(struct qed_dev *cdev)
/* Set the HW cid/tid numbers (in the contest manager)
* Must be done prior to any further computations.
*/
- rc = qed_cxt_set_pf_params(p_hwfn);
+ rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
if (rc)
goto alloc_err;
- /* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn, true);
+ rc = qed_alloc_qm_data(p_hwfn);
if (rc)
goto alloc_err;
+ /* init qm info */
+ qed_init_qm_info(p_hwfn);
+
/* Compute the ILT client partition */
- rc = qed_cxt_cfg_ilt_compute(p_hwfn);
- if (rc)
- goto alloc_err;
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "too many ILT lines; re-computing with fewer lines\n");
+ /* In case there are not enough ILT lines we reduce the
+ * number of RDMA tasks and re-compute.
+ */
+ excess_tasks =
+ qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
+ if (!excess_tasks)
+ goto alloc_err;
+
+ rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
+ rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "failed ILT compute. Requested too many lines: %u\n",
+ line_count);
+
+ goto alloc_err;
+ }
+ }
/* CID map / ILT shadow table / T2
* The table sizes are determined by the computations above
@@ -674,11 +1108,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
return rc;
}
-static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
int hw_mode = 0;
- hw_mode = (1 << MODE_BB_B0);
+ if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_BB;
+ } else if (QED_IS_AH(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_K2;
+ } else {
+ DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
+ p_hwfn->cdev->type);
+ return -EINVAL;
+ }
switch (p_hwfn->cdev->num_ports_in_engines) {
case 1:
@@ -693,7 +1135,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
default:
DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
p_hwfn->cdev->num_ports_in_engines);
- return;
+ return -EINVAL;
}
switch (p_hwfn->cdev->mf_mode) {
@@ -719,6 +1161,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
"Configuring function for hw_mode: 0x%08x\n",
p_hwfn->hw_info.hw_mode);
+
+ return 0;
}
/* Init run time data for all PFs on an engine. */
@@ -748,16 +1192,67 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
}
}
+static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 val, wr_mbs, cache_line_size;
+
+ val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+ switch (val) {
+ case 0:
+ wr_mbs = 128;
+ break;
+ case 1:
+ wr_mbs = 256;
+ break;
+ case 2:
+ wr_mbs = 512;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ val);
+ return;
+ }
+
+ cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
+ switch (cache_line_size) {
+ case 32:
+ val = 0;
+ break;
+ case 64:
+ val = 1;
+ break;
+ case 128:
+ val = 2;
+ break;
+ case 256:
+ val = 3;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ cache_line_size);
+ }
+
+ if (L1_CACHE_BYTES > wr_mbs)
+ DP_INFO(p_hwfn,
+ "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+ L1_CACHE_BYTES, wr_mbs);
+
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+}
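/* The second switch above is just log2(cache_line_size / 32):
 * 32 -> 0, 64 -> 1, 128 -> 2, 256 -> 3. The same encoding in sketch form:
 */
static unsigned int cls_to_reg_val(unsigned int cache_line_size)
{
	unsigned int val = 0;

	while (cache_line_size > 32) {
		cache_line_size >>= 1;
		val++;
	}
	return val;
}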
+
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
- u8 vf_id;
qed_init_cau_rt_data(cdev);
@@ -784,17 +1279,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_cxt_hw_init_common(p_hwfn);
- /* Close gate from NIG to BRB/Storm; By default they are open, but
- * we close them to prevent NIG from passing data to reset blocks.
- * Should have been done in the ENGINE phase, but init-tool lacks
- * proper port-pretend capabilities.
- */
- qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
- qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- qed_port_unpretend(p_hwfn, p_ptt);
+ qed_init_cache_line_size(p_hwfn, p_ptt);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc)
@@ -814,7 +1299,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
- for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+ max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -832,17 +1318,15 @@ static int
qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
- u32 dpi_bit_shift, dpi_count;
+ u32 dpi_bit_shift, dpi_count, dpi_page_size;
u32 min_dpis;
+ u32 n_wids;
/* Calculate DPI size */
- dpi_page_size_1 = QED_WID_SIZE * n_cpus;
- dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
- dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
- dpi_page_size = roundup_pow_of_two(dpi_page_size);
+ n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
+ dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
+ dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
dpi_bit_shift = ilog2(dpi_page_size / 4096);
-
dpi_count = pwm_region_size / dpi_page_size;
min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
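/* Working the new sizing through with illustrative constants (WID size
 * 1024 bytes, a minimum of 16 WIDs, 4 KiB pages -- the real values live
 * in QED_WID_SIZE and QED_MIN_WIDS) for n_cpus = 6: n_wids =
 * max(16, 6) = 16 (already a power of two), so dpi_page_size =
 * 1024 * 16 = 16384, page alignment leaves it unchanged, and
 * dpi_bit_shift = ilog2(16384 / 4096) = 2.
 */
#include <stdio.h>

int main(void)
{
	unsigned int wid_size = 1024, min_wids = 16, page = 4096;
	unsigned int n_cpus = 6, n_wids, dpi_page_size, shift = 0;

	n_wids = n_cpus > min_wids ? n_cpus : min_wids;
	while (n_wids & (n_wids - 1))		/* round up to power of two */
		n_wids = (n_wids | (n_wids - 1)) + 1;
	dpi_page_size = wid_size * n_wids;
	dpi_page_size = (dpi_page_size + page - 1) & ~(page - 1);
	while ((dpi_page_size / 4096) >> (shift + 1))
		shift++;
	printf("dpi_page_size=%u dpi_bit_shift=%u\n", dpi_page_size, shift);
	return 0;
}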
@@ -870,13 +1354,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 pwm_regsize, norm_regsize;
u32 non_pwm_conn, min_addr_reg1;
- u32 db_bar_size, n_cpus;
+ u32 db_bar_size, n_cpus = 1;
u32 roce_edpm_mode;
u32 pf_dems_shift;
int rc = 0;
u8 cond;
- db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+ db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
if (p_hwfn->cdev->num_hwfns > 1)
db_bar_size /= 2;
@@ -931,6 +1415,8 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_bar(p_hwfn, p_ptt);
}
+ p_hwfn->wid_count = (u16) n_cpus;
+
DP_INFO(p_hwfn,
"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
norm_regsize,
@@ -967,7 +1453,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
int hw_mode,
bool b_hw_start,
enum qed_int_mode int_mode,
@@ -987,7 +1473,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
p_hwfn->qm_info.pf_rl = 100000;
}
- qed_cxt_hw_init_pf(p_hwfn);
+ qed_cxt_hw_init_pf(p_hwfn, p_ptt);
qed_int_igu_init_rt(p_hwfn);
@@ -1095,25 +1581,47 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}
-int qed_hw_init(struct qed_dev *cdev,
- struct qed_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum qed_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data)
+static void
+qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
+ struct qed_drv_load_params *p_drv_load)
+{
+ memset(p_load_req, 0, sizeof(*p_load_req));
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+}
+
+static int qed_vf_start(struct qed_hwfn *p_hwfn,
+ struct qed_hw_init_params *p_params)
{
+ if (p_params->p_tunn) {
+ qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return 0;
+}
+
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+{
+ struct qed_load_req_params load_req_params;
u32 load_code, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
- if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
return -EINVAL;
}
if (IS_PF(cdev)) {
- rc = qed_init_fw_data(cdev, bin_fw_data);
+ rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
if (rc)
return rc;
}
@@ -1128,26 +1636,32 @@ int qed_hw_init(struct qed_dev *cdev,
}
if (IS_VF(cdev)) {
- p_hwfn->b_int_enabled = 1;
+ qed_vf_start(p_hwfn, p_params);
continue;
}
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
- qed_calc_hw_mode(p_hwfn);
+ rc = qed_calc_hw_mode(p_hwfn);
+ if (rc)
+ return rc;
- rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+ qed_fill_load_req_params(&load_req_params,
+ p_params->p_drv_load_params);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
if (rc) {
- DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+ DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
return rc;
}
- qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
-
+ load_code = load_req_params.load_code;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
- rc, load_code);
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
@@ -1168,11 +1682,15 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn, p_hwfn->hw_info.hw_mode,
- b_hw_start, int_mode,
- allow_npar_tx_switch);
+ p_params->p_tunn,
+ p_hwfn->hw_info.hw_mode,
+ p_params->b_hw_start,
+ p_params->int_mode,
+ p_params->allow_npar_tx_switch);
break;
default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected load code [0x%08x]", load_code);
rc = -EINVAL;
break;
}
@@ -1212,10 +1730,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
- drv_mb_param = (FW_MAJOR_VERSION << 24) |
- (FW_MINOR_VERSION << 16) |
- (FW_REVISION_VERSION << 8) |
- (FW_ENGINEERING_VERSION);
+ drv_mb_param = STORM_FW_VERSION;
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
drv_mb_param, &load_code, &param);
@@ -1290,27 +1805,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev)
int qed_hw_stop(struct qed_dev *cdev)
{
- int rc = 0, t_rc;
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc, rc2 = 0;
int j;
for_each_hwfn(cdev, j) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ p_hwfn = &cdev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
+ /* Send unload command to MCP */
+ rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+
+ qed_slowpath_irq_sync(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. this
+ * prevents a race between pf stop and dcbx pf update.
+ */
rc = qed_sp_pf_stop(p_hwfn);
- if (rc)
+ if (rc) {
DP_NOTICE(p_hwfn,
- "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1333,34 +1874,54 @@ int qed_hw_stop(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
}
if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
+
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
- t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
- cdev->hwfns[0].p_main_ptt, false);
- if (t_rc != 0)
- rc = t_rc;
+ rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+ rc2 = -EINVAL;
+ }
}
- return rc;
+ return rc2;
}
-void qed_hw_stop_fastpath(struct qed_dev *cdev)
+int qed_hw_stop_fastpath(struct qed_dev *cdev)
{
int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct qed_ptt *p_ptt;
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
continue;
}
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
@@ -1378,100 +1939,28 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000);
- }
-}
-
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
-{
- if (IS_VF(p_hwfn->cdev))
- return;
-
- /* Re-open incoming traffic */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
-}
-
-static int qed_reg_assert(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 reg, bool expected)
-{
- u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
-
- if (assert_val != expected) {
- DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
- reg, expected);
- return -EINVAL;
+ qed_ptt_release(p_hwfn, p_ptt);
}
return 0;
}
-int qed_hw_reset(struct qed_dev *cdev)
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
- int rc = 0;
- u32 unload_resp, unload_param;
- u32 wol_param;
- int i;
-
- switch (cdev->wol_config) {
- case QED_OV_WOL_DISABLED:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
- break;
- case QED_OV_WOL_ENABLED:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
- break;
- default:
- DP_NOTICE(cdev,
- "Unknown WoL configuration %02x\n", cdev->wol_config);
- /* Fallthrough */
- case QED_OV_WOL_DEFAULT:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
- }
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
- if (IS_VF(cdev)) {
- rc = qed_vf_pf_reset(p_hwfn);
- if (rc)
- return rc;
- continue;
- }
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
-
- /* Check for incorrect states */
- qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_TX, 0);
- qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_OTHER, 0);
-
- /* Disable PF in HW blocks */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- TCFC_REG_STRONG_ENABLE_PF, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- CCFC_REG_STRONG_ENABLE_PF, 0);
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
- /* Send unload command to MCP */
- rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_REQ, wol_param,
- &unload_resp, &unload_param);
- if (rc) {
- DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
- unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
- }
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
- rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_DONE,
- 0, &unload_resp, &unload_param);
- if (rc) {
- DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
- return rc;
- }
- }
+ /* Re-open incoming traffic */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ qed_ptt_release(p_hwfn, p_ptt);
- return rc;
+ return 0;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
@@ -1485,10 +1974,25 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
/* clear indirect access */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
+ } else {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+ }
/* Clean Previous errors if such exist */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1522,7 +2026,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
struct qed_sb_cnt_info sb_cnt_info;
- int num_features = 1;
+ u32 non_l2_sbs = 0;
if (IS_ENABLED(CONFIG_QED_RDMA) &&
p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
@@ -1530,204 +2034,260 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
* the status blocks equally between L2 / RoCE but with
* consideration as to how many l2 queues / cnqs we have.
*/
- num_features++;
-
feat_num[QED_RDMA_CNQ] =
- min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+ min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
- }
- feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
- num_features,
- RESC_NUM(p_hwfn, QED_L2_QUEUE));
-
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- feat_num[QED_VF_L2_QUE] =
- min_t(u32,
- RESC_NUM(p_hwfn, QED_L2_QUEUE) -
- FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);
+ non_l2_sbs = feat_num[QED_RDMA_CNQ];
+ }
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+ p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ /* Start by allocating VF queues, then PF's */
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ feat_num[QED_VF_L2_QUE] = min_t(u32,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE),
+ sb_cnt_info.sb_iov_cnt);
+ feat_num[QED_PF_L2_QUE] = min_t(u32,
+ RESC_NUM(p_hwfn, QED_SB) -
+ non_l2_sbs,
+ RESC_NUM(p_hwfn,
+ QED_L2_QUEUE) -
+ FEAT_NUM(p_hwfn,
+ QED_VF_L2_QUE));
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
- RESC_NUM(p_hwfn, QED_SB), num_features);
+ (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+ RESC_NUM(p_hwfn, QED_SB));
}
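+
+/* Worked example (illustrative, not part of the original patch): with
+ * 64 SBs, 16 RDMA CNQ RAM entries and a RoCE personality, QED_RDMA_CNQ
+ * becomes min(64 / 2, 16) = 16, so non_l2_sbs = 16 and the PF L2 queues
+ * are capped at 64 - 16 = 48 SBs, further limited by the L2-queue
+ * resource minus whatever was first reserved for the VFs.
+ */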
-static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
+const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
- enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
-
switch (res_id) {
- case QED_SB:
- mfw_res_id = RESOURCE_NUM_SB_E;
- break;
case QED_L2_QUEUE:
- mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
- break;
+ return "L2_QUEUE";
case QED_VPORT:
- mfw_res_id = RESOURCE_NUM_VPORT_E;
- break;
+ return "VPORT";
case QED_RSS_ENG:
- mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
- break;
+ return "RSS_ENG";
case QED_PQ:
- mfw_res_id = RESOURCE_NUM_PQ_E;
- break;
+ return "PQ";
case QED_RL:
- mfw_res_id = RESOURCE_NUM_RL_E;
- break;
+ return "RL";
case QED_MAC:
+ return "MAC";
case QED_VLAN:
- /* Each VFC resource can accommodate both a MAC and a VLAN */
- mfw_res_id = RESOURCE_VFC_FILTER_E;
- break;
+ return "VLAN";
+ case QED_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
case QED_ILT:
- mfw_res_id = RESOURCE_ILT_E;
- break;
+ return "ILT";
case QED_LL2_QUEUE:
- mfw_res_id = RESOURCE_LL2_QUEUE_E;
- break;
- case QED_RDMA_CNQ_RAM:
+ return "LL2_QUEUE";
case QED_CMDQS_CQS:
- /* CNQ/CMDQS are the same resource */
- mfw_res_id = RESOURCE_CQS_E;
- break;
+ return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
- mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
- break;
+ return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
+ return "BDQ";
+ case QED_SB:
+ return "SB";
default:
- break;
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static int
+__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ int rc;
+
+ rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return 0;
+}
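+
+/* Note the two failure levels above: a non-zero rc means the mailbox
+ * exchange with the MFW itself failed, which is fatal; an mcp_resp other
+ * than FW_MSG_CODE_RESOURCE_ALLOC_OK means the MFW answered but did not
+ * accept the value, which is only logged - the later allocation queries
+ * then fall back to the default values.
+ */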
+
+static int
+qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ int rc;
+
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+ switch (res_id) {
+ case QED_LL2_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ /* No need for a case for QED_CMDQS_CQS since
+ * CNQ/CMDQS are the same resource.
+ */
+ resc_max_val = NUM_OF_CMDQS_CQS;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
+ : RDMA_NUM_STATISTIC_COUNTERS_BB;
+ break;
+ case QED_BDQ:
+ resc_max_val = BDQ_NUM_RESOURCES;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc)
+ return rc;
+
+		/* There's no point in continuing to the next resource if the
+		 * command is not supported by the MFW.
+		 * We do continue if the command is supported but the resource
+		 * is unknown to the MFW; such a resource will later be
+		 * configured with the default allocation values.
+		 */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return -EINVAL;
}
- return mfw_res_id;
+ return 0;
}
-static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
- enum qed_resources res_id)
+static
+int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_sb_cnt_info sb_cnt_info;
- u32 dflt_resc_num = 0;
switch (res_id) {
- case QED_SB:
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- dflt_resc_num = sb_cnt_info.sb_cnt;
- break;
case QED_L2_QUEUE:
- dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ MAX_NUM_L2_QUEUES_BB) / num_funcs;
break;
case QED_VPORT:
- dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
break;
case QED_RSS_ENG:
- dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break;
case QED_PQ:
- /* The granularity of the PQs is 8 */
- dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
- dflt_resc_num &= ~0x7;
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case QED_RL:
- dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
- dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
- dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break;
case QED_LL2_QUEUE:
- dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
- dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+ *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
- dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
+ *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
+ RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
break;
- default:
+ case QED_BDQ:
+ if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_FCOE)
+ *p_resc_num = 0;
+ else
+ *p_resc_num = 1;
break;
+ case QED_SB:
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ *p_resc_num = sb_cnt_info.sb_cnt;
+ break;
+ default:
+ return -EINVAL;
}
- return dflt_resc_num;
-}
-
-static const char *qed_hw_get_resc_name(enum qed_resources res_id)
-{
switch (res_id) {
- case QED_SB:
- return "SB";
- case QED_L2_QUEUE:
- return "L2_QUEUE";
- case QED_VPORT:
- return "VPORT";
- case QED_RSS_ENG:
- return "RSS_ENG";
- case QED_PQ:
- return "PQ";
- case QED_RL:
- return "RL";
- case QED_MAC:
- return "MAC";
- case QED_VLAN:
- return "VLAN";
- case QED_RDMA_CNQ_RAM:
- return "RDMA_CNQ_RAM";
- case QED_ILT:
- return "ILT";
- case QED_LL2_QUEUE:
- return "LL2_QUEUE";
- case QED_CMDQS_CQS:
- return "CMDQS_CQS";
- case QED_RDMA_STATS_QUEUE:
- return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ else if (p_hwfn->cdev->num_ports_in_engines == 4)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ *p_resc_start = p_hwfn->port_id + 2;
+ break;
default:
- return "UNKNOWN_RESOURCE";
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
}
+
+ return 0;
}
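+
+/* Worked example (illustrative, not part of the original patch): on a
+ * 2-port engine, an FCoE function on port 1 gets its single BDQ at
+ * resc_start = port_id + 2 = 3, while an iSCSI function on the same
+ * port gets resc_start = 1; every other resource is simply carved out
+ * as resc_num * enabled_func_idx.
+ */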
-static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
- enum qed_resources res_id)
+static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id)
{
- u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
- u32 *p_resc_num, *p_resc_start;
- struct resource_info resc_info;
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
int rc;
p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id);
- /* Default values assumes that each function received equal share */
- dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
- if (!dflt_resc_num) {
+ rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc) {
DP_ERR(p_hwfn,
"Failed to get default amount for resource %d [%s]\n",
res_id, qed_hw_get_resc_name(res_id));
- return -EINVAL;
- }
- dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
-
- memset(&resc_info, 0, sizeof(resc_info));
- resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
- if (resc_info.res_id == RESOURCE_NUM_INVALID) {
- DP_ERR(p_hwfn,
- "Failed to match resource %d [%s] with the MFW resources\n",
- res_id, qed_hw_get_resc_name(res_id));
- return -EINVAL;
+ return rc;
}
- rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
- &mcp_resp, &mcp_param);
+ rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
if (rc) {
DP_NOTICE(p_hwfn,
"MFW response failure for an allocation request for resource %d [%s]\n",
@@ -1740,13 +2300,12 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
* - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW
*/
- if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
- mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
- DP_NOTICE(p_hwfn,
- "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
- res_id,
- qed_hw_get_resc_name(res_id),
- mcp_resp, dflt_resc_num, dflt_resc_start);
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
+ res_id,
+ qed_hw_get_resc_name(res_id),
+ mcp_resp, dflt_resc_num, dflt_resc_start);
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
goto out;
@@ -1754,13 +2313,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
	/* Special handling for status blocks; would be revised in the future */
if (res_id == QED_SB) {
- resc_info.size -= 1;
- resc_info.offset -= p_hwfn->enabled_func_idx;
+ *p_resc_num -= 1;
+ *p_resc_start -= p_hwfn->enabled_func_idx;
}
-
- *p_resc_num = resc_info.size;
- *p_resc_start = resc_info.offset;
-
out:
	/* PQs have to come in multiples of 8 [that's the HW granularity].
	 * Reduce the number so it fits.
@@ -1778,19 +2333,80 @@ out:
return 0;
}
-static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{
- u8 res_id;
int rc;
+ u8 res_id;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
- rc = qed_hw_set_resc_info(p_hwfn, res_id);
+ rc = __qed_hw_set_resc_info(p_hwfn, res_id);
if (rc)
return rc;
}
+ return 0;
+}
+
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params resc_unlock_params;
+ struct qed_resc_lock_params resc_lock_params;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u8 res_id;
+ int rc;
+
+	/* Setting the max values of the soft resources and the following
+	 * resource allocation queries should be atomic. Since several PFs can
+	 * run in parallel - a resource lock is needed.
+	 * If either the resource lock or resource set value commands are not
+	 * supported - skip the max values setting, release the lock if
+	 * needed, and proceed to the queries. Other failures, including a
+	 * failure to acquire the lock, will cause this function to fail.
+	 */
+ qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ QED_RESC_LOCK_RESC_ALLOC, false);
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (!rc && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ return -EBUSY;
+ } else {
+ rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
+ if (rc && rc != -EINVAL) {
+ DP_NOTICE(p_hwfn,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = qed_hw_set_resc_info(p_hwfn);
+ if (rc)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+
/* Sanity for ILT */
- if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
+ if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+ (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
RESC_START(p_hwfn, QED_ILT),
RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1799,8 +2415,6 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
qed_hw_set_feat(p_hwfn);
- DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
- "The numbers for each resource are:\n");
for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
qed_hw_get_resc_name(res_id),
@@ -1808,6 +2422,11 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
RESC_START(p_hwfn, res_id));
return 0;
+
+unlock_and_exit:
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ return rc;
}
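+
+/* The sequence above boils down to lock -> set soft-resource max values
+ * -> per-resource allocation queries -> unlock; a minimal sketch,
+ * assuming the MFW supports the resource lock:
+ *
+ *	qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+ *	qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
+ *	qed_hw_set_resc_info(p_hwfn);
+ *	qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ */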
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1860,9 +2479,15 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
+ break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+ break;
default:
DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
break;
@@ -1976,8 +2601,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ struct qed_dev *cdev = p_hwfn->cdev;
- num_funcs = MAX_NUM_PFS_BB;
+ num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected.
@@ -1990,12 +2616,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
if (reg_function_hide & 0x1) {
- if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
- num_funcs = 0;
- eng_mask = 0xaaaa;
+ if (QED_IS_BB(cdev)) {
+ if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
} else {
num_funcs = 1;
- eng_mask = 0x5554;
+ eng_mask = 0xfffe;
}
/* Get the number of the enabled functions on the engine */
@@ -2027,24 +2658,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
-static int
-qed_get_hw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_pci_personality personality)
+static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
u32 port_mode;
- int rc;
-
- /* Since all information is common, only first hwfns should do this */
- if (IS_LEAD_HWFN(p_hwfn)) {
- rc = qed_iov_hw_info(p_hwfn);
- if (rc)
- return rc;
- }
- /* Read the port mode */
- port_mode = qed_rd(p_hwfn, p_ptt,
- CNIG_REG_NW_PORT_MODE_BB_B0);
+ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
if (port_mode < 3) {
p_hwfn->cdev->num_ports_in_engines = 1;
@@ -2057,6 +2676,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
/* Default num_ports_in_engines to something */
p_hwfn->cdev->num_ports_in_engines = 1;
}
+}
+
+static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 port;
+ int i;
+
+ p_hwfn->cdev->num_ports_in_engines = 0;
+
+ for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+ port = qed_rd(p_hwfn, p_ptt,
+ CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+ if (port & 1)
+ p_hwfn->cdev->num_ports_in_engines++;
+ }
+
+ if (!p_hwfn->cdev->num_ports_in_engines) {
+ DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
+
+		/* Default num_ports_in_engines to something */
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ }
+}
+
+static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ if (QED_IS_BB(p_hwfn->cdev))
+ qed_hw_info_port_num_bb(p_hwfn, p_ptt);
+ else
+ qed_hw_info_port_num_ah(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ int rc;
+
+	/* Since all information is common, only the first hwfn should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
+ qed_hw_info_port_num(p_hwfn, p_ptt);
qed_hw_get_nvm_info(p_hwfn, p_ptt);
@@ -2085,33 +2752,48 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+ p_hwfn->hw_info.num_active_tc = 1;
+
qed_get_num_funcs(p_hwfn, p_ptt);
if (qed_mcp_is_init(p_hwfn))
p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
- return qed_hw_get_resc(p_hwfn);
+ return qed_hw_get_resc(p_hwfn, p_ptt);
}
-static int qed_get_dev_info(struct qed_dev *cdev)
+static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u16 device_id_mask;
u32 tmp;
/* Read Vendor Id / Device Id */
pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
- cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_NUM);
- cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_REV);
+ /* Determine type */
+ device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
+ switch (device_id_mask) {
+ case QED_DEV_ID_MASK_BB:
+ cdev->type = QED_DEV_TYPE_BB;
+ break;
+ case QED_DEV_ID_MASK_AH:
+ cdev->type = QED_DEV_TYPE_AH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
+ return -EBUSY;
+ }
+
+ cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+ cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+
MASK_FIELD(CHIP_REV, cdev->chip_rev);
- cdev->type = QED_DEV_TYPE_BB;
/* Learn number of HW-functions */
- tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CMT_ENABLED_FOR_PAIR);
+ tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
@@ -2120,15 +2802,17 @@ static int qed_get_dev_info(struct qed_dev *cdev)
cdev->num_hwfns = 1;
}
- cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+ cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
- cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_METAL);
+ cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, cdev->chip_metal);
DP_INFO(cdev->hwfns,
- "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ QED_IS_BB(cdev) ? "BB" : "AH",
+ 'A' + cdev->chip_rev,
+ (int)cdev->chip_metal,
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
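+
+	/* E.g. (illustrative): chip_rev 1 with chip_metal 0 is printed as
+	 * "B0", since the revision letter is simply 'A' + chip_rev.
+	 */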
@@ -2174,7 +2858,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
- rc = qed_get_dev_info(p_hwfn->cdev);
+ rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
goto err1;
}
@@ -2195,6 +2879,15 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
goto err2;
}
+	/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
+	 * is called, as it sets the number of ports in an engine.
+	 */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
+ }
+
/* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn);
if (rc)
@@ -2236,11 +2929,14 @@ int qed_hw_prepare(struct qed_dev *cdev,
u8 __iomem *addr;
/* adjust bar offset for second engine */
- addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ addr = cdev->regview +
+ qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
p_regview = addr;
- /* adjust doorbell bar offset for second engine */
- addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ addr = cdev->doorbells +
+ qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
p_doorbell = addr;
/* prepare second hw function */
@@ -3363,3 +4059,22 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
memset(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}
+
+int qed_device_num_engines(struct qed_dev *cdev)
+{
+ return QED_IS_BB(cdev) ? 2 : 1;
+}
+
+static int qed_device_num_ports(struct qed_dev *cdev)
+{
+	/* In CMT mode there is always only one port */
+ if (cdev->num_hwfns > 1)
+ return 1;
+
+ return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
+}
+
+int qed_device_get_port_id(struct qed_dev *cdev)
+{
+ return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
+}
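+
+/* Illustrative example (not part of the original patch): a BB device
+ * has two engines, so with 2 ports per engine and no CMT it exposes
+ * 4 ports and abs_pf_id 5 maps to port 5 % 4 = 1; in CMT mode
+ * (num_hwfns > 1) the device is always treated as a single port.
+ */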
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 6812003411cd..cefe3ee9064a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev);
*/
void qed_resc_setup(struct qed_dev *cdev);
+enum qed_override_force_load {
+ QED_OVERRIDE_FORCE_LOAD_NONE,
+ QED_OVERRIDE_FORCE_LOAD_ALWAYS,
+ QED_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct qed_drv_load_params {
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define QED_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
+
+ /* Allow overriding the default force load behavior */
+ enum qed_override_force_load override_force_load;
+};
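+
+/* Illustrative fill (not part of the original patch) for a regular
+ * probe path:
+ *
+ *	struct qed_drv_load_params drv_load = {
+ *		.is_crash_kernel = is_kdump_kernel(),
+ *		.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT,
+ *		.avoid_eng_reset = false,
+ *		.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE,
+ *	};
+ */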
+
+struct qed_hw_init_params {
+ /* Tunneling parameters */
+ struct qed_tunnel_info *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum qed_int_mode int_mode;
+
+	/* NPAR tx switching to be used for vports configured for tx-switching */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Driver load parameters */
+ struct qed_drv_load_params *p_drv_load_params;
+};
+
/**
* @brief qed_hw_init -
*
* @param cdev
- * @param p_tunn
- * @param b_hw_start
- * @param int_mode - interrupt mode [msix, inta, etc.] to use.
- * @param allow_npar_tx_switch - npar tx switching to be used
- * for vports configured for tx-switching.
- * @param bin_fw_data - binary fw data pointer in binary fw file.
- * Pass NULL if not using binary fw file.
+ * @param p_params
*
* @return int
*/
-int qed_hw_init(struct qed_dev *cdev,
- struct qed_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum qed_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data);
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
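+
+/* Minimal calling sketch (illustrative, assuming MSI-X, a firmware image
+ * already mapped at fw_data, and drv_load filled as sketched above):
+ *
+ *	struct qed_hw_init_params params = {
+ *		.p_tunn = NULL,
+ *		.b_hw_start = true,
+ *		.int_mode = QED_INT_MODE_MSIX,
+ *		.allow_npar_tx_switch = true,
+ *		.bin_fw_data = fw_data,
+ *		.p_drv_load_params = &drv_load,
+ *	};
+ *	rc = qed_hw_init(cdev, &params);
+ */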
/**
* @brief qed_hw_timers_stop_all - stop the timers HW block
@@ -128,26 +165,20 @@ int qed_hw_stop(struct qed_dev *cdev);
*
* @param cdev
*
+ * @return int
*/
-void qed_hw_stop_fastpath(struct qed_dev *cdev);
+int qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
* @brief qed_hw_start_fastpath - restart fastpath traffic,
* only if hw_stop_fastpath was called
*
- * @param cdev
- *
- */
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_hw_reset -
- *
- * @param cdev
+ * @param p_hwfn
*
* @return int
*/
-int qed_hw_reset(struct qed_dev *cdev);
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
/**
* @brief qed_hw_prepare -
@@ -441,4 +472,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
*/
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 coalesce, u8 qid, u16 sb_id);
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cbc81412174f..21a58fffd02b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -191,7 +191,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
- p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id);
+ p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -241,7 +241,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
struct fcoe_conn_offload_ramrod_data *p_data;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- u16 pq_id = 0, tmp;
+ u16 physical_q0, tmp;
int rc;
/* Get SPQ entry */
@@ -261,9 +261,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
p_data = &p_ramrod->offload_ramrod_data;
/* Transmission PQ is the first of the PF */
- pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL);
- p_conn->physical_q0 = cpu_to_le16(pq_id);
- p_data->physical_q0 = cpu_to_le16(pq_id);
+ physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q0);
+ p_data->physical_q0 = cpu_to_le16(physical_q0);
p_data->conn_id = cpu_to_le16(p_conn->conn_id);
DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
@@ -340,10 +340,10 @@ qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u32 active_segs = 0;
@@ -512,19 +512,31 @@ static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
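+
+/* Both producer-address helpers above now key the offset on the per-PF
+ * QED_BDQ resource (RESC_START) instead of the old port-derived
+ * FCOE_BDQ_ID(), matching the BDQ allocation introduced in
+ * qed_hw_get_dflt_resc().
+ */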
struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
@@ -753,6 +765,7 @@ static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
static int qed_fcoe_stop(struct qed_dev *cdev)
{
+ struct qed_ptt *p_ptt;
int rc;
if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
@@ -766,10 +779,15 @@ static int qed_fcoe_stop(struct qed_dev *cdev)
return -EINVAL;
}
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt)
+ return -EAGAIN;
+
/* Stop the fcoe */
- rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev),
+ rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
QED_SPQ_MODE_EBLOCK, NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37c2bfb663bb..858a57a73589 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -574,6 +574,7 @@ enum core_event_opcode {
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
@@ -625,6 +626,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_START,
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
+ CORE_RAMROD_RX_QUEUE_FLUSH,
MAX_CORE_RAMROD_CMD_ID
};
@@ -698,7 +700,8 @@ struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
__le16 echo;
- __le32 reserved1[7];
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved1[5];
};
union core_rx_cqe_union {
@@ -735,45 +738,46 @@ struct core_rx_stop_ramrod_data {
__le16 reserved2[2];
};
-struct core_tx_bd_flags {
- u8 as_bitfield;
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
-#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+struct core_tx_bd_data {
+ __le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
- u8 bitfield0;
-#define CORE_TX_BD_NBDS_MASK 0xF
-#define CORE_TX_BD_NBDS_SHIFT 0
-#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
-#define CORE_TX_BD_RESERVED0_MASK 0x7
-#define CORE_TX_BD_RESERVED0_SHIFT 5
- struct core_tx_bd_flags bd_flags;
+ struct core_tx_bd_data bd_data;
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK 0x1
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED1_MASK 0x1
-#define CORE_TX_BD_RESERVED1_SHIFT 15
+#define CORE_TX_BD_RESERVED_MASK 0x1
+#define CORE_TX_BD_RESERVED_SHIFT 15
};
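+
+/* The old 8-bit bd_flags plus the separate NBDS/ROCE_FLAV bitfield0 are
+ * folded into the single 16-bit bd_data above; callers would now set,
+ * e.g., CORE_TX_BD_DATA_NBDS via SET_FIELD() on bd_data.as_bitfield.
+ */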
enum core_tx_dest {
@@ -800,6 +804,14 @@ struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
+enum dcb_dhcp_update_flag {
+ DONT_UPDATE_DCB_DHCP,
+ UPDATE_DCB,
+ UPDATE_DSCP,
+ UPDATE_DCB_DSCP,
+ MAX_DCB_DHCP_UPDATE_FLAG
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -893,6 +905,12 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+enum fw_flow_ctrl_mode {
+ flow_ctrl_pause,
+ flow_ctrl_pfc,
+ MAX_FW_FLOW_CTRL_MODE
+};
+
/* Major and Minor hsi Versions */
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2];
@@ -921,6 +939,7 @@ enum malicious_vf_error_id {
ETH_EDPM_OUT_OF_SYNC,
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION,
+ ETH_ANTI_SPOOFING_ERR,
MAX_MALICIOUS_VF_ERROR_ID
};
@@ -1106,8 +1125,9 @@ struct tstorm_per_port_stat {
struct regpair ll2_mac_filter_discard;
struct regpair ll2_conn_disabled_discard;
struct regpair iscsi_irregular_pkt;
- struct regpair reserved;
+ struct regpair fcoe_irregular_pkt;
struct regpair roce_irregular_pkt;
+ struct regpair reserved;
struct regpair eth_irregular_pkt;
struct regpair reserved1;
struct regpair preroce_irregular_pkt;
@@ -1648,6 +1668,11 @@ enum block_addr {
GRCBASE_MS = 0x6a0000,
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_RGFS = 0x19d0000,
+ GRCBASE_TGFS = 0x19e0000,
+ GRCBASE_PTLD = 0x19f0000,
+ GRCBASE_YPLD = 0x1a10000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1732,6 +1757,11 @@ enum block_id {
BLOCK_MS,
BLOCK_PHY_PCIE,
BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_RGFS,
+ BLOCK_TGFS,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1783,9 +1813,9 @@ struct dbg_attn_reg_result {
__le32 data;
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
- __le16 attn_idx_offset;
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ __le16 block_attn_offset;
__le16 reserved;
__le32 sts_val;
__le32 mask_val;
@@ -1815,12 +1845,12 @@ struct dbg_mode_hdr {
/* Attention register */
struct dbg_attn_reg {
struct dbg_mode_hdr mode;
- __le16 attn_idx_offset;
+ __le16 block_attn_offset;
__le32 data;
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
__le32 sts_clr_address;
__le32 mask_address;
};
@@ -2001,6 +2031,20 @@ enum dbg_bus_clients {
MAX_DBG_BUS_CLIENTS
};
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
__le32 lo;
@@ -2092,10 +2136,18 @@ struct dbg_bus_data {
* DBG_BUS_TARGET_ID_PCI.
*/
__le16 reserved;
- struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+ struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
	struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each Storm */
};
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF,
+ DBG_BUS_FILTER_TYPE_PRE,
+ DBG_BUS_FILTER_TYPE_POST,
+ DBG_BUS_FILTER_TYPE_ON,
+ MAX_DBG_BUS_FILTER_TYPES
+};
+
/* Debug bus frame modes */
enum dbg_bus_frame_modes {
DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
@@ -2104,6 +2156,40 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
+enum dbg_bus_input_types {
+ DBG_BUS_INPUT_TYPE_STORM,
+ DBG_BUS_INPUT_TYPE_BLOCK,
+ MAX_DBG_BUS_INPUT_TYPES
+};
+
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+ MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD,
+ DBG_BUS_POST_TRIGGER_DROP,
+ MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+ DBG_BUS_PRE_TRIGGER_DROP,
+ MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+ MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
/* Debug bus states */
enum dbg_bus_states {
DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
@@ -2115,6 +2201,19 @@ enum dbg_bus_states {
MAX_DBG_BUS_STATES
};
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
/* Debug bus target IDs */
enum dbg_bus_targets {
/* records debug bus to DBG block internal buffer */
@@ -2128,13 +2227,10 @@ enum dbg_bus_targets {
/* GRC Dump data */
struct dbg_grc_data {
- __le32 param_val[40]; /* Value of each GRC parameter. Array size must
- * match the enum dbg_grc_params.
- */
- u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
- * set by the user (0/1). Array size must
- * match the enum dbg_grc_params.
- */
+ u8 params_initialized;
+ u8 reserved1;
+ __le16 reserved2;
+ __le32 param_val[48];
};
/* Debug GRC params */
@@ -2181,6 +2277,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_PARITY_SAFE,
DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
MAX_DBG_GRC_PARAMS
};
@@ -2280,7 +2378,7 @@ struct dbg_tools_data {
struct dbg_bus_data bus; /* Debug Bus data */
struct idle_chk_data idle_chk; /* Idle Check data */
u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
- u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+ u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
*/
u8 chip_id; /* Chip ID (from enum chip_ids) */
u8 platform_id; /* Platform ID (from enum platform_ids) */
@@ -2404,7 +2502,7 @@ struct fw_info_location {
enum init_modes {
MODE_RESERVED,
- MODE_BB_B0,
+ MODE_BB,
MODE_K2,
MODE_ASIC,
MODE_RESERVED2,
@@ -2418,7 +2516,6 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_40G,
MODE_RESERVED6,
MAX_INIT_MODES
};
@@ -2686,6 +2783,13 @@ struct iro {
*/
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
/**
+ * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+ * default value.
+ *
+ * @param p_hwfn - HW device data
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
* @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
* GRC Dump.
*
@@ -3369,6 +3473,11 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable);
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 pf_id);
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 pf_id, bool tcp, bool udp,
+ bool ipv4, bool ipv6);
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
@@ -3418,7 +3527,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
@@ -3482,7 +3591,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ {0x4cb0, 0x80, 0x0, 0x0, 0x80},
{0x6318, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -3521,13 +3630,13 @@ static const struct iro iro_arr[47] = {
{0xd888, 0x38, 0x0, 0x0, 0x24},
{0x12c38, 0x10, 0x0, 0x0, 0x8},
{0x11aa0, 0x38, 0x0, 0x0, 0x18},
- {0xa8c0, 0x30, 0x0, 0x0, 0x10},
- {0x86f8, 0x28, 0x0, 0x0, 0x18},
+ {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ {0x86f8, 0x30, 0x0, 0x0, 0x18},
{0x101f8, 0x10, 0x0, 0x0, 0x10},
{0xdd08, 0x48, 0x0, 0x0, 0x38},
{0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
- {0x5000, 0x10, 0x0, 0x0, 0x10},
+ {0x5020, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
@@ -4595,6 +4704,12 @@ enum eth_ipv4_frag_type {
MAX_ETH_IPV4_FRAG_TYPE
};
+enum eth_ip_type {
+ ETH_IPV4,
+ ETH_IPV6,
+ MAX_ETH_IP_TYPE
+};
+
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -4752,6 +4867,18 @@ struct eth_vport_tx_mode {
__le16 reserved2[3];
};
+enum gft_filter_update_action {
+ GFT_ADD_FILTER,
+ GFT_DELETE_FILTER,
+ MAX_GFT_FILTER_UPDATE_ACTION
+};
+
+enum gft_logic_filter_type {
+ GFT_FILTER_TYPE,
+ RFS_FILTER_TYPE,
+ MAX_GFT_LOGIC_FILTER_TYPE
+};
+
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -4822,6 +4949,16 @@ struct rx_udp_filter_data {
__le32 tenant_id;
};
+struct rx_update_gft_filter_data {
+ struct regpair pkt_hdr_addr;
+ __le16 pkt_hdr_length;
+ __le16 rx_qid_or_action_icid;
+ u8 vport_id;
+ u8 filter_type;
+ u8 filter_action;
+ u8 reserved;
+};
+
/* Ramrod data for rx queue start ramrod */
struct tx_queue_start_ramrod_data {
__le16 sb_id;
@@ -4944,7 +5081,10 @@ struct vport_update_ramrod_data_cmn {
u8 update_mtu_flg;
__le16 mtu;
- u8 reserved[2];
+ u8 update_ctl_frame_checks_en_flg;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
};
struct vport_update_ramrod_mcast {
@@ -4962,6 +5102,652 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
+struct gft_cam_line {
+ __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+struct gft_cam_line_mapped {
+ __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
+struct gft_ram_line {
+ __le32 low32bits;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 high32bits;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct xstorm_eth_conn_agctxdq_ext_ldpart {
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+};
+
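For context, every _MASK/_SHIFT pair in these aggregation-context structures is consumed through the GET_FIELD()/SET_FIELD() helpers from common_hsi.h. A minimal standalone sketch, with the helpers inlined for illustration and the field taken from the defines above:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint8_t u8;

	#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
	#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7

	/* Equivalent in effect to the GET_FIELD()/SET_FIELD() macros */
	#define GET_FIELD(value, name) \
		(((value) >> (name##_SHIFT)) & (name##_MASK))
	#define SET_FIELD(value, name, flag) \
		do { \
			(value) &= ~((name##_MASK) << (name##_SHIFT)); \
			(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
		} while (0)

	int main(void)
	{
		u8 flags1 = 0;

		/* mark the DQ completion flow active (bit 7 of flags1) */
		SET_FIELD(flags1, XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
		printf("flags1=0x%02x DQ_CF_ACTIVE=%d\n", flags1,
		       GET_FIELD(flags1, XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE));
		return 0;
	}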
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
@@ -6165,7 +6951,7 @@ struct ystorm_roce_conn_st_ctx {
};
struct xstorm_roce_conn_st_ctx {
- struct regpair temp[22];
+ struct regpair temp[24];
};
struct tstorm_roce_conn_st_ctx {
@@ -6220,7 +7006,7 @@ struct roce_create_qp_req_ramrod_data {
__le16 mtu;
__le16 pd;
__le16 sq_num_pages;
- __le16 reseved2;
+ __le16 low_latency_phy_queue;
struct regpair sq_pbl_addr;
struct regpair orq_pbl_addr;
__le16 local_mac_addr[3];
@@ -6234,7 +7020,7 @@ struct roce_create_qp_req_ramrod_data {
u8 stats_counter_id;
u8 reserved3[7];
__le32 cq_cid;
- __le16 physical_queue0;
+ __le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -6282,15 +7068,16 @@ struct roce_create_qp_resp_ramrod_data {
__le32 dst_gid[4];
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
- __le32 reserved2[2];
+ __le16 low_latency_phy_queue;
+ u8 reserved2[6];
__le32 cq_cid;
- __le16 physical_queue0;
+ __le16 regular_latency_phy_queue;
__le16 dpi;
};
struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw;
- __le32 reserved;
+ __le32 cq_prod;
};
struct roce_destroy_qp_req_ramrod_data {
@@ -6299,7 +7086,7 @@ struct roce_destroy_qp_req_ramrod_data {
struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw;
- __le32 reserved;
+ __le32 cq_prod;
};
struct roce_destroy_qp_resp_ramrod_data {
@@ -7426,6 +8213,7 @@ struct ystorm_fcoe_conn_st_ctx {
u8 fcp_rsp_size;
__le16 mss;
struct regpair reserved;
+ __le16 min_frame_size;
u8 protection_info_flags;
#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
@@ -7444,7 +8232,6 @@ struct ystorm_fcoe_conn_st_ctx {
#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
u8 fcp_xfer_size;
- u8 reserved3[2];
};
struct fcoe_vlan_fields {
@@ -8273,10 +9060,10 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
@@ -8322,10 +9109,10 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
@@ -8335,8 +9122,8 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
@@ -8440,7 +9227,7 @@ struct xstorm_iscsi_conn_ag_ctx {
__le32 reg10;
__le32 reg11;
__le32 exp_stat_sn;
- __le32 reg13;
+ __le32 ongoing_fast_rxmit_seq;
__le32 reg14;
__le32 reg15;
__le32 reg16;
@@ -8466,10 +9253,10 @@ struct tstorm_iscsi_conn_ag_ctx {
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
@@ -8490,10 +9277,10 @@ struct tstorm_iscsi_conn_ag_ctx {
#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
@@ -8539,7 +9326,7 @@ struct tstorm_iscsi_conn_ag_ctx {
__le32 reg6;
__le32 reg7;
__le32 reg8;
- u8 byte2;
+ u8 cid_offload_cnt;
u8 byte3;
__le16 word0;
};
@@ -8831,11 +9618,24 @@ struct eth_stats {
u64 r511;
u64 r1023;
u64 r1518;
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
+
+ union {
+ struct {
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ } bb0;
+ struct {
+ u64 unused1;
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+
u64 rfcs;
u64 rxcf;
u64 rxpf;
@@ -8852,14 +9652,36 @@ struct eth_stats {
u64 t511;
u64 t1023;
u64 t1518;
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
+
+ union {
+ struct {
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ } bb1;
+ struct {
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+
u64 txpf;
u64 txpp;
- u64 tlpiec;
- u64 tncl;
+
+ union {
+ struct {
+ u64 tlpiec;
+ u64 tncl;
+ } bb2;
+ struct {
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+
u64 rbyte;
u64 rxuca;
u64 rxmca;
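The unions above let a single eth_stats layout serve both ASIC generations: BB keeps the fine-grained frame-size buckets while AH folds everything past 1518 bytes into one r1519_to_max/t1519_to_max counter. A sketch of how a consumer might normalize the two, assuming a caller-supplied is_bb flag (the real driver derives it from the device family):

	/* RX frames longer than 1518 bytes, regardless of ASIC; 'is_bb' is an
	 * assumed flag (a QED_IS_BB()-style check in the real driver).
	 */
	static u64 eth_stats_rx_1519_to_max(const struct eth_stats *s, bool is_bb)
	{
		if (is_bb)
			return s->u0.bb0.r1522 + s->u0.bb0.r2047 +
			       s->u0.bb0.r4095 + s->u0.bb0.r9216 +
			       s->u0.bb0.r16383;
		return s->u0.ah0.r1519_to_max;
	}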
@@ -8943,12 +9765,12 @@ struct dcbx_ets_feature {
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
-#define DCBX_ISCSI_OOO_TC_SHIFT 8
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC (4)
+#define DCBX_TCP_OOO_TC (4)
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
#define DCBX_CEE_STRICT_PRIORITY 0xf
u32 tc_bw_tbl[2];
u32 tc_tsa_tbl[2];
@@ -8957,6 +9779,9 @@ struct dcbx_ets_feature {
#define DCBX_ETS_TSA_ETS 2
};
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
struct dcbx_app_priority_entry {
u32 entry;
#define DCBX_APP_PRI_MAP_MASK 0x000000ff
@@ -9067,6 +9892,10 @@ struct dcb_dscp_map {
struct public_global {
u32 max_path;
u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
u32 debug_mb_offset;
u32 phymod_dbg_mb_offset;
struct couple_mode_teaming cmt;
@@ -9248,9 +10077,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0
+#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
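With this change the driver advertises HSI version 2 in its load request. A minimal sketch of composing the drv_id dword from the fields above; the driver-type value here is illustrative, not taken from this patch:

	u32 drv_id = 0;

	drv_id |= DRV_ID_MCP_HSI_VER_CURRENT;		/* 2 << 16 after this patch */
	drv_id |= (4 << DRV_ID_DRV_TYPE_SHIFT) &	/* 4 assumed to mean "Linux" */
		  DRV_ID_DRV_TYPE_MASK;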
@@ -9345,6 +10176,7 @@ enum resource_id_enum {
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -9362,6 +10194,46 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
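A minimal sketch of populating misc0 in load_req_stc for an ordinary OS driver, using only the masks defined above (default lock timeout, no force semantics):

	struct load_req_stc req = { 0 };

	req.misc0 |= (DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) &
		     LOAD_REQ_ROLE_MASK;
	req.misc0 |= (LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT) &
		     LOAD_REQ_LOCK_TO_MASK;
	req.misc0 |= (LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT) &
		     LOAD_REQ_FORCE_MASK;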
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -9393,6 +10265,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -9405,12 +10278,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
@@ -9436,6 +10311,33 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_OPCODE_REQ 1
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_OPCODE_GNT 1
+#define RESOURCE_OPCODE_BUSY 2
+#define RESOURCE_OPCODE_RELEASED 3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+#define RESOURCE_DUMP 0
+
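A sketch of the request/response encoding behind DRV_MSG_CODE_RESOURCE_CMD: requesting the dump resource with a 10-second aging timeout, then decoding the response word (the mailbox call itself is elided):

	u32 param = 0, mcp_resp = 0, owner, opcode;

	param |= (RESOURCE_DUMP << RESOURCE_CMD_REQ_RESC_SHIFT) &
		 RESOURCE_CMD_REQ_RESC_MASK;
	param |= (RESOURCE_OPCODE_REQ_W_AGING << RESOURCE_CMD_REQ_OPCODE_SHIFT) &
		 RESOURCE_CMD_REQ_OPCODE_MASK;
	param |= (10 << RESOURCE_CMD_REQ_AGE_SHIFT) &
		 RESOURCE_CMD_REQ_AGE_MASK;

	/* ... send DRV_MSG_CODE_RESOURCE_CMD with 'param'; MFW fills mcp_resp ... */

	owner = (mcp_resp & RESOURCE_CMD_RSP_OWNER_MASK) >>
		RESOURCE_CMD_RSP_OWNER_SHIFT;
	opcode = (mcp_resp & RESOURCE_CMD_RSP_OPCODE_MASK) >>
		 RESOURCE_CMD_RSP_OPCODE_SHIFT;	/* RESOURCE_OPCODE_GNT on success */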
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -9524,12 +10426,16 @@ struct public_drv_mb {
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -9549,6 +10455,10 @@ struct public_drv_mb {
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
/* get pf rdma protocol command response */
#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
@@ -9659,6 +10569,8 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
+
u32 e_lane_cfg1;
u32 e_lane_cfg2;
u32 f_lane_cfg1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 899cad7f97ea..a05feb38c6ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -58,6 +58,7 @@ struct qed_ptt {
struct list_head list_entry;
unsigned int idx;
struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
};
struct qed_ptt_pool {
@@ -79,6 +80,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
p_pool->ptts[i].idx = i;
p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
p_pool->ptts[i].pxp.pretend.control = 0;
+ p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
if (i >= RESERVED_PTT_MAX)
list_add(&p_pool->ptts[i].list_entry,
&p_pool->free_list);
@@ -193,6 +195,11 @@ static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
offset = hw_addr - win_hw_addr;
+ if (p_ptt->hwfn_id != p_hwfn->my_id)
+ DP_NOTICE(p_hwfn,
+ "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+ p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
/* Verify the address is within the window */
if (hw_addr < win_hw_addr ||
offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
@@ -800,55 +807,3 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto, union qed_qm_pq_params *p_params)
-{
- u16 pq_id = 0;
-
- if ((proto == PROTOCOLID_CORE ||
- proto == PROTOCOLID_ETH ||
- proto == PROTOCOLID_ISCSI ||
- proto == PROTOCOLID_ROCE) && !p_params) {
- DP_NOTICE(p_hwfn,
- "Protocol %d received NULL PQ params\n", proto);
- return 0;
- }
-
- switch (proto) {
- case PROTOCOLID_CORE:
- if (p_params->core.tc == LB_TC)
- pq_id = p_hwfn->qm_info.pure_lb_pq;
- else if (p_params->core.tc == OOO_LB_TC)
- pq_id = p_hwfn->qm_info.ooo_pq;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_ETH:
- pq_id = p_params->eth.tc;
- if (p_params->eth.is_vf)
- pq_id += p_hwfn->qm_info.vf_queues_offset +
- p_params->eth.vf_id;
- break;
- case PROTOCOLID_ISCSI:
- if (p_params->iscsi.q_idx == 1)
- pq_id = p_hwfn->qm_info.pure_ack_pq;
- break;
- case PROTOCOLID_ROCE:
- if (p_params->roce.dcqcn)
- pq_id = p_params->roce.qpid;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- if (pq_id > p_hwfn->qm_info.num_pf_rls)
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_FCOE:
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- default:
- pq_id = 0;
- }
-
- pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
-
- return pq_id;
-}
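The per-protocol lookup removed above is superseded by the flag-based PQ getters from the queue-manager rework, which the callers below switch to; roughly:

	u16 pq_ofld = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);	/* offload PQ */
	u16 pq_tc = qed_get_cm_pq_idx_mcos(p_hwfn, tc);		/* per-TC PQ */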
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 9277264d2e65..f2505c691c26 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -297,9 +297,6 @@ union qed_qm_pq_params {
} roce;
};
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto, union qed_qm_pq_params *params);
-
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d891a6852695..67200c5498ab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
{
u32 qm_line_crd;
- /* In A0 - Limit the size of pbf queue so that only 511 commands with
- * the minimum size of 4 (FCoE minimum size)
- */
- bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
- if (is_bb_a0)
- cmdq_lines = min_t(u32, cmdq_lines, 1022);
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
QM_PF_QUEUE_GROUP_SIZE;
- bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
u16 i, pq_id, pq_group;
/* a bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
- u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
- u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
bool is_vf_pq = (i >= p_params->num_pf_pqs);
struct qm_rf_pq_map tx_pq_map;
+ bool rl_valid = p_params->pq_params[i].rl_valid &&
+ (p_params->pq_params[i].vport_id <
+ MAX_QM_GLOBAL_RLS);
+
/* update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
(p_params->pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
+
+ if (p_params->pq_params[i].rl_valid && !rl_valid)
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
/* fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- p_params->pq_params[i].rl_valid ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg,
+ QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- p_params->pq_params[i].rl_valid ?
+ rl_valid ?
p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
/* if PQ is associated with a VF, add indication
* to PQ VF mask
*/
- tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
- (1 << (pq_id % tx_pq_vf_mask_width));
+ tx_pq_vf_mask[pq_id /
+ QM_PF_QUEUE_GROUP_SIZE] |=
+ BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
if (p_params->pf_id < MAX_NUM_PFS_BB)
crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
else
- crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
- (p_params->pf_id % MAX_NUM_PFS_BB);
+ crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+ crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
QM_WFQ_CRD_REG_SIGN_BIT);
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
- inc_val);
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
return 0;
}
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
{
u8 i, vport_id;
+ if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ if (vport_id >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
return -1;
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
- /* comp ver */
- reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
- qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
- qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
- qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
/* EDPM with geneve tunnel not supported in BB_B0 */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;
@@ -955,3 +961,132 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
ip_geneve_enable ? 1 : 0);
}
+
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 pf_id)
+{
+ union gft_cam_line_union camline;
+ struct gft_ram_line ramline;
+ u32 *p_ramline, i;
+
+ p_ramline = (u32 *)&ramline;
+
+ /* stop using GFT logic */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+ memset(&camline, 0, sizeof(union gft_cam_line_union));
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ camline.cam_line_mapped.camline);
+ memset(&ramline, 0, sizeof(union gft_cam_line_union));
+
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
+ u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
+
+ hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
+
+ qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
+ }
+}
+
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 pf_id, bool tcp, bool udp,
+ bool ipv4, bool ipv6)
+{
+ u32 rfs_cm_hdr_event_id, *p_ramline;
+ union gft_cam_line_union camline;
+ struct gft_ram_line ramline;
+ int i;
+
+ rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ p_ramline = (u32 *)&ramline;
+
+ if (!ipv6 && !ipv4)
+ DP_NOTICE(p_hwfn,
+ "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+ if (!tcp && !udp)
+ DP_NOTICE(p_hwfn,
+ "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
+ /* Configure Registers for RFS mode */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
+ camline.cam_line_mapped.camline = 0;
+
+ /* mark the CAM line valid */
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* filters are per PF */
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ if (!(tcp && udp)) {
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ if (tcp)
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_TCP_PROTOCOL);
+ else
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_UDP_PROTOCOL);
+ }
+
+ if (!(ipv4 && ipv6)) {
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ if (ipv4)
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV4);
+ else
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV6);
+ }
+
+ /* write characteristics to cam */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ camline.cam_line_mapped.camline);
+ camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
+ PRS_REG_GFT_CAM +
+ CAM_LINE_SIZE * pf_id);
+
+ /* write line to RAM - compare to filter 4 tuple */
+ ramline.low32bits = 0;
+ ramline.high32bits = 0;
+ SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(p_ramline + i));
+
+ /* set default profile so that no filter match will happen */
+ ramline.low32bits = 0xffff;
+ ramline.high32bits = 0xffff;
+
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
+ *(p_ramline + i));
+}
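For illustration, a caller enabling aRFS classification for TCP over both IP versions on its PF might look like this (p_hwfn/p_ptt assumed to be held by the caller):

	qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
				true,	/* tcp */
				false,	/* udp */
				true,	/* ipv4 */
				true);	/* ipv6 */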
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 243b64e0d4dc..4a2e7be5bf72 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
}
/* First Dword contains metadata and should be skipped */
- buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+ buf_hdr = (struct bin_buffer_hdr *)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 84310b60849b..0ed24d6e6c65 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2500,8 +2500,9 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
/* Configure pi coalescing if set */
if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ u8 num_tc = p_hwfn->hw_info.num_hw_tc;
u8 timeset, timer_res;
- u8 num_tc = 1, i;
+ u8 i;
/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 098766f7fe88..339c91dfa658 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -181,6 +181,15 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_params = &p_hwfn->pf_params.iscsi_pf_params;
p_queue = &p_init->q_params;
+ /* Sanity */
+ if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) {
+ DP_ERR(p_hwfn,
+ "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
+ p_params->num_queues,
+ p_hwfn->hw_info.resc_num[QED_ISCSI_CQ]);
+ return -EINVAL;
+ }
+
SET_FIELD(p_init->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
@@ -216,7 +225,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
}
- p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id);
+ p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -270,11 +279,10 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
struct tcp_offload_params *p_tcp = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params pq_params;
- u16 pq0_id = 0, pq1_id = 0;
dma_addr_t r2tq_pbl_addr;
dma_addr_t xhq_pbl_addr;
dma_addr_t uhq_pbl_addr;
+ u16 physical_q;
int rc = 0;
u32 dval;
u16 wval;
@@ -297,16 +305,14 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
/* Transmission PQ is the first of the PF */
- memset(&pq_params, 0, sizeof(pq_params));
- pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
- p_conn->physical_q0 = cpu_to_le16(pq0_id);
- p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q);
+ p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q);
/* iSCSI Pure-ACK PQ */
- pq_params.iscsi.q_idx = 1;
- pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
- p_conn->physical_q1 = cpu_to_le16(pq1_id);
- p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_conn->physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
@@ -593,21 +599,31 @@ static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
- bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
- bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
@@ -857,6 +873,8 @@ static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
p_stats->iscsi_rx_packet_cnt =
HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
+ p_stats->iscsi_rx_new_ooo_isle_events_cnt =
+ HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt);
p_stats->iscsi_cmdq_threshold_cnt =
le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
p_stats->iscsi_rq_threshold_cnt =
@@ -1003,6 +1021,8 @@ static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
info->secondary_bdq_rq_addr =
qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);
+
return rc;
}
@@ -1304,6 +1324,26 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
}
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats)
+{
+ struct qed_iscsi_stats proto_stats;
+
+ /* Retrieve FW statistics */
+ memset(&proto_stats, 0, sizeof(proto_stats));
+ if (qed_iscsi_stats(cdev, &proto_stats)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ "Failed to collect ISCSI statistics\n");
+ return;
+ }
+
+ /* Translate FW statistics into struct */
+ stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt;
+ stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt;
+ stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt;
+ stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt;
+}
+
static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
.common = &qed_common_ops_pass,
.ll2 = &qed_ll2_ops_pass,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index 20c187f4ed0b..ae98f772cbc0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -64,13 +64,25 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info);
+
+/**
+ * @brief - Fills provided statistics struct with statistics.
+ *
+ * @param cdev
+ * @param stats - points to struct that will be filled with statistics.
+ */
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline struct qed_iscsi_info *qed_iscsi_alloc(
struct qed_hwfn *p_hwfn) { return NULL; }
static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
+ struct qed_iscsi_info *p_iscsi_info) {}
+static inline void
+qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index df932be5a4e5..746fed4099c8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -938,15 +938,12 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell)
{
- union qed_qm_pq_params pq_params;
int rc;
- memset(&pq_params, 0, sizeof(pq_params));
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
pbl_addr, pbl_size,
- qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
- &pq_params));
+ qed_get_cm_pq_idx_mcos(p_hwfn, tc));
if (rc)
return rc;
@@ -1470,13 +1467,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
- p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1502,10 +1506,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
- p_stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- p_stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1539,12 +1543,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
- p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1578,23 +1585,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
- p_stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
- p_stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- p_stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
- p_stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats)
{
+ struct qed_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
int j;
@@ -1605,54 +1615,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
offsetof(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.eth.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
- p_stats->rx_crc_errors += port_stats.eth.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
- p_stats->rx_pause_frames += port_stats.eth.rxpf;
- p_stats->rx_pfc_frames += port_stats.eth.rxpp;
- p_stats->rx_align_errors += port_stats.eth.raln;
- p_stats->rx_carrier_errors += port_stats.eth.rfcr;
- p_stats->rx_oversize_packets += port_stats.eth.rovr;
- p_stats->rx_jabbers += port_stats.eth.rjbr;
- p_stats->rx_undersize_packets += port_stats.eth.rund;
- p_stats->rx_fragments += port_stats.eth.rfrg;
- p_stats->tx_64_byte_packets += port_stats.eth.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
- p_stats->tx_pause_frames += port_stats.eth.txpf;
- p_stats->tx_pfc_frames += port_stats.eth.txpp;
- p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
- p_stats->tx_total_collisions += port_stats.eth.tncl;
- p_stats->rx_mac_bytes += port_stats.eth.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
- p_stats->tx_mac_bytes += port_stats.eth.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
- p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
- p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
- p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
- p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ struct qed_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct qed_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+ p_ah->tx_1519_to_max_byte_packets +=
+ port_stats.eth.u1.ah1.t1519_to_max;
}
}
@@ -1768,6 +1799,84 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
_qed_get_vport_stats(cdev, cdev->reset_stats);
}
+static void
+qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->arfs_enable) {
+ qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp, p_cfg_params->udp,
+ p_cfg_params->ipv4, p_cfg_params->ipv6);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable");
+ } else {
+ qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
+ p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+static int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length, u16 qid,
+ u8 vport_id, bool b_is_add)
+{
+ struct rx_update_gft_filter_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ if (rc)
+ return rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ }
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+ p_ramrod->pkt_hdr_length = cpu_to_le16(length);
+ p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->filter_type = RFS_FILTER_TYPE;
+ p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -1898,7 +2007,11 @@ static int qed_start_vport(struct qed_dev *cdev,
return rc;
}
- qed_hw_start_fastpath(p_hwfn);
+ rc = qed_hw_start_fastpath(p_hwfn);
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT fastpath\n");
+ return rc;
+ }
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started V-PORT %d with MTU %d\n",
@@ -2141,7 +2254,13 @@ static int qed_start_txq(struct qed_dev *cdev,
#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
- qed_hw_stop_fastpath(cdev);
+ int rc;
+
+ rc = qed_hw_stop_fastpath(cdev);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop Fastpath\n");
+ return rc;
+ }
return 0;
}
@@ -2166,31 +2285,46 @@ static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
static int qed_tunn_configure(struct qed_dev *cdev,
struct qed_tunn_params *tunn_params)
{
- struct qed_tunn_update_params tunn_info;
+ struct qed_tunnel_info tunn_info;
int i, rc;
- if (IS_VF(cdev))
- return 0;
-
memset(&tunn_info, 0, sizeof(tunn_info));
- if (tunn_params->update_vxlan_port == 1) {
- tunn_info.update_vxlan_udp_port = 1;
- tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+ if (tunn_params->update_vxlan_port) {
+ tunn_info.vxlan_port.b_update_port = true;
+ tunn_info.vxlan_port.port = tunn_params->vxlan_port;
}
- if (tunn_params->update_geneve_port == 1) {
- tunn_info.update_geneve_udp_port = 1;
- tunn_info.geneve_udp_port = tunn_params->geneve_port;
+ if (tunn_params->update_geneve_port) {
+ tunn_info.geneve_port.b_update_port = true;
+ tunn_info.geneve_port.port = tunn_params->geneve_port;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_tunnel_info *tun;
+
+ tun = &hwfn->cdev->tunnel;
rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
-
if (rc)
return rc;
+
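+ /* Propagate the updated UDP ports to the VFs via their bulletin boards */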
+ if (IS_PF_SRIOV(hwfn)) {
+ u16 vxlan_port, geneve_port;
+ int j;
+
+ vxlan_port = tun->vxlan_port.port;
+ geneve_port = tun->geneve_port.port;
+
+ qed_for_each_vf(hwfn, j) {
+ qed_iov_bulletin_set_udp_ports(hwfn, j,
+ vxlan_port,
+ geneve_port);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
}
return 0;
@@ -2315,6 +2449,59 @@ static int qed_configure_filter(struct qed_dev *cdev,
}
}
+static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_arfs_config_params arfs_config_params;
+
+ memset(&arfs_config_params, 0, sizeof(arfs_config_params));
+ arfs_config_params.tcp = true;
+ arfs_config_params.udp = true;
+ arfs_config_params.ipv4 = true;
+ arfs_config_params.ipv6 = true;
+ arfs_config_params.arfs_enable = en_searcher;
+
+ qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &arfs_config_params);
+ return 0;
+}
+
+static void
+qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
+ void *cookie, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
+ void *dev = p_hwfn->cdev->ops_cookie;
+
+ op->arfs_filter_op(dev, cookie, fw_return_code);
+}
+
+static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
+ dma_addr_t mapping, u16 length,
+ u16 vport_id, u16 rx_queue_id,
+ bool add_filter)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_spq_comp_cb cb;
+ int rc = -EINVAL;
+
+ cb.function = qed_arfs_sp_response_handler;
+ cb.cookie = cookie;
+
+ rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
+ &cb, mapping, length, rx_queue_id,
+ vport_id, add_filter);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to issue a-RFS filter configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+ "Successfully issued a-RFS filter configuration\n");
+
+ return rc;
+}
+
static int qed_fp_cqe_completion(struct qed_dev *dev,
u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
@@ -2356,6 +2543,8 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
.tunn_config = &qed_tunn_configure,
+ .ntuple_filter_config = &qed_ntuple_arfs_filter_config,
+ .configure_arfs_searcher = &qed_configure_arfs_searcher,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index e763abd334f6..6f44229899eb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -185,6 +185,14 @@ struct qed_filter_accept_flags {
#define QED_ACCEPT_BCAST 0x20
};
+struct qed_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ bool arfs_enable;
+};
+
struct qed_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 0d3cef409c96..09c86411918c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -597,7 +597,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
u8 bd_flags = 0;
if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
return bd_flags;
}
@@ -758,8 +758,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
p_buffer->placement_offset;
parse_flags = p_buffer->parse_flags;
bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
@@ -1090,7 +1090,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params pq_params;
u16 pq_id = 0, pbl_size;
int rc = -EINVAL;
@@ -1127,9 +1126,18 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = p_ll2_conn->conn.tx_tc;
- pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
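+ /* Pick the physical queue index directly from the connection's TX TC */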
+ switch (p_ll2_conn->conn.tx_tc) {
+ case LB_TC:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ break;
+ case OOO_LB_TC:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
+ break;
+ default:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ break;
+ }
+
p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
switch (conn_type) {
@@ -1400,13 +1408,21 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
+ struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
u8 qid;
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
- if (!p_ll2_conn)
- return -EINVAL;
+ if (!p_ll2_conn) {
+ rc = -EINVAL;
+ goto out;
+ }
+
p_rx = &p_ll2_conn->rx_queue;
p_tx = &p_ll2_conn->tx_queue;
@@ -1439,7 +1455,9 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_tx->cur_completing_frag_num = 0;
*p_tx->p_fw_cons = 0;
- qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+ if (rc)
+ goto out;
qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
p_ll2_conn->queue_id = qid;
@@ -1453,26 +1471,28 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
- qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
- qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE);
}
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
@@ -1591,33 +1611,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
p_tx->cur_send_frag_num++;
}
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2,
- struct qed_ll2_tx_packet *p_curp,
- u8 num_of_bds,
- enum core_tx_dest tx_dest,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum core_roce_flavor_type type,
- dma_addr_t first_frag,
- u16 first_frag_len)
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ enum core_tx_dest tx_dest,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum core_roce_flavor_type roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len)
{
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL;
- u16 frag_idx;
+ u16 bd_data = 0, frag_idx;
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
- start_bd->bd_flags.as_bitfield = bd_flags;
- start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
- CORE_TX_BD_FLAGS_START_BD_SHIFT;
- SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
- SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+ bd_data |= bd_flags;
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+ start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
@@ -1642,9 +1663,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- (*p_bd)->bd_flags.as_bitfield = 0;
+ (*p_bd)->bd_data.as_bitfield = 0;
(*p_bd)->bitfield1 = 0;
- (*p_bd)->bitfield0 = 0;
p_curp->bds_set[frag_idx].tx_frag = 0;
p_curp->bds_set[frag_idx].frag_len = 0;
}
@@ -1823,23 +1843,30 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
int rc = -EINVAL;
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
- if (!p_ll2_conn)
- return -EINVAL;
+ if (!p_ll2_conn) {
+ rc = -EINVAL;
+ goto out;
+ }
/* Stop Tx & Rx of connection, if needed */
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
qed_ll2_txq_flush(p_hwfn, connection_handle);
}
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
@@ -1847,14 +1874,16 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
- qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
- qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE);
}
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
@@ -2241,11 +2270,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
/* Request HW to calculate IP csum */
if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
if (skb_vlan_tag_present(skb)) {
vlan = skb_vlan_tag_get(skb);
- flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
}
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index eef30a598b40..59992cf20d42 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -45,6 +45,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
@@ -54,6 +55,8 @@
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
+#include "qed_iscsi.h"
+
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
@@ -227,10 +230,25 @@ err0:
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info)
{
+ struct qed_tunnel_info *tun = &cdev->tunnel;
struct qed_ptt *ptt;
memset(dev_info, 0, sizeof(struct qed_dev_info));
+ if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
dev_info->num_hwfns = cdev->num_hwfns;
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
@@ -238,6 +256,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
+ dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
if (IS_PF(cdev)) {
@@ -588,6 +607,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}
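+/* Wait for any in-flight slowpath interrupt handler on this hwfn's vector
+ * (MSI-X or legacy) to complete.
+ */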
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 id = p_hwfn->my_id;
+ u32 int_mode;
+
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX)
+ synchronize_irq(cdev->int_params.msix_table[id].vector);
+ else
+ synchronize_irq(cdev->pdev->irq);
+}
+
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
int i;
@@ -630,19 +662,6 @@ static int qed_nic_stop(struct qed_dev *cdev)
return rc;
}
-static int qed_nic_reset(struct qed_dev *cdev)
-{
- int rc;
-
- rc = qed_hw_reset(cdev);
- if (rc)
- return rc;
-
- qed_resc_free(cdev);
-
- return 0;
-}
-
static int qed_nic_setup(struct qed_dev *cdev)
{
int rc, i;
@@ -743,7 +762,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
- if (!IS_ENABLED(CONFIG_QED_RDMA))
+ if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+ QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
return 0;
for_each_hwfn(cdev, i)
@@ -875,10 +895,12 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
+ if (cdev->num_hwfns > 1 || IS_VF(cdev))
+ params->eth_pf_params.num_arfs_filters = 0;
+
/* In case we might support RDMA, don't allow qede to be greedy
* with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
*/
@@ -900,11 +922,15 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
- struct qed_tunn_start_params tunn_info;
+ struct qed_drv_load_params drv_load_params;
+ struct qed_hw_init_params hw_init_params;
struct qed_mcp_drv_version drv_version;
+ struct qed_tunnel_info tunn_info;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
+#ifdef CONFIG_RFS_ACCEL
struct qed_ptt *p_ptt;
+#endif
int rc = -EINVAL;
if (qed_iov_wq_start(cdev))
@@ -920,13 +946,18 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err;
}
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (p_ptt) {
- QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
- } else {
- DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
- goto err;
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1) {
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (p_ptt) {
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to acquire PTT for aRFS\n");
+ goto err;
+ }
}
+#endif
}
cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -953,27 +984,47 @@ static int qed_slowpath_start(struct qed_dev *cdev,
qed_dbg_pf_init(cdev);
}
- memset(&tunn_info, 0, sizeof(tunn_info));
- tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
- 1 << QED_MODE_L2GRE_TUNN |
- 1 << QED_MODE_IPGRE_TUNN |
- 1 << QED_MODE_L2GENEVE_TUNN |
- 1 << QED_MODE_IPGENEVE_TUNN;
-
- tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
-
/* Start the slowpath */
- rc = qed_hw_init(cdev, &tunn_info, true,
- cdev->int_params.out.int_mode,
- true, data);
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.vxlan.b_mode_enabled = true;
+ tunn_info.l2_gre.b_mode_enabled = true;
+ tunn_info.ip_gre.b_mode_enabled = true;
+ tunn_info.l2_geneve.b_mode_enabled = true;
+ tunn_info.ip_geneve.b_mode_enabled = true;
+ tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ hw_init_params.p_tunn = &tunn_info;
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = cdev->int_params.out.int_mode;
+ hw_init_params.allow_npar_tx_switch = true;
+ hw_init_params.bin_fw_data = data;
+
+ memset(&drv_load_params, 0, sizeof(drv_load_params));
+ drv_load_params.is_crash_kernel = is_kdump_kernel();
+ drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+ drv_load_params.avoid_eng_reset = false;
+ drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+ hw_init_params.p_drv_load_params = &drv_load_params;
+
+ rc = qed_hw_init(cdev, &hw_init_params);
if (rc)
goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ if (IS_PF(cdev)) {
+ cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
+ BIT(QED_MODE_L2GENEVE_TUNN) |
+ BIT(QED_MODE_IPGENEVE_TUNN) |
+ BIT(QED_MODE_L2GRE_TUNN) |
+ BIT(QED_MODE_IPGRE_TUNN));
+ }
+
/* Allocate LL2 interface if needed */
if (QED_LEADING_HWFN(cdev)->using_ll2) {
rc = qed_ll2_alloc_if(cdev);
@@ -1014,9 +1065,12 @@ err:
if (IS_PF(cdev))
release_firmware(cdev->firmware);
- if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
+#ifdef CONFIG_RFS_ACCEL
+ if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt)
qed_ptt_release(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_iov_wq_stop(cdev, false);
@@ -1031,8 +1085,11 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
- qed_ptt_release(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
@@ -1042,7 +1099,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
}
qed_disable_msix(cdev);
- qed_nic_reset(cdev);
+
+ qed_resc_free(cdev);
qed_iov_wq_stop(cdev, true);
@@ -1653,13 +1711,18 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) {
case QED_MCP_LAN_STATS:
qed_get_vport_stats(cdev, &eth_stats);
- stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.ucast_rx_pkts =
+ eth_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts =
+ eth_stats.common.tx_ucast_pkts;
stats->lan_stats.fcs_err = -1;
break;
case QED_MCP_FCOE_STATS:
qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
break;
+ case QED_MCP_ISCSI_STATS:
+ qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+ break;
default:
DP_ERR(cdev, "Invalid protocol type = %d\n", type);
return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 87fde205149f..7266b36a2655 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -111,12 +111,71 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
}
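+/* Tracks a posted mailbox command until the MFW response carrying its
+ * expected sequence number arrives.
+ */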
+struct qed_mcp_cmd_elem {
+ struct list_head list;
+ struct qed_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *
+qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+ p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
+ if (!p_cmd_elem)
+ goto out;
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_cmd_elem *p_cmd_elem)
+{
+ list_del(&p_cmd_elem->list);
+ kfree(p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
+ u16 seq_num)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+ list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return NULL;
+}
+
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
+ struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+
kfree(p_hwfn->mcp_info->mfw_mb_cur);
kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ list_for_each_entry_safe(p_cmd_elem,
+ p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list) {
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
}
+
kfree(p_hwfn->mcp_info);
return 0;
@@ -160,7 +219,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
- p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
return 0;
}
@@ -176,6 +235,12 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
goto err;
p_info = p_hwfn->mcp_info;
+ /* Initialize the MFW spinlocks */
+ spin_lock_init(&p_info->cmd_lock);
+ spin_lock_init(&p_info->link_lock);
+
+ INIT_LIST_HEAD(&p_info->cmd_list);
+
if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
DP_NOTICE(p_hwfn, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicate that
@@ -190,10 +255,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW spinlock */
- spin_lock_init(&p_info->lock);
- spin_lock_init(&p_info->link_lock);
-
return 0;
err:
@@ -201,68 +262,39 @@ err:
return -ENOMEM;
}
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
+static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- spin_lock_bh(&p_hwfn->mcp_info->lock);
+ u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- /* The spinlock shouldn't be acquired when the mailbox command is
- * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
- * pending [UN]LOAD_REQ command of another PF together with a spinlock
- * (i.e. interrupts are disabled) - can lead to a deadlock.
- * It is assumed that for a single PF, no other mailbox commands can be
- * sent from another context while sending LOAD_REQ, and that any
- * parallel commands to UNLOAD_REQ can be cancelled.
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
*/
- if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
- p_hwfn->mcp_info->block_mb_sending = false;
-
- if (p_hwfn->mcp_info->block_mb_sending) {
- DP_NOTICE(p_hwfn,
- "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
- cmd);
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
- return -EBUSY;
- }
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
- if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- p_hwfn->mcp_info->block_mb_sending = true;
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ qed_load_mcp_offsets(p_hwfn, p_ptt);
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
}
-
- return 0;
-}
-
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
-{
- if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
- u8 delay = CHIP_MCP_RESP_ITER_US;
- u32 org_mcp_reset_seq, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
- if (rc != 0)
- return rc;
+ /* Ensure that only a single thread is accessing the mailbox */
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
- /* Set drv command along with the updated sequence */
org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
- (DRV_MSG_CODE_MCP_RESET | seq));
+
+ /* Set drv command along with the updated sequence */
+ qed_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
do {
/* Wait for MFW response */
@@ -281,72 +313,207 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
rc = -EAGAIN;
}
- qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
-static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
- u8 delay = CHIP_MCP_RESP_ITER_US;
- u32 seq, cnt = 1, actual_mb_seq;
- int rc = 0;
-
- /* Get actual driver mailbox sequence */
- actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK;
+ struct qed_mcp_cmd_elem *p_cmd_elem;
- /* Use MCP history register to check if MCP reset occurred between
- * init time and now.
+ /* There is at most one pending command at any given time, and if it
+ * exists - it is placed at the HEAD of the list.
*/
- if (p_hwfn->mcp_info->mcp_hist !=
- qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
- qed_load_mcp_offsets(p_hwfn, p_ptt);
- qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
+ struct qed_mcp_cmd_elem, list);
+ return !p_cmd_elem->b_is_completed;
}
- seq = ++p_hwfn->mcp_info->drv_mb_seq;
- /* Set drv param */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+ return false;
+}
- /* Set drv command along with the updated sequence */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+/* Must be called while cmd_lock is acquired */
+static int
+qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params *p_mb_params;
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new unhandled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return -EAGAIN;
+
+ p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return -EINVAL;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb,
+ union_data);
+ qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return 0;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data);
+ memset(&union_data, 0, sizeof(union_data));
+ if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+ memcpy(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "wrote command (%x) to MFW MB param 0x%08x\n",
- (cmd | seq), param);
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+static int
+_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 delay)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+ u32 cnt = 0;
+ u16 seq_num;
+ int rc = 0;
+
+ /* Wait until the mailbox is non-occupied */
do {
- /* Wait for MFW response */
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (!qed_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
udelay(delay);
- *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ } while (++cnt < max_retries);
- /* Give the FW up to 5 second (500*10ms) */
- } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
- (cnt++ < QED_DRV_MB_MAX_RETRIES));
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return -EAGAIN;
+ }
- DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "[after %d ms] read (%x) seq is (%x) from FW MB\n",
- cnt * delay, *o_mcp_resp, seq);
-
- /* Is this a reply to our command? */
- if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
- *o_mcp_resp &= FW_MSG_CODE_MASK;
- /* Get the MCP param */
- *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
- } else {
- /* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
- cmd, param);
- *o_mcp_resp = 0;
- rc = -EAGAIN;
+ /* Send the mailbox command */
+ qed_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ /* Wait for the MFW response */
+ do {
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
+ udelay(delay);
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (p_cmd_elem->b_is_completed)
+ break;
+
+ rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ return -EAGAIN;
}
+
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp,
+ p_mb_params->mcp_param,
+ (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return 0;
+
+err:
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
@@ -354,9 +521,9 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_mb_params *p_mb_params)
{
- u32 union_data_addr;
-
- int rc;
+ size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -364,33 +531,17 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
- union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
- offsetof(struct public_drv_mb, union_data);
-
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
- if (rc)
- return rc;
-
- if (p_mb_params->p_data_src != NULL)
- qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
- p_mb_params->p_data_src,
- sizeof(*p_mb_params->p_data_src));
-
- rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
- p_mb_params->param, &p_mb_params->mcp_resp,
- &p_mb_params->mcp_param);
-
- if (p_mb_params->p_data_dst != NULL)
- qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr,
- sizeof(*p_mb_params->p_data_dst));
-
- qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size,
+ p_mb_params->data_dst_size, union_data_size);
+ return -EINVAL;
+ }
- return rc;
+ return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ delay);
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -401,32 +552,12 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data data_src;
int rc;
memset(&mb_params, 0, sizeof(mb_params));
- memset(&data_src, 0, sizeof(data_src));
mb_params.cmd = cmd;
mb_params.param = param;
- /* In case of UNLOAD_DONE, set the primary MAC */
- if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
- (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
- u8 *p_mac = p_hwfn->cdev->wol_mac;
-
- data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
- data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
- p_mac[4] << 8 | p_mac[5];
-
- DP_VERBOSE(p_hwfn,
- (QED_MSG_SP | NETIF_MSG_IFDOWN),
- "Setting WoL MAC: %pM --> [%08x,%08x]\n",
- p_mac, data_src.wol_mac.mac_upper,
- data_src.wol_mac.mac_lower);
-
- mb_params.p_data_src = &data_src;
- }
-
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
@@ -445,13 +576,17 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
int rc;
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
@@ -460,55 +595,413 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_param = mb_params.mcp_param;
*o_txn_size = *o_mcp_param;
- memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+ memcpy(o_buf, raw_data, *o_txn_size);
return 0;
}
-int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *p_load_code)
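+/* Decide whether a force load may be sent, based on the roles of the
+ * requesting and existing drivers and on the user's override setting.
+ */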
+static bool
+qed_mcp_can_force_load(u8 drv_role,
+ u8 exist_drv_role,
+ enum qed_override_force_load override_force_load)
+{
+ bool can_force_load = false;
+
+ switch (override_force_load) {
+ case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
+ can_force_load = true;
+ break;
+ case QED_OVERRIDE_FORCE_LOAD_NEVER:
+ can_force_load = false;
+ break;
+ default:
+ can_force_load = (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP &&
+ exist_drv_role == DRV_ROLE_OS);
+ break;
+ }
+
+ return can_force_load;
+}
+
+static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define CONFIG_QEDE_BITMAP_IDX BIT(0)
+#define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1)
+#define CONFIG_QEDR_BITMAP_IDX BIT(2)
+#define CONFIG_QEDF_BITMAP_IDX BIT(4)
+#define CONFIG_QEDI_BITMAP_IDX BIT(5)
+#define CONFIG_QED_LL2_BITMAP_IDX BIT(6)
+
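+/* Build a bitmap of the compiled-in qed-family options; it is reported to
+ * the MFW in drv_ver_1 of the load request.
+ */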
+static u32 qed_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+ if (IS_ENABLED(CONFIG_QEDE))
+ config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_SRIOV))
+ config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_RDMA))
+ config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_FCOE))
+ config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_ISCSI))
+ config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_LL2))
+ config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
+
+ return config_bitmap;
+}
+
+struct qed_load_req_in_params {
+ u8 hsi_ver;
+#define QED_LOAD_REQ_HSI_VER_DEFAULT 0
+#define QED_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct qed_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static int
+__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_in_params *p_in_params,
+ struct qed_load_req_out_params *p_out_params)
{
- struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
int rc;
+ memset(&load_req, 0, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+ p_in_params->force_cmd);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
memset(&mb_params, 0, sizeof(mb_params));
- /* Load Request */
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
- mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- cdev->drv_type;
- memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
- mb_params.p_data_src = &union_data;
- rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
- /* if mcp fails to respond we must abort */
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0,
+ load_req.drv_ver_1,
+ load_req.fw_ver,
+ load_req.misc0,
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ QED_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_LOCK_TO),
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
+ }
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) {
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
return rc;
}
- *p_load_code = mb_params.mcp_resp;
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0,
+ load_rsp.drv_ver_1,
+ load_rsp.fw_ver,
+ load_rsp.misc0,
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return 0;
+}
+
+static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
+ enum qed_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case QED_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case QED_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_load_req_force {
+ QED_LOAD_REQ_FORCE_NONE,
+ QED_LOAD_REQ_FORCE_PF,
+ QED_LOAD_REQ_FORCE_ALL,
+};
+
+static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
+ enum qed_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case QED_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case QED_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case QED_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ }
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_params *p_params)
+{
+ struct qed_load_req_out_params out_params;
+ struct qed_load_req_in_params in_params;
+ u8 mfw_drv_role, mfw_force_cmd;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_0 = QED_VERSION;
+ in_params.drv_ver_1 = qed_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+ rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ if (rc)
+ return rc;
+
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ qed_get_mfw_force_cmd(p_hwfn,
+ QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
- /* If MFW refused (e.g. other port is in diagnostic mode) we
- * must abort. This can happen in the following cases:
- * - Other port is in diagnostic mode
- * - Previously loaded function on the engine is not compliant with
- * the requester.
- * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
- * -
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
*/
- if (!(*p_load_code) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
- DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
+
+ in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ if (qed_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role,
+ p_params->override_force_load)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ qed_get_mfw_force_cmd(p_hwfn,
+ QED_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+ DP_NOTICE(p_hwfn,
+ "Avoid sending a force load request to prevent disruption of active PFs\n");
+
+ qed_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return -EBUSY;
+ }
+ }
+
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
+ */
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ */
+ DP_NOTICE(p_hwfn, "PF is already loaded\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
return -EBUSY;
}
+ p_params->load_code = out_params.load_code;
+
return 0;
}
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ switch (p_hwfn->cdev->wol_config) {
+ case QED_OV_WOL_DISABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
+ break;
+ case QED_OV_WOL_ENABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unknown WoL configuration %02x\n",
+ p_hwfn->cdev->wol_config);
+ /* Fallthrough */
+ case QED_OV_WOL_DEFAULT:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+ }
+
+ return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ /* Set the primary MAC if WoL is enabled */
+ if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
+ u8 *p_mac = p_hwfn->cdev->wol_mac;
+
+ memset(&wol_mac, 0, sizeof(wol_mac));
+ wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+ wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+ p_mac[4] << 8 | p_mac[5];
+
+ DP_VERBOSE(p_hwfn,
+ (QED_MSG_SP | NETIF_MSG_IFDOWN),
+ "Setting WoL MAC: %pM --> [%08x,%08x]\n",
+ p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
+
+ mb_params.p_data_src = &wol_mac;
+ mb_params.data_src_size = sizeof(wol_mac);
+ }
+
+ return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
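
As a side note, the mac_upper/mac_lower split above can be sanity-checked in isolation. A minimal userspace model (the sample address is made up): mac_upper carries the two leading bytes, mac_lower the remaining four, matching the struct mcp_mac layout used by the patch.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t p_mac[6] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };
		uint32_t mac_upper, mac_lower;

		/* Same packing as qed_mcp_unload_done() above */
		mac_upper = p_mac[0] << 8 | p_mac[1];
		mac_lower = (uint32_t)p_mac[2] << 24 | p_mac[3] << 16 |
			    p_mac[4] << 8 | p_mac[5];

		printf("[%08x,%08x]\n", mac_upper, mac_lower); /* [0000000e,1e112233] */
		return 0;
	}
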
+
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -549,7 +1042,6 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
int rc;
int i;
@@ -560,8 +1052,8 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
- memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
@@ -744,33 +1236,31 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
- struct eth_phy_cfg *phy_cfg;
+ struct eth_phy_cfg phy_cfg;
int rc = 0;
u32 cmd;
/* Set the shmem configuration according to params */
- phy_cfg = &union_data.drv_phy_cfg;
- memset(phy_cfg, 0, sizeof(*phy_cfg));
+ memset(&phy_cfg, 0, sizeof(phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- phy_cfg->speed = params->speed.forced_speed;
- phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
- phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
- phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
- phy_cfg->adv_speed = params->speed.advertised_speeds;
- phy_cfg->loopback_mode = params->loopback_mode;
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
p_hwfn->b_drv_link_init = b_up;
if (b_up) {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
- phy_cfg->speed,
- phy_cfg->pause,
- phy_cfg->adv_speed,
- phy_cfg->loopback_mode,
- phy_cfg->feature_config_flags);
+ phy_cfg.speed,
+ phy_cfg.pause,
+ phy_cfg.adv_speed,
+ phy_cfg.loopback_mode,
+ phy_cfg.feature_config_flags);
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link\n");
@@ -778,7 +1268,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
@@ -805,7 +1296,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
enum qed_mcp_protocol_type stats_type;
union qed_mcp_protocol_stats stats;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 hsi_param;
switch (type) {
@@ -835,8 +1325,8 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_STATS;
mb_params.param = hsi_param;
- memcpy(&union_data, &stats, sizeof(stats));
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
@@ -963,7 +1453,7 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
default:
- DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
}
}
@@ -1316,24 +1806,23 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver)
{
- struct drv_version_stc *p_drv_version;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct drv_version_stc drv_version;
__be32 val;
u32 i;
int rc;
- p_drv_version = &union_data.drv_version;
- p_drv_version->version = p_ver->version;
-
+ memset(&drv_version, 0, sizeof(drv_version));
+ drv_version.version = p_ver->version;
for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
- *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
}
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1450,7 +1939,7 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *mac)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u32 mfw_mac[2];
int rc;
memset(&mb_params, 0, sizeof(mb_params));
@@ -1458,8 +1947,17 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
DRV_MSG_CODE_VMAC_TYPE_SHIFT;
mb_params.param |= MCP_PF_ID(p_hwfn);
- ether_addr_copy(&union_data.raw_data[0], mac);
- mb_params.p_data_src = &union_data;
+
+ /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
+ * in 32-bit granularity.
+ * So the MAC has to be set in native order [and not byte order],
+ * otherwise it would be read incorrectly by MFW after swap.
+ */
+ mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+ mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+ mb_params.p_data_src = (u8 *)mfw_mac;
+ mb_params.data_src_size = 8;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
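
The comment above is the crux of this hunk: because the MFW reads SHMEM with 32-bit big-endian semantics, the MAC must be packed in native order so it comes out in wire order after the swap. A standalone sketch of that packing (the sample MAC is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };
		uint32_t mfw_mac[2];

		/* Byte 0 goes into the most significant byte of the first
		 * word, so a 32-bit big-endian reader sees wire order.
		 */
		mfw_mac[0] = (uint32_t)mac[0] << 24 | mac[1] << 16 |
			     mac[2] << 8 | mac[3];
		mfw_mac[1] = (uint32_t)mac[4] << 24 | mac[5] << 16;

		printf("%08x %08x\n", mfw_mac[0], mfw_mac[1]); /* 000e1eaa bbcc0000 */
		return 0;
	}
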
@@ -1724,52 +2222,426 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
return rc;
}
-#define QED_RESC_ALLOC_VERSION_MAJOR 1
+static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case QED_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case QED_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case QED_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case QED_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case QED_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case QED_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case QED_MAC:
+ case QED_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case QED_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case QED_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ case QED_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case QED_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+#define QED_RESC_ALLOC_VERSION_MAJOR 2
#define QED_RESC_ALLOC_VERSION_MINOR 0
#define QED_RESC_ALLOC_VERSION \
((QED_RESC_ALLOC_VERSION_MAJOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
(QED_RESC_ALLOC_VERSION_MINOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
-int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param)
+
+struct qed_resc_alloc_in_params {
+ u32 cmd;
+ enum qed_resources res_id;
+ u32 resc_max_val;
+};
+
+struct qed_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+static int
+qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_alloc_in_params *p_in_params,
+ struct qed_resc_alloc_out_params *p_out_params)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct resource_info mfw_resc_info;
int rc;
+ memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ qed_hw_get_resc_name(p_in_params->res_id));
+ return -EINVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ /* Fallthrough */
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return -EINVAL;
+ }
+
memset(&mb_params, 0, sizeof(mb_params));
- memset(&union_data, 0, sizeof(union_data));
- mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ mb_params.cmd = p_in_params->cmd;
mb_params.param = QED_RESC_ALLOC_VERSION;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
- /* Need to have a sufficient large struct, as the cmd_and_union
- * is going to do memcpy from and to it.
- */
- memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd,
+ p_in_params->res_id,
+ qed_hw_get_resc_name(p_in_params->res_id),
+ QED_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ QED_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
- mb_params.p_data_src = &union_data;
- mb_params.p_data_dst = &union_data;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
- /* Copy the data back */
- memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
- *p_mcp_resp = mb_params.mcp_resp;
- *p_mcp_param = mb_params.mcp_param;
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ QED_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ QED_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num,
+ p_out_params->resc_start,
+ p_out_params->vf_resc_num,
+ p_out_params->vf_resc_start, p_out_params->flags);
+
+ return 0;
+}
+
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ struct qed_resc_alloc_out_params out_params;
+ struct qed_resc_alloc_in_params in_params;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return 0;
+}
+
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct qed_resc_alloc_out_params out_params;
+ struct qed_resc_alloc_in_params in_params;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
+
+ return 0;
+}
+
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param;
+
+ return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
+{
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+ p_mcp_resp, p_mcp_param);
+ if (rc)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return -EINVAL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+int
+__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ int rc;
+
+ switch (p_params->timeout) {
+ case QED_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case QED_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+ if (rc)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
- "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
- *p_mcp_param,
- p_resc_info->res_id,
- p_resc_info->size,
- p_resc_info->offset,
- p_resc_info->vf_size,
- p_resc_info->vf_offset, p_resc_info->flags);
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return -EINVAL;
+ }
return 0;
}
+
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ int rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ msleep(retry_interval_in_ms);
+ } else {
+ udelay(p_params->retry_interval);
+ }
+ }
+
+ rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return 0;
+}
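
A quick worked example of the sleep-path conversion above: retry_interval is specified in microseconds, so the default of 10000 usec rounds up to msleep(10). A standalone model:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int retry_interval = 10000; /* default, in usec */

		/* usec -> ms, rounded up, as in qed_mcp_resc_lock() */
		printf("msleep(%u)\n", DIV_ROUND_UP(retry_interval, 1000));
		return 0;
	}
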
+
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ int rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+ if (rc)
+ return rc;
+
+ /* Analyze the response */
+ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ /* Fallthrough */
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+ enum qed_resc_lock resource,
+ bool b_is_permanent)
+{
+ if (p_lock) {
+ memset(p_lock, 0, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+ * point in trying to acquire them more than once since it's
+ * unexpected another entity would release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock) {
+ memset(p_unlock, 0, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
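
For context, a hedged sketch of how a caller might drive the reworked load-request flow; the real consumer is the hw-init path on the qed_dev.c side (not part of this hunk), and the field values here are illustrative rather than the driver's actual defaults.

	static int example_drive_load_req(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
	{
		struct qed_load_req_params params;
		int rc;

		memset(&params, 0, sizeof(params));
		params.drv_role = QED_DRV_ROLE_OS;  /* QED_DRV_ROLE_KDUMP for kdump */
		params.timeout_val = 0;             /* assumed: MFW default timeout */
		params.avoid_eng_reset = false;

		rc = qed_mcp_load_req(p_hwfn, p_ptt, &params);
		if (rc)
			return rc;

		/* The response encodes whether this PF is first on engine/port */
		switch (params.load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			/* first PF on the engine - run engine-wide init */
			break;
		case FW_MSG_CODE_DRV_LOAD_PORT:
			/* first PF on the port - run port-level init */
			break;
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			/* per-function init only */
			break;
		}

		return 0;
	}
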
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 368e88de146c..5ae35d6cc7d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h"
+#include "qed_dev_api.h"
struct qed_mcp_link_speed_params {
bool autoneg;
@@ -479,14 +480,18 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
-/* TODO - this is only correct as long as only BB is supported, and
- * no port-swapping is implemented; Afterwards we'll need to fix it.
- */
-#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->cdev->num_ports_in_engines * \
+ qed_device_num_engines((_p_hwfn)->cdev)))
+
struct qed_mcp_info {
- /* Spinlock used for protecting the access to the MFW mailbox */
- spinlock_t lock;
+ /* List for mailbox commands which were sent and wait for a response */
+ struct list_head cmd_list;
+
+ /* Spinlock used for protecting the access to the mailbox commands list
+ * and the sending of the commands.
+ */
+ spinlock_t cmd_lock;
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
@@ -506,14 +511,16 @@ struct qed_mcp_info {
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
- u16 mcp_hist;
+ u32 mcp_hist;
};
struct qed_mcp_mb_params {
u32 cmd;
u32 param;
- union drv_union_data *p_data_src;
- union drv_union_data *p_data_dst;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
};
@@ -564,27 +571,55 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn);
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
+enum qed_drv_role {
+ QED_DRV_ROLE_OS,
+ QED_DRV_ROLE_KDUMP,
+};
+
+struct qed_load_req_params {
+ /* Input params */
+ enum qed_drv_role drv_role;
+ u8 timeout_val;
+ bool avoid_eng_reset;
+ enum qed_override_force_load override_force_load;
+
+ /* Output params */
+ u32 load_code;
+};
+
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- * succeed, returns whether this PF is the first on the
- * chip/engine/port or function. This function should be
- * called when driver is ready to accept MFW events after
- * Storms initializations are done.
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ * returns whether this PF is the first on the engine/port or function.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_load_code - The MCP response param containing one
- * of the following:
- * FW_MSG_CODE_DRV_LOAD_ENGINE
- * FW_MSG_CODE_DRV_LOAD_PORT
- * FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return int -
- * 0 - Operation was successul.
- * -EBUSY - Operation failed
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 *p_load_code);
+ struct qed_load_req_params *p_params);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
@@ -708,6 +743,41 @@ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
/**
+ * @brief - Sets the MFW's max value for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param resc_max_val
+ * @param p_mcp_resp
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp);
+
+/**
+ * @brief - Gets the MFW allocation info for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param p_mcp_resp
+ * @param p_resc_num
+ * @param p_resc_start
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
+
+/**
* @brief Send eswitch mode to MFW
*
* @param p_hwfn
@@ -720,19 +790,106 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_eswitch eswitch);
+#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP
+#define QED_MCP_RESC_LOCK_MAX_VAL 31
+
+enum qed_resc_lock {
+ QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
+ QED_RESC_LOCK_PTP_PORT0,
+ QED_RESC_LOCK_PTP_PORT1,
+ QED_RESC_LOCK_PTP_PORT2,
+ QED_RESC_LOCK_PTP_PORT3,
+ QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
+ QED_RESC_LOCK_RESC_INVALID
+};
+
/**
- * @brief - Gets the MFW allocation info for the given resource
+ * @brief - Initiates PF FLR
*
* @param p_hwfn
* @param p_ptt
- * @param p_resc_info - descriptor of requested resource
- * @param p_mcp_resp
- * @param p_mcp_param
*
* @return int - 0 - operation was successful.
*/
-int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param);
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+struct qed_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define QED_MCP_RESC_LOCK_TO_DEFAULT 0
+#define QED_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set as true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW]
+ */
+ u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
+
+struct qed_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Allow releasing a resource even if it belongs to another PF */
+ bool b_force;
+
+ /* Will be set as true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_unlock_params *p_params);
+
+/**
+ * @brief - default initialization for lock/unlock resource structs
+ *
+ * @param p_lock - lock params struct to be initialized; Can be NULL
+ * @param p_unlock - unlock params struct to be initialized; Can be NULL
+ * @param resource - the requested resource
+ * @param b_is_permanent - disable retries & aging when set
+ */
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+ enum qed_resc_lock resource,
+ bool b_is_permanent);
+
#endif
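
To make the new locking API above concrete, a minimal sketch of the intended acquire/release pattern, mirroring what qed_ptp.c does later in this patch; the choice of QED_RESC_LOCK_DBG_DUMP and the non-permanent setting are just examples.

	static int example_with_resc_lock(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
	{
		struct qed_resc_lock_params lock_params;
		struct qed_resc_unlock_params unlock_params;
		int rc;

		/* Default retries/aging; 'false' means a non-permanent resource */
		qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
					       QED_RESC_LOCK_DBG_DUMP, false);

		rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
		if (rc)
			return rc;
		if (!lock_params.b_granted)
			return -EBUSY;	/* currently held by lock_params.owner */

		/* ... critical section protected across PFs ... */

		return qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
	}
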
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 378afce58b3f..db96670192c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -41,6 +41,7 @@
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_ooo.h"
+#include "qed_cxt.h"
static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
@@ -48,15 +49,18 @@ static struct qed_ooo_archipelago
*p_ooo_info,
u32 cid)
{
- struct qed_ooo_archipelago *p_archipelago = NULL;
+ u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+ struct qed_ooo_archipelago *p_archipelago;
- list_for_each_entry(p_archipelago,
- &p_ooo_info->archipelagos_list, list_entry) {
- if (p_archipelago->cid == cid)
- return p_archipelago;
- }
+ if (idx >= p_ooo_info->max_num_archipelagos)
+ return NULL;
- return NULL;
+ p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+
+ if (list_empty(&p_archipelago->isles_list))
+ return NULL;
+
+ return p_archipelago;
}
static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
@@ -97,8 +101,8 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
+ u16 max_num_archipelagos = 0, cid_base;
struct qed_ooo_info *p_ooo_info;
- u16 max_num_archipelagos = 0;
u16 max_num_isles = 0;
u32 i;
@@ -110,6 +114,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+ cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI);
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
@@ -121,11 +126,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info)
return NULL;
+ p_ooo_info->cid_base = cid_base;
+ p_ooo_info->max_num_archipelagos = max_num_archipelagos;
+
INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
- INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
- INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
sizeof(struct qed_ooo_isle),
@@ -146,11 +152,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->p_archipelagos_mem)
goto no_archipelagos_mem;
- for (i = 0; i < max_num_archipelagos; i++) {
+ for (i = 0; i < max_num_archipelagos; i++)
INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
- list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
p_ooo_info->ooo_history.p_cqes =
kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
@@ -178,21 +181,9 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
- bool b_found = false;
-
- if (list_empty(&p_ooo_info->archipelagos_list))
- return;
- list_for_each_entry(p_archipelago,
- &p_ooo_info->archipelagos_list, list_entry) {
- if (p_archipelago->cid == cid) {
- list_del(&p_archipelago->list_entry);
- b_found = true;
- break;
- }
- }
-
- if (!b_found)
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ if (!p_archipelago)
return;
while (!list_empty(&p_archipelago->isles_list)) {
@@ -216,27 +207,21 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
-
- list_add_tail(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
}
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
- struct qed_ooo_archipelago *p_arch;
+ struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
+ u32 i;
- while (!list_empty(&p_ooo_info->archipelagos_list)) {
- p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
- struct qed_ooo_archipelago,
- list_entry);
-
- list_del(&p_arch->list_entry);
+ for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) {
+ p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]);
- while (!list_empty(&p_arch->isles_list)) {
- p_isle = list_first_entry(&p_arch->isles_list,
+ while (!list_empty(&p_archipelago->isles_list)) {
+ p_isle = list_first_entry(&p_archipelago->isles_list,
struct qed_ooo_isle,
list_entry);
@@ -258,8 +243,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
- list_add_tail(&p_arch->list_entry,
- &p_ooo_info->free_archipelagos_list);
}
if (!list_empty(&p_ooo_info->ready_buffers_list))
list_splice_tail_init(&p_ooo_info->ready_buffers_list,
@@ -378,12 +361,6 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
p_ooo_info->cur_isles_number--;
list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
}
-
- if (list_empty(&p_archipelago->isles_list)) {
- list_del(&p_archipelago->list_entry);
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
}
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
@@ -426,28 +403,10 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
return;
}
- if (!p_archipelago &&
- !list_empty(&p_ooo_info->free_archipelagos_list)) {
- p_archipelago =
- list_first_entry(&p_ooo_info->free_archipelagos_list,
- struct qed_ooo_archipelago, list_entry);
+ if (!p_archipelago) {
+ u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
- list_del(&p_archipelago->list_entry);
- if (!list_empty(&p_archipelago->isles_list)) {
- DP_NOTICE(p_hwfn,
- "Free OOO connection is not empty\n");
- INIT_LIST_HEAD(&p_archipelago->isles_list);
- }
- p_archipelago->cid = cid;
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->archipelagos_list);
- } else if (!p_archipelago) {
- DP_NOTICE(p_hwfn, "No more free OOO connections\n");
- list_add(&p_isle->list_entry,
- &p_ooo_info->free_isles_list);
- list_add(&p_buffer->list_entry,
- &p_ooo_info->free_buffers_list);
- return;
+ p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
}
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
@@ -517,11 +476,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
} else {
list_splice_tail_init(&p_right_isle->buffers_list,
&p_ooo_info->ready_buffers_list);
- if (list_empty(&p_archipelago->isles_list)) {
- list_del(&p_archipelago->list_entry);
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
}
list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
}
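
The net effect of the qed_ooo.c changes is that archipelago lookup drops the list walk keyed by cid in favor of direct indexing with (cid & 0xffff) - cid_base. A standalone model of the index computation (all numbers made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t cid_base = 0x0100;          /* from the iSCSI proto cid start */
		uint16_t max_num_archipelagos = 64;
		uint32_t cid = 0x00050123;           /* upper 16 bits carry other info */
		uint32_t idx = (cid & 0xffff) - cid_base;

		if (idx >= max_num_archipelagos)
			printf("cid 0x%08x: out of range\n", cid);
		else
			printf("cid 0x%08x -> archipelago index %u\n", cid, idx);
		return 0;
	}
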
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
index 4f138fb5f533..791ad0f8b759 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -60,9 +60,7 @@ struct qed_ooo_isle {
};
struct qed_ooo_archipelago {
- struct list_head list_entry;
struct list_head isles_list;
- u32 cid;
};
struct qed_ooo_history {
@@ -75,14 +73,14 @@ struct qed_ooo_info {
struct list_head free_buffers_list;
struct list_head ready_buffers_list;
struct list_head free_isles_list;
- struct list_head free_archipelagos_list;
- struct list_head archipelagos_list;
struct qed_ooo_archipelago *p_archipelagos_mem;
struct qed_ooo_isle *p_isles_mem;
struct qed_ooo_history ooo_history;
u32 cur_isles_number;
u32 max_isles_number;
u32 gen_isles_number;
+ u16 max_num_archipelagos;
+ u16 cid_base;
};
#if IS_ENABLED(CONFIG_QED_ISCSI)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index d27aa85da23c..434a164a76ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -34,7 +34,7 @@
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
-#include "qed_ptp.h"
+#include "qed_mcp.h"
#include "qed_reg_addr.h"
/* 16 nano second time quantas to wait before making a Drift adjustment */
@@ -45,6 +45,82 @@
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
#define QED_TIMESTAMP_MASK BIT(16)
+static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
+{
+ switch (qed_device_get_port_id(p_hwfn->cdev)) {
+ case 0:
+ return QED_RESC_LOCK_PTP_PORT0;
+ case 1:
+ return QED_RESC_LOCK_PTP_PORT1;
+ case 2:
+ return QED_RESC_LOCK_PTP_PORT2;
+ case 3:
+ return QED_RESC_LOCK_PTP_PORT3;
+ default:
+ return QED_RESC_LOCK_RESC_INVALID;
+ }
+}
+
+static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_lock_params params;
+ enum qed_resc_lock resource;
+ int rc;
+
+ resource = qed_ptcdev_to_resc(p_hwfn);
+ if (resource == QED_RESC_LOCK_RESC_INVALID)
+ return -EINVAL;
+
+ qed_mcp_resc_lock_default_init(&params, NULL, resource, true);
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ /* MFW doesn't support resource locking, first PF on the port
+ * has lock ownership.
+ */
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
+ return 0;
+
+ DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
+ return -EBUSY;
+ } else if (!rc && !params.b_granted) {
+ DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
+ return -EBUSY;
+ }
+
+ return rc;
+}
+
+static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params params;
+ enum qed_resc_lock resource;
+ int rc;
+
+ resource = qed_ptcdev_to_resc(p_hwfn);
+ if (resource == QED_RESC_LOCK_RESC_INVALID)
+ return -EINVAL;
+
+ qed_mcp_resc_lock_default_init(NULL, &params, resource, true);
+
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
+ if (rc == -EINVAL) {
+ /* MFW doesn't support locking, first PF has lock ownership */
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
+ rc = 0;
+ } else {
+ DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
+ return -EINVAL;
+ }
+ } else if (rc) {
+ DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
+ }
+
+ return rc;
+}
+
/* Read Rx timestamp */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
@@ -112,39 +188,73 @@ static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
}
/* Filter PTP protocol packets that need to be timestamped */
-static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev,
- enum qed_ptp_filter_type type)
+static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
+ enum qed_ptp_filter_type rx_type,
+ enum qed_ptp_hwtstamp_tx_type tx_type)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
- u32 rule_mask, parm_mask;
+ u32 rule_mask, enable_cfg = 0x0;
- switch (type) {
- case QED_PTP_FILTER_L2_IPV4_IPV6:
- parm_mask = 0x6AA;
- rule_mask = 0x3EEE;
+ switch (rx_type) {
+ case QED_PTP_FILTER_NONE:
+ enable_cfg = 0x0;
+ rule_mask = 0x3FFF;
break;
- case QED_PTP_FILTER_L2:
- parm_mask = 0x6BF;
- rule_mask = 0x3EFF;
+ case QED_PTP_FILTER_ALL:
+ enable_cfg = 0x7;
+ rule_mask = 0x3CAA;
break;
- case QED_PTP_FILTER_IPV4_IPV6:
- parm_mask = 0x7EA;
- rule_mask = 0x3FFE;
+ case QED_PTP_FILTER_V1_L4_EVENT:
+ enable_cfg = 0x3;
+ rule_mask = 0x3FFA;
break;
- case QED_PTP_FILTER_IPV4:
- parm_mask = 0x7EE;
+ case QED_PTP_FILTER_V1_L4_GEN:
+ enable_cfg = 0x3;
rule_mask = 0x3FFE;
break;
+ case QED_PTP_FILTER_V2_L4_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3FAA;
+ break;
+ case QED_PTP_FILTER_V2_L4_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3FEE;
+ break;
+ case QED_PTP_FILTER_V2_L2_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3CFF;
+ break;
+ case QED_PTP_FILTER_V2_L2_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3EFF;
+ break;
+ case QED_PTP_FILTER_V2_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3CAA;
+ break;
+ case QED_PTP_FILTER_V2_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3EEE;
+ break;
default:
- DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type);
+ DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
return -EINVAL;
}
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1);
+ if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+ } else {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
+ }
/* Reset possibly old timestamps */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
@@ -248,7 +358,25 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
+ return -EBUSY;
+ }
+
+ p_hwfn->p_ptp_ptt = p_ptt;
+
+ rc = qed_ptp_res_lock(p_hwfn, p_ptt);
+ if (rc) {
+ DP_INFO(p_hwfn,
+ "Couldn't acquire the resource lock; skipping PTP enable for this PF\n");
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_ptp_ptt = NULL;
+ return rc;
+ }
/* Reset PTP event detection rules - will be configured in the IOCTL */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
@@ -262,12 +390,20 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
/* Pause free running counter */
- qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_AH(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
/* Resume free running counter */
- qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
+ }
/* Disable drift register */
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
@@ -281,22 +417,13 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
return 0;
}
-static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
-
- qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA);
- qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE);
-
- return 0;
-}
-
static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ qed_ptp_res_unlock(p_hwfn, p_ptt);
+
/* Reset PTP event detection rules */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
@@ -308,12 +435,14 @@ static int qed_ptp_hw_disable(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_ptp_ptt = NULL;
+
return 0;
}
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
- .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on,
- .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters,
+ .cfg_filters = qed_ptp_hw_cfg_filters,
.read_rx_ts = qed_ptp_hw_read_rx_ts,
.read_tx_ts = qed_ptp_hw_read_tx_ts,
.read_cc = qed_ptp_hw_read_cc,
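
The qede side is expected to translate the standard SIOCSHWTSTAMP values into the new rx_type/tx_type arguments of .cfg_filters. A hedged sketch of one plausible rx mapping — illustrative only, the actual qede_ptp code is not part of this hunk, and only a subset of the HWTSTAMP_FILTER_* values is shown:

	#include <linux/net_tstamp.h>

	static enum qed_ptp_filter_type example_map_rx_filter(int rx_filter)
	{
		switch (rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			return QED_PTP_FILTER_NONE;
		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			return QED_PTP_FILTER_V1_L4_EVENT;
		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			return QED_PTP_FILTER_V2_L4_EVENT;
		case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
			return QED_PTP_FILTER_V2_L2_EVENT;
		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			return QED_PTP_FILTER_V2_EVENT;
		default:
			return QED_PTP_FILTER_ALL;	/* timestamp everything */
		}
	}
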
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d59d9df60cd2..1ae73b2d6d1e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -160,13 +160,13 @@
0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
-#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \
0x2aa404UL
-#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
0x2aa408UL
-#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \
0x2aa40cUL
-#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB \
0x2aa410UL
#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
0x2aa138UL
@@ -356,6 +356,10 @@
0x238804UL
#define RDIF_REG_STOP_ON_ERROR \
0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+ 0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
#define SRC_REG_SOFT_RST \
0x23874cUL
#define TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@
0x1700004UL
#define TDIF_REG_STOP_ON_ERROR \
0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+ 0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
#define UCM_REG_INIT \
0x1280000UL
#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
+#define NWS_REG_DBG_SELECT \
+ 0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+ 0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+ 0x700130UL
+#define NWS_REG_DBG_FORCE_VALID \
+ 0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME \
+ 0x700138UL
+#define MS_REG_DBG_SELECT \
+ 0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+ 0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+ 0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+ 0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+ 0x6a0238UL
#define PCIE_REG_DBG_COMMON_SELECT \
0x054398UL
#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@
0x000b48UL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+ 4
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
#define NWS_REG_NWS_CMU \
@@ -1520,4 +1550,22 @@
#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
+#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+
+#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+#define PRS_REG_GFT_CAM 0x1f1100UL
+#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
+#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
+#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index d9ff6b28591c..56289d7cd306 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -66,17 +66,31 @@
#include "qed_roce.h"
#include "qed_ll2.h"
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
- struct event_ring_entry *p_eqe)
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
+
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, union rdma_eqe_data *rdma_data)
{
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+ u16 icid =
+ (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+ /* icid release in this async event can occur only if the icid
+ * was offloaded to the FW. In case it wasn't offloaded this is
+ * handled in qed_roce_sp_destroy_qp.
+ */
+ qed_roce_free_real_icid(p_hwfn, icid);
+ } else {
+ struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
- p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
- p_eqe->opcode, &p_eqe->data);
+ events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+ fw_event_code,
+ &rdma_data->async_handle);
+ }
}
static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 max_count)
+ struct qed_bmap *bmap, u32 max_count, char *name)
{
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
@@ -90,43 +104,62 @@ static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
return -ENOMEM;
}
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
- bmap->bitmap);
+ snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap allocated\n", name);
return 0;
}
static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 *id_num)
{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
-
*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
-
- if (*id_num >= bmap->max_count) {
- DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
- bmap->max_count);
+ if (*id_num >= bmap->max_count)
return -EINVAL;
- }
__set_bit(*id_num, bmap->bitmap);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
+ bmap->name, *id_num);
+
return 0;
}
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return;
+
+ __set_bit(id_num, bmap->bitmap);
+}
+
static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 id_num)
{
bool b_acquired;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
if (id_num >= bmap->max_count)
return;
b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
if (!b_acquired) {
- DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+ DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
+ bmap->name, id_num);
return;
}
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
+ bmap->name, id_num);
+}
+
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return -1;
+
+ return test_bit(id_num, bmap->bitmap);
}
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
@@ -170,7 +203,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Queue zone lines are shared between RoCE and L2 in such a way that
* they can be used by each without obstructing the other.
*/
- p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
/* Allocate a struct with device params and fill it */
p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -191,7 +225,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
}
/* Allocate bit map for pd's */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
+ "PD");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate pd_map, rc = %d\n",
@@ -201,7 +236,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate DPI bitmap */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
- p_hwfn->dpi_count);
+ p_hwfn->dpi_count, "DPI");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate DPI bitmap, rc = %d\n", rc);
@@ -212,7 +247,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
* twice the number of QPs.
*/
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
- p_rdma_info->num_qps * 2);
+ p_rdma_info->num_qps * 2, "CQ");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate cq bitmap, rc = %d\n", rc);
@@ -224,7 +259,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
* The maximum number of CQs is bounded to twice the number of QPs.
*/
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
- p_rdma_info->num_qps * 2);
+ p_rdma_info->num_qps * 2, "Toggle");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate toogle bits, rc = %d\n", rc);
@@ -233,7 +268,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate bitmap for itids */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
- p_rdma_info->num_mrs);
+ p_rdma_info->num_mrs, "MR");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate itids bitmaps, rc = %d\n", rc);
@@ -241,16 +276,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
}
/* Allocate bitmap for cids used for qps. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
+ "CID");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate cid bitmap, rc = %d\n", rc);
goto free_tid_map;
}
+ /* Allocate bitmap for cids used for responders/requesters. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
+ "REAL_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate real cid bitmap, rc = %d\n", rc);
+ goto free_cid_map;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0;
+free_cid_map:
+ kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
@@ -271,16 +317,79 @@ free_rdma_info:
return rc;
}
+static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, bool check)
+{
+ int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ int last_line = bmap->max_count / (64 * 8);
+ int last_item = last_line * 8 +
+ DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
+ u64 *pmap = (u64 *)bmap->bitmap;
+ int line, item, offset;
+ u8 str_last_line[200] = { 0 };
+
+ if (!weight || !check)
+ goto end;
+
+ DP_NOTICE(p_hwfn,
+ "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
+ bmap->name, bmap->max_count, weight);
+
+ /* print aligned non-zero lines, if any */
+ for (item = 0, line = 0; line < last_line; line++, item += 8)
+ if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ DP_NOTICE(p_hwfn,
+ "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ line,
+ pmap[item],
+ pmap[item + 1],
+ pmap[item + 2],
+ pmap[item + 3],
+ pmap[item + 4],
+ pmap[item + 5],
+ pmap[item + 6], pmap[item + 7]);
+
+ /* print last unaligned non-zero line, if any */
+ if ((bmap->max_count % (64 * 8)) &&
+ (bitmap_weight((unsigned long *)&pmap[item],
+ bmap->max_count - item * 64))) {
+ offset = sprintf(str_last_line, "line 0x%04x: ", line);
+ for (; item < last_item; item++)
+ offset += sprintf(str_last_line + offset,
+ "0x%016llx ", pmap[item]);
+ DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ }
+
+end:
+ kfree(bmap->bitmap);
+ bmap->bitmap = NULL;
+}
+
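
A worked example of the dump geometry in qed_rdma_bmap_free(): with 512 bits (8 u64 words) per printed line, max_count = 1000 gives last_line = 1 and last_item = 16, so one aligned line is printed and the remaining 488 bits fall to the unaligned tail. Standalone model:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int max_count = 1000;			/* bits in the bitmap */
		int last_line = max_count / (64 * 8);	/* full 512-bit lines */
		int last_item = last_line * 8 +
				DIV_ROUND_UP(max_count % (64 * 8), 64); /* u64 words */

		printf("last_line=%d last_item=%d\n", last_line, last_item);
		return 0;
	}
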
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ int wait_count = 0;
- kfree(p_rdma_info->cid_map.bitmap);
- kfree(p_rdma_info->tid_map.bitmap);
- kfree(p_rdma_info->toggle_bits.bitmap);
- kfree(p_rdma_info->cq_map.bitmap);
- kfree(p_rdma_info->dpi_map.bitmap);
- kfree(p_rdma_info->pd_map.bitmap);
+ /* When destroying a RoCE QP, control is returned to the user after
+ * the synchronous part. The asynchronous part may take a little longer.
+ * We delay for a short while if an async destroy QP is still expected.
+ * Beyond the added delay we clear the bitmap anyway.
+ */
+ while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ msleep(100);
+ if (wait_count++ > 20) {
+ DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+ break;
+ }
+ }
+
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
kfree(p_rdma_info->port);
kfree(p_rdma_info->dev);
@@ -675,6 +784,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
((out_params->dpi) * p_hwfn->dpi_size);
out_params->dpi_size = p_hwfn->dpi_size;
+ out_params->wid_count = p_hwfn->wid_count;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
return rc;
@@ -693,6 +803,8 @@ static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+ p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
+
return p_port;
}
@@ -724,6 +836,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
u32 addr;
p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+ DP_NOTICE(p_hwfn,
+ "queue zone offset %d is too large (max is %d)\n",
+ qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+ return;
+ }
+
qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -737,9 +857,12 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
struct qed_dev_rdma_info *info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+
memset(info, 0, sizeof(*info));
info->rdma_type = QED_RDMA_TYPE_ROCE;
+ info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
qed_fill_dev_info(cdev, &info->common);
@@ -887,8 +1010,7 @@ static int qed_rdma_create_cq(void *rdma_cxt,
/* Allocate icid */
spin_lock_bh(&p_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_info->cq_map, &returned_id);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
spin_unlock_bh(&p_info->lock);
if (rc) {
@@ -1080,6 +1202,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,15 +1269,22 @@ err:
return rc;
}
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_resp_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 physical_queue0 = 0;
+ u16 regular_latency_queue;
+ enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1366,16 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
- p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
- memset(&qm_params, 0, sizeof(qm_params));
- qm_params.roce.qpid = qp->icid >> 1;
- physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+ regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
- p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1391,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
- rc, physical_queue0);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "rc = %d regular physical queue = 0x%x\n", rc,
+ regular_latency_queue);
if (rc)
goto err;
qp->resp_offloaded = true;
+ qp->cq_prod = 0;
+
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn, qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@@ -1277,10 +1421,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
{
struct roce_create_qp_req_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 physical_queue0 = 0;
+ u16 regular_latency_queue;
+ enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1495,16 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
- p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
- p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
- qp->sq_cq_id);
+ p_ramrod->cq_cid =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
- memset(&qm_params, 0, sizeof(qm_params));
- qm_params.roce.qpid = qp->icid >> 1;
- physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+ regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
- p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1523,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
goto err;
qp->req_offloaded = true;
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn,
+ qp->icid + 1 -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@@ -1577,7 +1726,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
- u32 *num_invalidated_mw)
+ u32 *num_invalidated_mw,
+ u32 *cq_prod)
{
struct roce_destroy_qp_resp_output_params *p_ramrod_res;
struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1738,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
- if (!qp->resp_offloaded)
+ *num_invalidated_mw = 0;
+ *cq_prod = qp->cq_prod;
+
+ if (!qp->resp_offloaded) {
+ /* If a responder was never offloaded, we need to free the cids
+ * allocated in create_qp, as a FW async event will never arrive
+ */
+ u32 cid;
+
+ cid = qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+ qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
return 0;
+ }
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1788,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
goto err;
*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+ *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+ qp->cq_prod = *cq_prod;
/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1993,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
out_params->draining = false;
- if (rq_err_state)
+ if (rq_err_state || sq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_ERR;
- else if (sq_err_state)
- qp->cur_state = QED_ROCE_QP_STATE_SQE;
else if (sq_draining)
out_params->draining = true;
out_params->state = qp->cur_state;
@@ -1849,10 +2013,9 @@ err_resp:
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
- u32 start_cid;
+ u32 cq_prod;
int rc;
/* Destroys the specified QP */
@@ -1866,7 +2029,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw);
+ &num_invalidated_mw,
+ &cq_prod);
if (rc)
return rc;
@@ -1881,21 +2045,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
-
- spin_lock_bh(&p_rdma_info->lock);
-
- start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
- p_rdma_info->proto);
-
- /* Release responder's icid */
- qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
- qp->icid - start_cid);
-
- /* Release requester's icid */
- qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
- qp->icid + 1 - start_cid);
-
- spin_unlock_bh(&p_rdma_info->lock);
}
return 0;
@@ -2097,8 +2246,7 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
params->modify_flags);
return rc;
- } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
- qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
/* ->ERR */
rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
params->modify_flags);
@@ -2110,12 +2258,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
/* Any state -> RESET */
+ u32 cq_prod;
+
+ /* Send destroy responder ramrod */
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+ qp,
+ &num_invalidated_mw,
+ &cq_prod);
- rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw);
if (rc)
return rc;
+ qp->cq_prod = cq_prod;
+
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
@@ -2357,6 +2512,8 @@ qed_rdma_register_tid(void *rdma_cxt,
}
rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
if (fw_return_code != RDMA_RETURN_OK) {
DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
@@ -2454,6 +2611,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
return rc;
}
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 start_cid, cid, xcid;
+
+ /* An even icid belongs to a responder while an odd icid belongs to a
+ * requester. The 'cid' received as an input can be either. We calculate
+ * the "partner" icid and call it xcid. Only if both are free can the
+ * "cid" map be cleared.
+ */
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+ cid = icid - start_cid;
+ xcid = cid ^ 1;
+
+ spin_lock_bh(&p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+ if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+ }
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
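
The xcid = cid ^ 1 computation works because icids are handed out in even/odd pairs, so flipping bit 0 maps a responder id to its requester partner and back. A tiny standalone illustration:

#include <assert.h>

int main(void)
{
	unsigned int resp = 0x24;	/* even icid: responder */
	unsigned int req = resp ^ 1;	/* 0x25: its requester partner */

	assert((resp & 1) == 0 && (req & 1) == 1);
	assert((req ^ 1) == resp);	/* XOR with 1 is self-inverse */
	return 0;
}
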
+
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
return QED_LEADING_HWFN(cdev);
@@ -2773,7 +2955,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
: QED_LL2_RROCE;
if (pkt->roce_mode == ROCE_V2_IPV4)
- flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 36cf4b2ab7fa..9742af516183 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -67,9 +67,11 @@ enum qed_rdma_toggle_bit {
QED_RDMA_TOGGLE_BIT_SET = 1
};
+#define QED_RDMA_MAX_BMAP_NAME (10)
struct qed_bmap {
unsigned long *bitmap;
u32 max_count;
+ char name[QED_RDMA_MAX_BMAP_NAME];
};
struct qed_rdma_info {
@@ -82,6 +84,7 @@ struct qed_rdma_info {
struct qed_bmap qp_map;
struct qed_bmap srq_map;
struct qed_bmap cid_map;
+ struct qed_bmap real_cid_map;
struct qed_bmap dpi_map;
struct qed_bmap toggle_bits;
struct qed_rdma_events events;
@@ -92,6 +95,7 @@ struct qed_rdma_info {
u32 num_qps;
u32 num_mrs;
u16 queue_zone_base;
+ u16 max_queue_zones;
enum protocol_type proto;
};
@@ -153,6 +157,7 @@ struct qed_rdma_qp {
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
+ u32 cq_prod;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
@@ -163,8 +168,8 @@ struct qed_rdma_qp {
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
- struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, union rdma_eqe_data *rdma_data);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
@@ -187,7 +192,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u16 src_mac_addr_lo, bool b_last_packet);
#else
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ union rdma_eqe_data *rdma_data) {}
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 30393ffaa8e5..3357bbefa445 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -84,6 +84,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -408,7 +409,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch);
/**
@@ -441,7 +442,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 6fb80f9ef446..bc3694e91b85 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -111,7 +111,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
return 0;
}
-static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
switch (type) {
case QED_TUNN_CLSS_MAC_VLAN:
@@ -122,206 +122,201 @@ static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
return TUNNEL_CLSS_INNER_MAC_VLAN;
case QED_TUNN_CLSS_INNER_MAC_VNI:
return TUNNEL_CLSS_INNER_MAC_VNI;
+ case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+ return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
default:
return TUNNEL_CLSS_MAC_VLAN;
}
}
static void
-qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src, bool b_pf_start)
{
- unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
- unsigned long update_mask = p_src->tunn_mode_update_mask;
- unsigned long tunn_mode = p_src->tunn_mode;
- unsigned long new_tunn_mode = 0;
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
- }
-
- if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
- }
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
- if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
- }
-
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
- }
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
- p_src->tunn_mode = new_tunn_mode;
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
}
-static void
-qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src)
{
- unsigned long tunn_mode = p_src->tunn_mode;
enum tunnel_clss type;
- qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
- p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
- p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+ type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = type;
+}
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
+static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
- }
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
+static void
+__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct qed_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ if (tun_type->b_mode_enabled)
+ *p_enable_tx_clas = 1;
+}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
+static void
+qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct qed_tunn_update_type *tun_type,
+ u8 *p_update_port, __le16 *p_port,
+ struct qed_tunn_update_udp_port *p_udp_port)
+{
+ __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = cpu_to_le16(p_udp_port->port);
}
+}
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+
+ qed_set_pf_update_tunn_mode(p_tun, p_src, false);
+ qed_set_tunn_cls_info(p_tun, p_src);
+ qed_set_tunn_ports(p_tun, p_src);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+ p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- unsigned long tunn_mode)
+ struct qed_tunnel_info *p_tun)
{
- u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
- u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- l2gre_enable = 1;
+ qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- ipgre_enable = 1;
-
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- vxlan_enable = 1;
-
- qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
- qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+ qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- l2geneve_enable = 1;
+static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn)
+{
+ if (p_tunn->vxlan_port.b_update_port)
+ qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_port.port);
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- ipgeneve_enable = 1;
+ if (p_tunn->geneve_port.b_update_port)
+ qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_port.port);
- qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
- ipgeneve_enable);
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_src,
+ struct qed_tunnel_info *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
- unsigned long tunn_mode;
- enum tunnel_clss type;
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
if (!p_src)
return;
- tunn_mode = p_src->tunn_mode;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
- }
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
-
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
-
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
-
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
- }
-
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+ qed_set_pf_update_tunn_mode(p_tun, p_src, true);
+ qed_set_tunn_cls_info(p_tun, p_src);
+ qed_set_tunn_ports(p_tun, p_src);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
@@ -416,11 +411,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (p_tunn) {
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->tunn_mode);
- p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
- }
+ if (p_tunn)
+ qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
return rc;
}
@@ -451,7 +443,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
@@ -459,6 +451,12 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (!p_tunn)
+ return -EINVAL;
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
@@ -479,15 +477,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- if (p_tunn->update_vxlan_udp_port)
- qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->vxlan_udp_port);
- if (p_tunn->update_geneve_udp_port)
- qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->geneve_udp_port);
-
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
- p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+ qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 645328a9f0cf..f6423a139ca0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -119,6 +119,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct qed_spq_comp_done *comp_done;
+ struct qed_ptt *p_ptt;
int rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
@@ -135,8 +136,14 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
if (!rc)
return 0;
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
+ return -EAGAIN;
+ }
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
- rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
@@ -145,15 +152,18 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/* Retry after drain */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
- return 0;
+ goto out;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
- if (comp_done->done == 1) {
+ if (comp_done->done == 1)
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
- return 0;
- }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+
err:
+ qed_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
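
The hunk above converts qed_spq_block() from borrowing p_main_ptt to acquiring its own PTT, with both the success and error paths releasing it. The general shape of that flow, as a standalone sketch with stand-in functions (none of these names are qed API):

#include <errno.h>
#include <stdlib.h>

static void *acquire(void) { return malloc(1); }	/* stand-in for qed_ptt_acquire */
static void release(void *r) { free(r); }		/* stand-in for qed_ptt_release */
static int use(void *r) { (void)r; return 0; }		/* stand-in for the drain/retry */

static int do_with_resource(void)
{
	void *res = acquire();
	int rc;

	if (!res)
		return -EAGAIN;	/* nothing acquired, nothing to release */

	rc = use(res);
	if (rc)
		goto err;

	release(res);		/* success path releases */
	return 0;

err:
	release(res);		/* every error path releases too */
	return rc;
}
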
@@ -205,11 +215,10 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- u16 pq;
- struct qed_cxt_info cxt_info;
- struct core_conn_context *p_cxt;
- union qed_qm_pq_params pq_params;
- int rc;
+ struct core_conn_context *p_cxt;
+ struct qed_cxt_info cxt_info;
+ u16 physical_q;
+ int rc;
cxt_info.iid = p_spq->cid;
@@ -231,10 +240,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
p_cxt->xstorm_st_context.spq_base_lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
@@ -296,9 +303,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
case PROTOCOLID_ROCE:
- qed_async_roce_event(p_hwfn, p_eqe);
+ qed_roce_async_event(p_hwfn, p_eqe->opcode,
+ &p_eqe->data.rdma_data);
return 0;
+#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
@@ -306,14 +316,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
case PROTOCOLID_ISCSI:
if (!IS_ENABLED(CONFIG_QED_ISCSI))
return -EINVAL;
- if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
- u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
- qed_ooo_release_connection_isles(p_hwfn,
- p_hwfn->p_ooo_info,
- cid);
- return 0;
- }
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 253c2bbe1e4e..d5df29f787c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -178,26 +178,59 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return vf;
}
+enum qed_iov_validate_q_mode {
+ QED_IOV_VALIDATE_Q_NA,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ QED_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ u16 qid,
+ enum qed_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ if (mode == QED_IOV_VALIDATE_Q_NA)
+ return true;
+
+ if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
+ (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
+ return mode == QED_IOV_VALIDATE_Q_ENABLE;
+
+ /* If no valid cid was found, the queue is disabled */
+ return mode == QED_IOV_VALIDATE_Q_DISABLE;
+}
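
The validator reduces to a three-way truth table: Q_NA always passes, Q_ENABLE expects an allocated cid, and Q_DISABLE expects none. A distilled sketch with hypothetical standalone names:

#include <stdbool.h>

enum q_mode { Q_NA, Q_ENABLE, Q_DISABLE };

static bool validate_q(enum q_mode mode, bool has_cid)
{
	if (mode == Q_NA)
		return true;	/* the caller does not care */

	/* an allocated cid means the queue is enabled */
	return has_cid ? (mode == Q_ENABLE) : (mode == Q_DISABLE);
}
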
+
static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf, u16 rx_qid)
+ struct qed_vf_info *p_vf,
+ u16 rx_qid,
+ enum qed_iov_validate_q_mode mode)
{
- if (rx_qid >= p_vf->num_rxqs)
+ if (rx_qid >= p_vf->num_rxqs) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
- return rx_qid < p_vf->num_rxqs;
+ return false;
+ }
+
+ return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}
static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf, u16 tx_qid)
+ struct qed_vf_info *p_vf,
+ u16 tx_qid,
+ enum qed_iov_validate_q_mode mode)
{
- if (tx_qid >= p_vf->num_txqs)
+ if (tx_qid >= p_vf->num_txqs) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
- return tx_qid < p_vf->num_txqs;
+ return false;
+ }
+
+ return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}
static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
@@ -217,6 +250,34 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
return false;
}
+static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
int vfid, struct qed_ptt *p_ptt)
{
@@ -557,14 +618,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
- /* Calculate the first VF index - this is a bit tricky; Basically,
- * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
- * after the first engine's VFs.
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+ * provide the number for eng0. 2nd engine VFs would begin
+ * after the first engine's VFs.
+ * - If !ARI, VFs would start on the next device,
+ * so offset - (256 - pf_id) would provide the number.
+ * Utilize the fact that (256 - pf_id) is reached only in the
+ * latter case to differentiate between the two.
*/
- cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
- p_hwfn->abs_pf_id - 16;
- if (QED_PATH_ID(p_hwfn))
- cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"First VF in hwfn 0x%08x\n",
@@ -677,6 +754,11 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
int rc;
+ /* It's possible VF was previously considered malicious -
+ * clear the indication even if we're only going to disable VF.
+ */
+ vf->b_malicious = false;
+
if (vf->to_disable)
return 0;
@@ -689,9 +771,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- /* It's possible VF was previously considered malicious */
- vf->b_malicious = false;
-
rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
if (rc)
return rc;
@@ -1118,13 +1197,17 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
(sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
&params);
- qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
- mbx->req_virt->first_tlv.reply_address,
- sizeof(u64) / 4, &params);
-
+ /* Once the PF copies the rc to the VF, the latter can continue
+ * and send an additional message. So we have to make sure the
+ * channel is reset to ready prior to that.
+ */
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
}
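
The reordering above exists because the DMA of the reply is what unblocks the VF; a sketch of the sequence being enforced (illustrative timeline, not qed API):

/*   PF                                  VF
 *   write reply body (all but rc)
 *   mark channel READY
 *   DMA rc to VF            ------>     sees rc, sends next request
 *                                       (channel already READY)
 *
 * With the previous order, the VF could observe the rc and fire its
 * next message while the channel was still marked busy, and that
 * request would be lost.
 */
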
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
@@ -1733,6 +1816,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
vf->state = VF_ENABLED;
start = &mbx->req_virt->start_vport;
+ qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
if (!start->sb_addr[sb_id]) {
@@ -1746,7 +1831,6 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
start->sb_addr[sb_id],
vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
}
- qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
vf->mtu = start->mtu;
vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1803,6 +1887,16 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->vport_instance--;
vf->spoof_chk = false;
+ if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
+ (qed_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn,
+ "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
+ }
+
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
@@ -1814,6 +1908,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->configured_features = 0;
memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
}
@@ -1870,7 +1965,8 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
req = &mbx->req_virt->start_rxq;
- if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ QED_IOV_VALIDATE_Q_DISABLE) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
@@ -1923,6 +2019,220 @@ out:
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
+static void
+qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct qed_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_tun,
+ enum qed_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & BIT(mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & BIT(mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
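
The request encodes each tunnel type as one bit in two parallel masks: bit N of tun_mode_update_mask says "this type is being updated", and the same bit of tunn_mode carries the requested state. A minimal sketch mirroring the helper above, with hypothetical standalone types:

#include <stdbool.h>
#include <stdint.h>

struct tun_state {
	bool b_update_mode;
	bool b_mode_enabled;
};

static void update_tun(struct tun_state *t, uint16_t update_mask,
		       uint16_t tunn_mode, unsigned int mask)
{
	if (update_mask & (1U << mask)) {
		t->b_update_mode = true;
		if (tunn_mode & (1U << mask))
			t->b_mode_enabled = true;
	}
}
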
+
+static void
+qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_tun,
+ struct qed_tunn_update_udp_port *p_port,
+ enum qed_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
+static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
+{
+ if (tun->b_update_mode && !tun->b_mode_enabled) {
+ tun->b_update_mode = false;
+ *rc = -EINVAL;
+ }
+}
+
+static int
+qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
+ u16 *tun_features, bool *update,
+ struct qed_tunnel_info *tun_src)
+{
+ struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
+ struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
+ u16 bultn_vxlan_port, bultn_geneve_port;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ int i, rc = 0;
+
+ *tun_features = p_hwfn->cdev->tunn_feature_mask;
+ bultn_vxlan_port = tun->vxlan_port.port;
+ bultn_geneve_port = tun->geneve_port.port;
+ qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
+
+ if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
+ (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
+ tun_src->b_update_rx_cls = false;
+ tun_src->b_update_tx_cls = false;
+ rc = -EINVAL;
+ }
+
+ if (tun_src->vxlan_port.b_update_port) {
+ if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
+ tun_src->vxlan_port.b_update_port = false;
+ } else {
+ *update = true;
+ bultn_vxlan_port = tun_src->vxlan_port.port;
+ }
+ }
+
+ if (tun_src->geneve_port.b_update_port) {
+ if (tun_src->geneve_port.port == tun->geneve_port.port) {
+ tun_src->geneve_port.b_update_port = false;
+ } else {
+ *update = true;
+ bultn_geneve_port = tun_src->geneve_port.port;
+ }
+ }
+
+ qed_for_each_vf(p_hwfn, i) {
+ qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
+ bultn_geneve_port);
+ }
+
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct qed_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i, rc = 0;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ memset(&tunn, 0, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!qed_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ QED_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ QED_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
+
+ /* If the PF modifies the VF's request, it should still return
+ * an error, since the applied configuration is partial or
+ * differs from the requested one.
+ */
+ rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ /* Does the QED client want to update anything? */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ qed_for_each_vf(p_hwfn, i) {
+ qed_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf, u8 status)
@@ -1970,21 +2280,16 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- union qed_qm_pq_params pq_params;
struct vfpf_start_txq_tlv *req;
struct qed_vf_q_info *p_queue;
int rc;
u16 pq;
- /* Prepare the parameters which would choose the right PQ */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.eth.is_vf = 1;
- pq_params.eth.vf_id = vf->relative_vf_id;
-
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
- if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ QED_IOV_VALIDATE_Q_DISABLE) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
@@ -2004,7 +2309,7 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
if (!p_queue->p_tx_cid)
goto out;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
req->pbl_addr, req->pbl_size, pq);
if (rc) {
@@ -2021,57 +2326,53 @@ out:
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
- u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+ u16 rxq_id, bool cqe_completion)
{
struct qed_vf_q_info *p_queue;
int rc = 0;
- int qid;
- if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+ if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
+ vf->relative_vf_id, rxq_id);
return -EINVAL;
+ }
- for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- p_queue = &vf->vf_queues[qid];
+ p_queue = &vf->vf_queues[rxq_id];
- if (!p_queue->p_rx_cid)
- continue;
-
- rc = qed_eth_rx_queue_stop(p_hwfn,
- p_queue->p_rx_cid,
- false, cqe_completion);
- if (rc)
- return rc;
+ rc = qed_eth_rx_queue_stop(p_hwfn,
+ p_queue->p_rx_cid,
+ false, cqe_completion);
+ if (rc)
+ return rc;
- vf->vf_queues[qid].p_rx_cid = NULL;
- vf->num_active_rxqs--;
- }
+ p_queue->p_rx_cid = NULL;
+ vf->num_active_rxqs--;
- return rc;
+ return 0;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+ struct qed_vf_info *vf, u16 txq_id)
{
- int rc = 0;
struct qed_vf_q_info *p_queue;
- int qid;
+ int rc = 0;
- if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+ if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
+ QED_IOV_VALIDATE_Q_ENABLE))
return -EINVAL;
- for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- p_queue = &vf->vf_queues[qid];
- if (!p_queue->p_tx_cid)
- continue;
+ p_queue = &vf->vf_queues[txq_id];
- rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
- if (rc)
- return rc;
+ rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+ if (rc)
+ return rc;
- p_queue->p_tx_cid = NULL;
- }
+ p_queue->p_tx_cid = NULL;
- return rc;
+ return 0;
}
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2080,20 +2381,28 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
int rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* There has never been an official driver that used this interface
+ * for stopping multiple queues, and it is now considered deprecated.
+ * Validate this isn't used here.
*/
req = &mbx->req_virt->stop_rxqs;
- rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
- req->num_rxqs, req->cqe_completion);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Rx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+ rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->cqe_completion);
+ if (!rc)
+ status = PFVF_STATUS_SUCCESS;
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
length, status);
}
@@ -2104,19 +2413,27 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
int rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* There has never been an official driver that used this interface
+ * for stopping multiple queues, and it is now considered deprecated.
+ * Validate this isn't used here.
*/
req = &mbx->req_virt->stop_txqs;
- rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_txqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Tx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
+ if (!rc)
+ status = PFVF_STATUS_SUCCESS;
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
length, status);
}
@@ -2141,22 +2458,17 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
/* Validate inputs */
- if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
- !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
- DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
- vf->relative_vf_id, req->rx_qid, req->num_rxqs);
- goto out;
- }
-
- for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
- if (!vf->vf_queues[qid].p_rx_cid) {
- DP_INFO(p_hwfn,
- "VF[%d] rx_qid = %d isn`t active!\n",
- vf->relative_vf_id, qid);
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+ if (!qed_iov_validate_rxq(p_hwfn, vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid, req->num_rxqs);
goto out;
}
+ /* Prepare the handlers */
+ for (i = 0; i < req->num_rxqs; i++) {
+ qid = req->rx_qid + i;
handlers[i] = vf->vf_queues[qid].p_rx_cid;
}
@@ -2372,7 +2684,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
for (i = 0; i < table_size; i++) {
q_idx = p_rss_tlv->rss_ind_table[i];
- if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+ if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d]: Omitting RSS due to wrong queue %04x\n",
@@ -2381,15 +2694,6 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
goto out;
}
- if (!vf->vf_queues[q_idx].p_rx_cid) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "VF[%d]: Omitting RSS due to inactive queue %08x\n",
- vf->relative_vf_id, q_idx);
- b_reject = true;
- goto out;
- }
-
p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
}
@@ -3042,9 +3346,10 @@ qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return rc;
}
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
- u16 i, found = 0;
+ bool found = false;
+ u16 i;
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -3054,7 +3359,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
if (!p_hwfn->cdev->p_iov_info) {
DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
- return 0;
+ return false;
}
/* Mark VFs */
@@ -3083,7 +3388,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
* VF flr until ACKs, we're safe.
*/
p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
- found = 1;
+ found = true;
}
}
@@ -3184,6 +3489,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
case CHANNEL_TLV_RELEASE:
qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -3289,11 +3597,17 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
if (!p_vf)
return;
- DP_INFO(p_hwfn,
- "VF [%d] - Malicious behavior [%02x]\n",
- p_vf->abs_vf_id, p_data->err_id);
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
- p_vf->b_malicious = true;
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
}
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
@@ -3414,6 +3728,29 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
@@ -3842,6 +4179,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
+ struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
struct qed_mcp_link_capabilities caps;
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
@@ -3858,9 +4196,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
if (!vf_info)
continue;
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+ /* Only hwfn0 is actually interested in the link speed.
+ * But since only it receives MFW link indications, the
+ * configuration must be taken from it - otherwise things like
+ * rate limiting for hwfn1 VFs would not work.
+ */
+ memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+ sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
sizeof(caps));
/* Modify link according to the VF's configured link state */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index a89605821522..81a497ce6585 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -270,6 +270,9 @@ enum qed_iov_wq_flag {
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port);
+
/**
* @brief Read sriov related information and allocated resources
* reads from configuration space, shmem, etc.
@@ -348,9 +351,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's VFs got FLRed. false otherwise.
*/
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
@@ -378,6 +381,12 @@ static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
return MAX_NUM_VFS;
}
+static inline void
+qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port)
+{
+}
+
static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
return 0;
@@ -407,10 +416,10 @@ static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
-static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
- u32 *disabled_vfs)
+static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+ u32 *disabled_vfs)
{
- return 0;
+ return false;
}
static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 15d2855ec563..11d71e5eea14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -134,14 +134,20 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
}
if (!*done) {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "VF <-- PF Timeout [Type %d]\n",
- p_req->first_tlv.tl.type);
+ DP_NOTICE(p_hwfn,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
rc = -EBUSY;
} else {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "PF response: %d [Type %d]\n",
- *done, p_req->first_tlv.tl.type);
+ if ((*done != PFVF_STATUS_SUCCESS) &&
+ (*done != PFVF_STATUS_NO_RESOURCE))
+ DP_NOTICE(p_hwfn,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
}
return rc;
@@ -228,7 +234,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
/* copy acquire response from buffer to p_hwfn */
memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
@@ -412,6 +418,155 @@ free_p_iov:
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+static void
+__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_src,
+ enum qed_tunn_clss mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= BIT(mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= BIT(mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+static void
+qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_src,
+ enum qed_tunn_clss mask,
+ u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
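
The two prep helpers above fold each tunnel type into a pair of per-class bitfields: tun_mode_update_mask says which classes the PF should reconsider, and tunn_mode carries the requested on/off state for exactly those classes. A minimal userspace sketch of that composition, with illustrative names rather than the driver's types:

#include <stdint.h>
#include <stdio.h>

enum tunn_clss { CLSS_VXLAN = 0, CLSS_L2GENEVE = 1 };

struct tunn_req {
	uint8_t update_mask;	/* which tunnel classes the PF should touch */
	uint8_t mode;		/* requested on/off state for those classes */
};

static void prep(struct tunn_req *req, int do_update, int enable,
		 enum tunn_clss bit)
{
	if (do_update) {
		req->update_mask |= 1u << bit;
		if (enable)
			req->mode |= 1u << bit;
	}
}

int main(void)
{
	struct tunn_req req = { 0, 0 };

	prep(&req, 1, 1, CLSS_VXLAN);		/* enable vxlan */
	prep(&req, 1, 0, CLSS_L2GENEVE);	/* explicitly disable l2-geneve */
	printf("mask=0x%x mode=0x%x\n", req.update_mask, req.mode);
	return 0;	/* prints mask=0x3 mode=0x1 */
}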
+
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode,
+ u8 tunn_cls, enum qed_tunn_mode val)
+{
+ if (feature_mask & BIT(val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
+
+static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ QED_MODE_VXLAN_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ QED_MODE_L2GENEVE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ QED_MODE_IPGENEVE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ QED_MODE_L2GRE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ QED_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
+}
+
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ int rc;
+
+ p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ QED_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ QED_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid,
@@ -1245,6 +1400,18 @@ static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
return true;
}
+static void
+qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port)
+{
+ struct qed_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
@@ -1264,12 +1431,16 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
void *cookie = hwfn->cdev->ops_cookie;
+ u16 vxlan_port, geneve_port;
+ qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
&is_mac_forced);
if (is_mac_exist && cookie)
ops->force_mac(cookie, mac, !!is_mac_forced);
+ ops->ports_update(cookie, vxlan_port, geneve_port);
+
/* Always update link configuration according to bulletin */
qed_link_update(hwfn);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 7da0b165d8bc..34ac70b0e5fe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -275,6 +275,8 @@ struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
+
+ /* this field is deprecated and should *always* be set to '1' */
u8 num_rxqs;
u8 cqe_completion;
u8 padding[4];
@@ -285,6 +287,8 @@ struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
+
+ /* this field is deprecated and should *always* be set to '1' */
u8 num_txqs;
u8 padding[5];
};
@@ -425,6 +429,43 @@ struct vfpf_ucast_filter_tlv {
u16 padding[3];
};
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
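
For the wire format, the trailing padding[2] is what keeps the body after first_tlv at 16 bytes. Below is a standalone sketch checking just that tail layout; the assumption that channel TLV bodies stay at 8-byte granularity, like the struct name, is mine and not taken from the driver:

#include <stdint.h>

struct tunn_param_body {	/* tail of the TLV after first_tlv */
	uint8_t  tun_mode_update_mask;
	uint8_t  tunn_mode;
	uint8_t  update_tun_cls;
	uint8_t  vxlan_clss;
	uint8_t  l2gre_clss;
	uint8_t  ipgre_clss;
	uint8_t  l2geneve_clss;
	uint8_t  ipgeneve_clss;
	uint8_t  update_geneve_port;
	uint8_t  update_vxlan_port;
	uint16_t geneve_port;
	uint16_t vxlan_port;
	uint8_t  padding[2];
};

_Static_assert(sizeof(struct tunn_param_body) == 16,
	       "padding[2] keeps the body at 8-byte granularity");

int main(void) { return 0; }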
+
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
@@ -440,6 +481,7 @@ union vfpf_tlvs {
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
@@ -449,6 +491,7 @@ union pfvf_tlvs {
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
};
enum qed_bulletin_bit {
@@ -509,7 +552,9 @@ struct qed_bulletin_content {
u8 partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
u8 sfp_tx_fault;
- u8 padding4[6];
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
u32 speed;
u32 partner_adv_speed;
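
The bulletin is memory shared between PF and VF drivers that may come from different builds, so later fields should keep their offsets; carving the two u16 ports out of padding4[6] preserves that, since both layouts occupy the same six bytes. A tiny compile-time check of just that claim, using hypothetical struct names:

#include <stdint.h>

struct old_pad {	/* what padding4[6] used to occupy */
	uint8_t padding4[6];
};

struct new_pad {	/* the replacement carved out of it */
	uint16_t vxlan_udp_port;
	uint16_t geneve_udp_port;
	uint8_t padding4[2];
};

_Static_assert(sizeof(struct old_pad) == sizeof(struct new_pad),
	       "the carve-out must not move later bulletin fields");

int main(void) { return 0; }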
@@ -551,6 +596,7 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -868,6 +914,9 @@ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_bulletin_content *p_bulletin);
void qed_iov_vf_task(struct work_struct *work);
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
@@ -1029,6 +1078,17 @@ __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
static inline void qed_iov_vf_task(struct work_struct *work)
{
}
+
+static inline void
+qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+}
+
+static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn)
+{
+ return -EINVAL;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f2aaef2cfb86..9b4f08b6f9b9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -41,6 +41,9 @@
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <linux/io.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
@@ -50,7 +53,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 10
-#define QEDE_ENGINEERING_VERSION 20
+#define QEDE_ENGINEERING_VERSION 21
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
@@ -58,7 +61,7 @@
#define DRV_MODULE_SYM qede
-struct qede_stats {
+struct qede_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -90,11 +93,6 @@ struct qede_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -111,17 +109,39 @@ struct qede_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 tx_1519_to_2047_byte_packets;
u64 tx_2048_to_4095_byte_packets;
u64 tx_4096_to_9216_byte_packets;
u64 tx_9217_to_16383_byte_packets;
- u64 tx_pause_frames;
- u64 tx_pfc_frames;
u64 tx_lpi_entry_count;
u64 tx_total_collisions;
- u64 brb_truncates;
- u64 brb_discards;
- u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct qede_stats {
+ struct qede_stats_common common;
+
+ union {
+ struct qede_stats_bb bb;
+ struct qede_stats_ah ah;
+ };
};
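
Counters that exist on both ASIC generations stay in qede_stats_common, while the BB- and AH-specific tails share storage through the anonymous union, since any given device is only ever one chip type. A cut-down, runnable model of the same shape, with field lists shortened to one counter each:

#include <stdint.h>
#include <stdio.h>

struct stats_common { uint64_t rx_crc_errors; };
struct stats_bb     { uint64_t tx_total_collisions; };
struct stats_ah     { uint64_t rx_1519_to_max_byte_packets; };

struct stats {
	struct stats_common common;
	union {			/* a device is only ever BB or AH */
		struct stats_bb bb;
		struct stats_ah ah;
	};
};

int main(void)
{
	struct stats s = { .common = { 7 }, .bb = { 3 } };

	/* Only the union member matching the chip type is meaningful. */
	printf("crc=%llu collisions=%llu\n",
	       (unsigned long long)s.common.rx_crc_errors,
	       (unsigned long long)s.bb.tx_total_collisions);
	return 0;
}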
struct qede_vlan {
@@ -147,10 +167,11 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
- u32 flags;
-#define QEDE_FLAG_IS_VF BIT(0)
+ unsigned long flags;
+#define QEDE_FLAG_IS_VF BIT(0)
#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
#define QEDE_TX_TIMESTAMPING_EN BIT(1)
+#define QEDE_FLAGS_PTP_TX_IN_PRORGESS BIT(2)
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
@@ -158,6 +179,10 @@ struct qede_dev {
struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_IS_BB(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
+#define QEDE_IS_AH(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array;
u8 req_num_tx;
@@ -216,7 +241,10 @@ struct qede_dev {
u16 vxlan_dst_port;
u16 geneve_dst_port;
- bool wol_enabled;
+#ifdef CONFIG_RFS_ACCEL
+ struct qede_arfs *arfs;
+#endif
+ bool wol_enabled;
struct qede_rdma_dev rdma_info;
@@ -292,21 +320,24 @@ struct qede_rx_queue {
u8 data_direction;
u8 rxq_id;
+ /* Used once per each NAPI run */
+ u16 num_rx_buffers;
+
+ u16 rx_headroom;
+
u32 rx_buf_size;
u32 rx_buf_seg_size;
- u64 rcv_pkts;
-
struct sw_rx_data *sw_rx_ring;
struct qed_chain rx_bd_ring;
struct qed_chain rx_comp_ring ____cacheline_aligned;
- /* Used once per each NAPI run */
- u16 num_rx_buffers;
-
/* GRO */
struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+ /* Used once per each NAPI run */
+ u64 rcv_pkts;
+
u64 rx_hw_errors;
u64 rx_alloc_errors;
u64 rx_ip_frags;
@@ -328,6 +359,11 @@ struct sw_tx_bd {
#define QEDE_TSO_SPLIT_BD BIT(0)
};
+struct sw_tx_xdp {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
struct qede_tx_queue {
u8 is_xdp;
bool is_legacy;
@@ -351,11 +387,11 @@ struct qede_tx_queue {
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
/* Regular Tx requires skb + metadata for release purpose,
- * while XDP requires only the pages themselves.
+ * while XDP requires the pages and the mapped address.
*/
union {
struct sw_tx_bd *skbs;
- struct page **pages;
+ struct sw_tx_xdp *xdp;
} sw_tx_ring;
struct qed_chain tx_pbl;
@@ -407,8 +443,20 @@ struct qede_fastpath {
#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
#define QEDE_SP_RX_MODE 1
-#define QEDE_SP_VXLAN_PORT_CONFIG 2
-#define QEDE_SP_GENEVE_PORT_CONFIG 3
+
+#ifdef CONFIG_RFS_ACCEL
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
+void qede_free_arfs(struct qede_dev *edev);
+int qede_alloc_arfs(struct qede_dev *edev);
+
+#define QEDE_SP_ARFS_CONFIG 4
+#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
+#define QEDE_RFS_MAX_FLTR 256
+#endif
struct qede_reload_args {
void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
@@ -433,6 +481,7 @@ irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
index 03e8c0212433..a9e7379313db 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -281,6 +281,11 @@ static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
struct dcb_app *app)
{
struct qede_dev *edev = netdev_priv(netdev);
+ int err;
+
+ err = dcb_ieee_setapp(netdev, app);
+ if (err)
+ return err;
return edev->ops->dcb->ieee_setapp(edev->cdev, app);
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 897953133245..4dcfe9614731 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -75,16 +75,33 @@ static const struct {
QEDE_TQSTAT(stopped_cnt),
};
-#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
-#define QEDE_STAT_STRING(stat_name) (#stat_name)
-#define _QEDE_STAT(stat_name, pf_only) \
- {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
-#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
-#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
+#define QEDE_STAT_OFFSET(stat_name, type, base) \
+ (offsetof(type, stat_name) + (base))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, type, base, attr) \
+ {QEDE_STAT_OFFSET(stat_name, type, base), \
+ QEDE_STAT_STRING(stat_name), \
+ attr}
+#define QEDE_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
+#define QEDE_PF_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
+ BIT(QEDE_STAT_PF_ONLY))
+#define QEDE_PF_BB_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_bb, \
+ offsetof(struct qede_stats, bb), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
+#define QEDE_PF_AH_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_ah, \
+ offsetof(struct qede_stats, ah), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
- bool pf_only;
+ unsigned long attr;
+#define QEDE_STAT_PF_ONLY 0
+#define QEDE_STAT_BB_ONLY 1
+#define QEDE_STAT_AH_ONLY 2
} qede_stats_arr[] = {
QEDE_STAT(rx_ucast_bytes),
QEDE_STAT(rx_mcast_bytes),
@@ -106,22 +123,23 @@ static const struct {
QEDE_PF_STAT(rx_256_to_511_byte_packets),
QEDE_PF_STAT(rx_512_to_1023_byte_packets),
QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
- QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
- QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
- QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
- QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
- QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
QEDE_PF_STAT(tx_256_to_511_byte_packets),
QEDE_PF_STAT(tx_512_to_1023_byte_packets),
QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
- QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
- QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
- QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
- QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
-
+ QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
QEDE_PF_STAT(rx_mac_crtl_frames),
QEDE_PF_STAT(tx_mac_ctrl_frames),
QEDE_PF_STAT(rx_pause_frames),
@@ -136,8 +154,8 @@ static const struct {
QEDE_PF_STAT(rx_jabbers),
QEDE_PF_STAT(rx_undersize_packets),
QEDE_PF_STAT(rx_fragments),
- QEDE_PF_STAT(tx_lpi_entry_count),
- QEDE_PF_STAT(tx_total_collisions),
+ QEDE_PF_BB_STAT(tx_lpi_entry_count),
+ QEDE_PF_BB_STAT(tx_total_collisions),
QEDE_PF_STAT(brb_truncates),
QEDE_PF_STAT(brb_discards),
QEDE_STAT(no_buff_discards),
@@ -155,6 +173,12 @@ static const struct {
};
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+#define QEDE_STAT_IS_PF_ONLY(i) \
+ test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_BB_ONLY(i) \
+ test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_AH_ONLY(i) \
+ test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
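
Each table entry now carries a byte offset, computed as the offset of the sub-struct within struct qede_stats plus the member's offset inside it, and an attribute bitmask, so a single generic loop can both filter entries and fetch values. A userspace model of the scheme, with illustrative names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct st_common { uint64_t rx_crc_errors; };
struct st_bb     { uint64_t tx_total_collisions; };
struct st { struct st_common common; struct st_bb bb; };

#define ATTR_PF_ONLY 0x1
#define ATTR_BB_ONLY 0x2

/* offset = member offset in the sub-struct + sub-struct base in st */
static const struct {
	size_t off;
	const char *name;
	unsigned int attr;
} arr[] = {
	{ offsetof(struct st_common, rx_crc_errors) + 0,
	  "rx_crc_errors", ATTR_PF_ONLY },
	{ offsetof(struct st_bb, tx_total_collisions) +
	  offsetof(struct st, bb),
	  "tx_total_collisions", ATTR_PF_ONLY | ATTR_BB_ONLY },
};

int main(void)
{
	struct st s = { { 5 }, { 9 } };
	int is_ah = 1;		/* pretend this device is an AH chip */

	for (size_t i = 0; i < sizeof(arr) / sizeof(arr[0]); i++) {
		if (is_ah && (arr[i].attr & ATTR_BB_ONLY))
			continue;	/* irrelevant stat: skip it */
		printf("%s = %llu\n", arr[i].name, (unsigned long long)
		       *(const uint64_t *)((const char *)&s + arr[i].off));
	}
	return 0;
}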
enum {
QEDE_PRI_FLAG_CMT,
@@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
}
}
+static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
+{
+ return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
+ (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
+ (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
+}
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
struct qede_fastpath *fp;
@@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
/* Account for non-queue statistics */
for (i = 0; i < QEDE_NUM_STATS; i++) {
- if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ if (qede_is_irrelevant_stat(edev, i))
continue;
strcpy(buf, qede_stats_arr[i].string);
buf += ETH_GSTRING_LEN;
@@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
for (i = 0; i < QEDE_NUM_STATS; i++) {
- if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ if (qede_is_irrelevant_stat(edev, i))
continue;
*buf = *((u64 *)(((void *)&edev->stats) +
qede_stats_arr[i].offset));
@@ -323,17 +354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev,
static int qede_get_sset_count(struct net_device *dev, int stringset)
{
struct qede_dev *edev = netdev_priv(dev);
- int num_stats = QEDE_NUM_STATS;
+ int num_stats = QEDE_NUM_STATS, i;
switch (stringset) {
case ETH_SS_STATS:
- if (IS_VF(edev)) {
- int i;
-
- for (i = 0; i < QEDE_NUM_STATS; i++)
- if (qede_stats_arr[i].pf_only)
- num_stats--;
- }
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_is_irrelevant_stat(edev, i))
+ num_stats--;
/* Account for the Regular Tx statistics */
num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 107c3fda4792..eb5652073ca8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -38,6 +38,459 @@
#include <linux/qed/qed_if.h>
#include "qede.h"
+#ifdef CONFIG_RFS_ACCEL
+struct qede_arfs_tuple {
+ union {
+ __be32 src_ipv4;
+ struct in6_addr src_ipv6;
+ };
+ union {
+ __be32 dst_ipv4;
+ struct in6_addr dst_ipv6;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 eth_proto;
+ u8 ip_proto;
+};
+
+struct qede_arfs_fltr_node {
+#define QEDE_FLTR_VALID 0
+ unsigned long state;
+
+ /* pointer to aRFS packet buffer */
+ void *data;
+
+ /* dma map address of aRFS packet buffer */
+ dma_addr_t mapping;
+
+ /* length of aRFS packet buffer */
+ int buf_len;
+
+ /* tuples to hold from aRFS packet buffer */
+ struct qede_arfs_tuple tuple;
+
+ u32 flow_id;
+ u16 sw_id;
+ u16 rxq_id;
+ u16 next_rxq_id;
+ bool filter_op;
+ bool used;
+ struct hlist_node node;
+};
+
+struct qede_arfs {
+#define QEDE_ARFS_POLL_COUNT 100
+#define QEDE_RFS_FLW_BITSHIFT (4)
+#define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
+ struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
+
+ /* lock for filter list access */
+ spinlock_t arfs_list_lock;
+ unsigned long *arfs_fltr_bmap;
+ int filter_count;
+ bool enable;
+};
+
+static void qede_configure_arfs_fltr(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *n,
+ u16 rxq_id, bool add_fltr)
+{
+ const struct qed_eth_ops *op = edev->ops;
+
+ if (n->used)
+ return;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+ "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ add_fltr ? "Adding" : "Deleting",
+ n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
+ ntohs(n->tuple.dst_port), rxq_id);
+
+ n->used = true;
+ n->filter_op = add_fltr;
+ op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
+ rxq_id, add_fltr);
+}
+
+static void
+qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
+{
+ kfree(fltr->data);
+ clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+ kfree(fltr);
+}
+
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
+{
+ struct qede_arfs_fltr_node *fltr = filter;
+ struct qede_dev *edev = dev;
+
+ if (fw_rc) {
+ DP_NOTICE(edev,
+ "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ fw_rc, fltr->flow_id, fltr->sw_id,
+ ntohs(fltr->tuple.src_port),
+ ntohs(fltr->tuple.dst_port), fltr->rxq_id);
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ fltr->used = false;
+ clear_bit(QEDE_FLTR_VALID, &fltr->state);
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ return;
+ }
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ fltr->used = false;
+
+ if (fltr->filter_op) {
+ set_bit(QEDE_FLTR_VALID, &fltr->state);
+ if (fltr->rxq_id != fltr->next_rxq_id)
+ qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
+ false);
+ } else {
+ clear_bit(QEDE_FLTR_VALID, &fltr->state);
+ if (fltr->rxq_id != fltr->next_rxq_id) {
+ fltr->rxq_id = fltr->next_rxq_id;
+ qede_configure_arfs_fltr(edev, fltr,
+ fltr->rxq_id, true);
+ }
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
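
The callback above implements rxq migration as a two-step handshake: a completed add validates the filter and, if the flow has moved, issues a delete on the old queue; a completed delete re-adds on the new queue. Only one hardware operation per filter is in flight at a time, tracked by 'used'. A compact model of those transitions, using standalone types rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

struct fltr {
	bool valid, used, last_op_was_add;
	int rxq_id, next_rxq_id;
};

static void hw_config(struct fltr *f, int rxq, bool add)
{
	f->used = true;			/* one op in flight per filter */
	f->last_op_was_add = add;
	printf("%s on rxq %d\n", add ? "add" : "del", rxq);
}

static void on_complete(struct fltr *f)	/* models the fw callback */
{
	f->used = false;
	if (f->last_op_was_add) {
		f->valid = true;
		if (f->rxq_id != f->next_rxq_id)	/* flow moved */
			hw_config(f, f->rxq_id, false);
	} else {
		f->valid = false;
		if (f->rxq_id != f->next_rxq_id) {
			f->rxq_id = f->next_rxq_id;
			hw_config(f, f->rxq_id, true);
		}
	}
}

int main(void)
{
	struct fltr f = { false, false, false, 0, 2 };

	hw_config(&f, f.rxq_id, true);	/* initial add on rxq 0 */
	on_complete(&f);		/* success, flow moved: del rxq 0 */
	on_complete(&f);		/* delete done: re-add on rxq 2 */
	return 0;
}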
+
+/* Should be called while qede_lock is held */
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
+{
+ int i;
+
+ for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
+ struct hlist_node *temp;
+ struct hlist_head *head;
+ struct qede_arfs_fltr_node *fltr;
+
+ head = &edev->arfs->arfs_hl_head[i];
+
+ hlist_for_each_entry_safe(fltr, temp, head, node) {
+ bool del = false;
+
+ if (edev->state != QEDE_STATE_OPEN)
+ del = true;
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
+ !fltr->used) || free_fltr) {
+ hlist_del(&fltr->node);
+ dma_unmap_single(&edev->pdev->dev,
+ fltr->mapping,
+ fltr->buf_len, DMA_TO_DEVICE);
+ qede_free_arfs_filter(edev, fltr);
+ edev->arfs->filter_count--;
+ } else {
+ if ((rps_may_expire_flow(edev->ndev,
+ fltr->rxq_id,
+ fltr->flow_id,
+ fltr->sw_id) || del) &&
+ !free_fltr)
+ qede_configure_arfs_fltr(edev, fltr,
+ fltr->rxq_id,
+ false);
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ }
+ }
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ if (!edev->arfs->filter_count) {
+ if (edev->arfs->enable) {
+ edev->arfs->enable = false;
+ edev->ops->configure_arfs_searcher(edev->cdev, false);
+ }
+ } else {
+ set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task,
+ QEDE_SP_TASK_POLL_DELAY);
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
+
+/* This function waits until all aRFS filters get deleted and freed.
+ * On timeout it frees all filters forcefully.
+ */
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
+{
+ int count = QEDE_ARFS_POLL_COUNT;
+
+ while (count) {
+ qede_process_arfs_filters(edev, false);
+
+ if (!edev->arfs->filter_count)
+ break;
+
+ msleep(100);
+ count--;
+ }
+
+ if (!count) {
+ DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
+
+ /* Something is terribly wrong, free forcefully */
+ qede_process_arfs_filters(edev, true);
+ }
+}
+
+int qede_alloc_arfs(struct qede_dev *edev)
+{
+ int i;
+
+ edev->arfs = vzalloc(sizeof(*edev->arfs));
+ if (!edev->arfs)
+ return -ENOMEM;
+
+ spin_lock_init(&edev->arfs->arfs_list_lock);
+
+ for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
+ INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);
+
+ edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
+ if (!edev->ndev->rx_cpu_rmap) {
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+ return -ENOMEM;
+ }
+
+ edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
+ sizeof(long));
+ if (!edev->arfs->arfs_fltr_bmap) {
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+ edev->ndev->rx_cpu_rmap = NULL;
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void qede_free_arfs(struct qede_dev *edev)
+{
+ if (!edev->arfs)
+ return;
+
+ if (edev->ndev->rx_cpu_rmap)
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+ edev->ndev->rx_cpu_rmap = NULL;
+ vfree(edev->arfs->arfs_fltr_bmap);
+ edev->arfs->arfs_fltr_bmap = NULL;
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+}
+
+static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
+ const struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
+ tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
+ return true;
+ else
+ return false;
+ } else {
+ struct in6_addr *src = &tpos->tuple.src_ipv6;
+ u8 size = sizeof(struct in6_addr);
+
+ if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
+ !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
+ return true;
+ else
+ return false;
+ }
+}
+
+static struct qede_arfs_fltr_node *
+qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
+ __be16 src_port, __be16 dst_port, u8 ip_proto)
+{
+ struct qede_arfs_fltr_node *tpos;
+
+ hlist_for_each_entry(tpos, h, node)
+ if (tpos->tuple.ip_proto == ip_proto &&
+ tpos->tuple.eth_proto == skb->protocol &&
+ qede_compare_ip_addr(tpos, skb) &&
+ tpos->tuple.src_port == src_port &&
+ tpos->tuple.dst_port == dst_port)
+ return tpos;
+
+ return NULL;
+}
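
Lookup cost is kept low by spreading flows over 1 << QEDE_RFS_FLW_BITSHIFT = 16 buckets keyed by the low bits of the skb hash, then doing a full 5-tuple compare within the bucket. A plain-C model of that search path, with illustrative types and the same field-by-field compare:

#include <stdint.h>
#include <stdio.h>

#define FLW_BITSHIFT 4
#define FLW_MASK ((1 << FLW_BITSHIFT) - 1)

struct tuple { uint32_t saddr, daddr; uint16_t sport, dport; uint8_t proto; };
struct node { struct tuple t; struct node *next; };

static struct node *buckets[1 << FLW_BITSHIFT];

static struct node *search(uint32_t skb_hash, const struct tuple *t)
{
	struct node *n;

	for (n = buckets[skb_hash & FLW_MASK]; n; n = n->next)
		if (n->t.proto == t->proto &&
		    n->t.saddr == t->saddr && n->t.daddr == t->daddr &&
		    n->t.sport == t->sport && n->t.dport == t->dport)
			return n;	/* full 5-tuple match */
	return NULL;
}

int main(void)
{
	static struct node n1 = { { 0x0a000001, 0x0a000002, 1024, 80, 6 } };
	uint32_t hash = 0x1234;	/* stand-in for skb_get_hash_raw() */

	n1.next = buckets[hash & FLW_MASK];
	buckets[hash & FLW_MASK] = &n1;
	printf("%s\n", search(hash, &n1.t) ? "hit" : "miss");
	return 0;
}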
+
+static struct qede_arfs_fltr_node *
+qede_alloc_filter(struct qede_dev *edev, int min_hlen)
+{
+ struct qede_arfs_fltr_node *n;
+ int bit_id;
+
+ bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
+ QEDE_RFS_MAX_FLTR);
+
+ if (bit_id >= QEDE_RFS_MAX_FLTR)
+ return NULL;
+
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
+ if (!n)
+ return NULL;
+
+ n->data = kzalloc(min_hlen, GFP_ATOMIC);
+ if (!n->data) {
+ kfree(n);
+ return NULL;
+ }
+
+ n->sw_id = (u16)bit_id;
+ set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
+ return n;
+}
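
sw_id allocation is just a 256-bit bitmap: the index of the first clear bit becomes the filter's software id, and qede_free_arfs_filter() clears it again. A runnable stand-in for find_first_zero_bit()/set_bit() over a plain byte array:

#include <stdio.h>

#define MAX_FLTR 256

static unsigned char bmap[MAX_FLTR / 8];

static int alloc_id(void)
{
	for (int i = 0; i < MAX_FLTR; i++)
		if (!(bmap[i / 8] & (1u << (i % 8)))) {
			bmap[i / 8] |= 1u << (i % 8);
			return i;	/* becomes the filter's sw_id */
		}
	return -1;	/* table full: caller fails the steer request */
}

static void free_id(int id)
{
	bmap[id / 8] &= ~(1u << (id % 8));
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	free_id(a);
	printf("a=%d b=%d reused=%d\n", a, b, alloc_id()); /* 0 1 0 */
	return 0;
}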
+
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_arfs_fltr_node *n;
+ int min_hlen, rc, tp_offset;
+ struct ethhdr *eth;
+ __be16 *ports;
+ u16 tbl_idx;
+ u8 ip_proto;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_proto = ip_hdr(skb)->protocol;
+ tp_offset = sizeof(struct iphdr);
+ } else {
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ tp_offset = sizeof(struct ipv6hdr);
+ }
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
+ return -EPROTONOSUPPORT;
+
+ ports = (__be16 *)(skb->data + tp_offset);
+ tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
+ skb, ports[0], ports[1], ip_proto);
+
+ if (n) {
+ /* Filter match */
+ n->next_rxq_id = rxq_index;
+
+ if (test_bit(QEDE_FLTR_VALID, &n->state)) {
+ if (n->rxq_id != rxq_index)
+ qede_configure_arfs_fltr(edev, n, n->rxq_id,
+ false);
+ } else {
+ if (!n->used) {
+ n->rxq_id = rxq_index;
+ qede_configure_arfs_fltr(edev, n, n->rxq_id,
+ true);
+ }
+ }
+
+ rc = n->sw_id;
+ goto ret_unlock;
+ }
+
+ min_hlen = ETH_HLEN + skb_headlen(skb);
+
+ n = qede_alloc_filter(edev, min_hlen);
+ if (!n) {
+ rc = -ENOMEM;
+ goto ret_unlock;
+ }
+
+ n->buf_len = min_hlen;
+ n->rxq_id = rxq_index;
+ n->next_rxq_id = rxq_index;
+ n->tuple.src_port = ports[0];
+ n->tuple.dst_port = ports[1];
+ n->flow_id = flow_id;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
+ n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
+ } else {
+ memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr));
+ }
+
+ eth = (struct ethhdr *)n->data;
+ eth->h_proto = skb->protocol;
+ n->tuple.eth_proto = skb->protocol;
+ n->tuple.ip_proto = ip_proto;
+ memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
+
+ n->mapping = dma_map_single(&edev->pdev->dev, n->data,
+ n->buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
+ DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
+ qede_free_arfs_filter(edev, n);
+ rc = -ENOMEM;
+ goto ret_unlock;
+ }
+
+ INIT_HLIST_NODE(&n->node);
+ hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
+ edev->arfs->filter_count++;
+
+ if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
+ edev->ops->configure_arfs_searcher(edev->cdev, true);
+ edev->arfs->enable = true;
+ }
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+
+ set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+ return n->sw_id;
+
+ret_unlock:
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ return rc;
+}
+#endif
+
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
+{
+ struct qede_dev *edev = dev;
+
+ if (edev->vxlan_dst_port != vxlan_port)
+ edev->vxlan_dst_port = 0;
+
+ if (edev->geneve_dst_port != geneve_port)
+ edev->geneve_dst_port = 0;
+}
+
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
struct qede_dev *edev = dev;
@@ -441,69 +894,112 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qed_tunn_params tunn_params;
u16 t_port = ntohs(ti->port);
+ int rc;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
+ if (!edev->dev_info.common.vxlan_enable)
+ return;
+
if (edev->vxlan_dst_port)
return;
- edev->vxlan_dst_port = t_port;
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
- t_port);
+ __qede_lock(edev);
+ rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
+ if (!rc) {
+ edev->vxlan_dst_port = t_port;
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
+ t_port);
+ } else {
+ DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
+ t_port);
+ }
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
break;
case UDP_TUNNEL_TYPE_GENEVE:
+ if (!edev->dev_info.common.geneve_enable)
+ return;
+
if (edev->geneve_dst_port)
return;
- edev->geneve_dst_port = t_port;
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = t_port;
+
+ __qede_lock(edev);
+ rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
+ if (!rc) {
+ edev->geneve_dst_port = t_port;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Added geneve port=%d\n", t_port);
+ } else {
+ DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
+ t_port);
+ }
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
- t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
default:
return;
}
-
- schedule_delayed_work(&edev->sp_task, 0);
}
-void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+void qede_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qed_tunn_params tunn_params;
u16 t_port = ntohs(ti->port);
+ memset(&tunn_params, 0, sizeof(tunn_params));
+
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
if (t_port != edev->vxlan_dst_port)
return;
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = 0;
+
+ __qede_lock(edev);
+ edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
edev->vxlan_dst_port = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
t_port);
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
break;
case UDP_TUNNEL_TYPE_GENEVE:
if (t_port != edev->geneve_dst_port)
return;
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = 0;
+
+ __qede_lock(edev);
+ edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
edev->geneve_dst_port = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
default:
return;
}
-
- schedule_delayed_work(&edev->sp_task, 0);
}
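
Both add and delete now program the firmware synchronously under __qede_lock() instead of deferring to sp_task, with delete expressed as updating the port to 0. A sketch of that funnel for the vxlan case, using stand-in types and a fake tunn_config op:

#include <stdint.h>
#include <stdio.h>

struct tunn_params { uint8_t update_vxlan_port; uint16_t vxlan_port; };

static int tunn_config(const struct tunn_params *p)
{
	printf("fw: vxlan udp port -> %u\n", p->vxlan_port);
	return 0;	/* pretend firmware accepted it */
}

static uint16_t vxlan_dst_port;

static void udp_port_set(uint16_t port)	/* port == 0 means delete */
{
	struct tunn_params p = { .update_vxlan_port = 1, .vxlan_port = port };

	if (!tunn_config(&p))	/* cache only what fw accepted */
		vxlan_dst_port = port;
}

int main(void)
{
	udp_port_set(4789);	/* ndo_udp_tunnel_add */
	udp_port_set(0);	/* ndo_udp_tunnel_del */
	return 0;
}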
static void qede_xdp_reload_func(struct qede_dev *edev,
@@ -520,11 +1016,6 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
struct qede_reload_args args;
- if (prog && prog->xdp_adjust_head) {
- DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
- return -EOPNOTSUPP;
- }
-
/* If we're called, there was already a bpf reference increment */
args.func = &qede_xdp_reload_func;
args.u.new_prog = prog;
@@ -537,6 +1028,11 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
struct qede_dev *edev = netdev_priv(dev);
+ if (IS_VF(edev)) {
+ DP_NOTICE(edev, "VFs don't support XDP\n");
+ return -EOPNOTSUPP;
+ }
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1e65038c8fc0..7b6f41d06245 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -87,7 +87,8 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
WARN_ON(!rx_bd);
rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
+ rxq->rx_headroom);
rxq->sw_rx_prod++;
rxq->filled_buffers++;
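
Folding rx_headroom into the low half alone is safe here: the rx buffers are page-backed, so the mapping's low bits are page-aligned, and the in-page offset plus headroom stays below PAGE_SIZE, so the addition can never carry into the upper 32 bits. A quick runnable check of that argument:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t mapping = 0x1ffff0000ULL;	/* page-aligned DMA address */
	uint32_t headroom = 256;		/* e.g. XDP_PACKET_HEADROOM */
	uint32_t lo = (uint32_t)mapping + headroom;
	uint32_t hi = (uint32_t)(mapping >> 32);

	/* No carry is lost: the in-page offset stays below PAGE_SIZE. */
	assert((((uint64_t)hi << 32) | lo) == mapping + headroom);
	return 0;
}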
@@ -360,7 +361,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
metadata->mapping + padding,
length, PCI_DMA_TODEVICE);
- txq->sw_tx_ring.pages[idx] = metadata->data;
+ txq->sw_tx_ring.xdp[idx].page = metadata->data;
+ txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
txq->sw_tx_prod++;
/* Mark the fastpath for future XDP doorbell */
@@ -384,19 +386,19 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- struct eth_tx_1st_bd *bd;
- u16 hw_bd_cons;
+ u16 hw_bd_cons, idx;
hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();
while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
- bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ qed_chain_consume(&txq->tx_pbl);
+ idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
- dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
- NUM_TX_BDS_MAX]);
+ dma_unmap_page(&edev->pdev->dev,
+ txq->sw_tx_ring.xdp[idx].mapping,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(txq->sw_tx_ring.xdp[idx].page);
txq->sw_tx_cons++;
txq->xmit_pkts++;
@@ -508,7 +510,8 @@ static inline void qede_reuse_page(struct qede_rx_queue *rxq,
new_mapping = curr_prod->mapping + curr_prod->page_offset;
rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
- rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
+ rxq->rx_headroom);
rxq->sw_rx_prod++;
curr_cons->data = NULL;
@@ -624,7 +627,6 @@ static inline void qede_skb_receive(struct qede_dev *edev,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&fp->napi, skb);
- rxq->rcv_pkts++;
}
static void qede_set_gro_params(struct qede_dev *edev,
@@ -884,9 +886,9 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
"Strange - TPA cont with more than a single len_list entry\n");
}
-static void qede_tpa_end(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct eth_fast_path_rx_tpa_end_cqe *cqe)
+static int qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
struct qede_rx_queue *rxq = fp->rxq;
struct qede_agg_info *tpa_info;
@@ -934,11 +936,12 @@ static void qede_tpa_end(struct qede_dev *edev,
tpa_info->state = QEDE_AGG_STATE_NONE;
- return;
+ return 1;
err:
tpa_info->state = QEDE_AGG_STATE_NONE;
dev_kfree_skb_any(tpa_info->skb);
tpa_info->skb = NULL;
+ return 0;
}
static u8 qede_check_notunn_csum(u16 flag)
@@ -990,14 +993,15 @@ static bool qede_rx_xdp(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct bpf_prog *prog,
struct sw_rx_data *bd,
- struct eth_fast_path_rx_reg_cqe *cqe)
+ struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 *data_offset, u16 *len)
{
- u16 len = le16_to_cpu(cqe->len_on_first_bd);
struct xdp_buff xdp;
enum xdp_action act;
- xdp.data = page_address(bd->data) + cqe->placement_offset;
- xdp.data_end = xdp.data + len;
+ xdp.data_hard_start = page_address(bd->data);
+ xdp.data = xdp.data_hard_start + *data_offset;
+ xdp.data_end = xdp.data + *len;
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
@@ -1007,6 +1011,10 @@ static bool qede_rx_xdp(struct qede_dev *edev,
act = bpf_prog_run_xdp(prog, &xdp);
rcu_read_unlock();
+ /* Recalculate, as XDP might have changed the headers */
+ *data_offset = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
+
if (act == XDP_PASS)
return true;
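
Passing data_hard_start and re-deriving *data_offset and *len from the xdp_buff pointers is what makes bpf_xdp_adjust_head() work: the program may move xdp.data, so the CQE's placement offset is stale once the program has run. A runnable model of that bookkeeping, where run_prog() stands in for the BPF program:

#include <stddef.h>
#include <stdio.h>

struct xdp_buff { void *data_hard_start, *data, *data_end; };

static void run_prog(struct xdp_buff *x)
{
	x->data = (char *)x->data + 14;	/* e.g. program pops an L2 header */
}

int main(void)
{
	char page[4096];
	size_t offset = 256, len = 1000;
	struct xdp_buff x = {
		.data_hard_start = page,
		.data = page + offset,
		.data_end = page + offset + len,
	};

	run_prog(&x);

	/* Recalculate, as XDP might have changed the headers */
	offset = (char *)x.data - (char *)x.data_hard_start;
	len = (char *)x.data_end - (char *)x.data;
	printf("offset=%zu len=%zu\n", offset, len);	/* 270, 986 */
	return 0;
}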
@@ -1025,7 +1033,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
/* Now if there's a transmission problem, we'd still have to
* throw current buffer, as replacement was already allocated.
*/
- if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+ if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
dma_unmap_page(rxq->dev, bd->mapping,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(bd->data);
@@ -1052,7 +1060,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
struct sw_rx_data *bd, u16 len,
u16 pad)
{
- unsigned int offset = bd->page_offset;
+ unsigned int offset = bd->page_offset + pad;
struct skb_frag_struct *frag;
struct page *page = bd->data;
unsigned int pull_len;
@@ -1069,7 +1077,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
*/
if (len + pad <= edev->rx_copybreak) {
memcpy(skb_put(skb, len),
- page_address(page) + pad + offset, len);
+ page_address(page) + offset, len);
qede_reuse_page(rxq, bd);
goto out;
}
@@ -1077,7 +1085,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
frag = &skb_shinfo(skb)->frags[0];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, pad + offset, len, rxq->rx_buf_seg_size);
+ page, offset, len, rxq->rx_buf_seg_size);
va = skb_frag_address(frag);
pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
@@ -1178,8 +1186,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
return 0;
case ETH_RX_CQE_TYPE_TPA_END:
- qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
- return 1;
+ return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
default:
return 0;
}
@@ -1224,12 +1231,13 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
fp_cqe = &cqe->fast_path_regular;
len = le16_to_cpu(fp_cqe->len_on_first_bd);
- pad = fp_cqe->placement_offset;
+ pad = fp_cqe->placement_offset + rxq->rx_headroom;
/* Run eBPF program if one is attached */
if (xdp_prog)
- if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
- return 1;
+ if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
+ &pad, &len))
+ return 0;
/* If this is an error packet then drop it */
flags = cqe->fast_path_regular.pars_flags.flags;
@@ -1290,8 +1298,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_rx_queue *rxq = fp->rxq;
struct qede_dev *edev = fp->edev;
+ int work_done = 0, rcv_pkts = 0;
u16 hw_comp_cons, sw_comp_cons;
- int work_done = 0;
hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1305,12 +1313,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
/* Loop to complete all indicated BDs */
while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
- qede_rx_process_cqe(edev, fp, rxq);
+ rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
work_done++;
}
+ rxq->rcv_pkts += rcv_pkts;
+
/* Allocate replacement buffers */
while (rxq->num_rx_buffers - rxq->filled_buffers)
if (qede_alloc_rx_buffer(rxq, false))
@@ -1687,13 +1697,24 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
}
/* Disable offloads for geneve tunnels, as HW can't parse
- * the geneve header which has option length greater than 32B.
+ * the geneve header which has option length greater than 32 bytes,
+ * and disable offloads for UDP ports that have not been offloaded.
*/
- if ((l4_proto == IPPROTO_UDP) &&
- ((skb_inner_mac_header(skb) -
- skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
- return features & ~(NETIF_F_CSUM_MASK |
- NETIF_F_GSO_MASK);
+ if (l4_proto == IPPROTO_UDP) {
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 hdrlen, vxln_port, gnv_port;
+
+ hdrlen = QEDE_MAX_TUN_HDR_LEN;
+ vxln_port = edev->vxlan_dst_port;
+ gnv_port = edev->geneve_dst_port;
+
+ if ((skb_inner_mac_header(skb) -
+ skb_transport_header(skb)) > hdrlen ||
+ (ntohs(udp_hdr(skb)->dest) != vxln_port &&
+ ntohs(udp_hdr(skb)->dest) != gnv_port))
+ return features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_GSO_MASK);
+ }
}
return features;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 3a78c3f25157..b9ba23d71c61 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -84,6 +84,8 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_AH 0x8070
+#define CHIP_NUM_AH_IOV 0x8090
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -93,6 +95,9 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_AH CHIP_NUM_AH
+#define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV
+
#endif
enum qede_pci_private {
@@ -110,6 +115,10 @@ static const struct pci_device_id qede_pci_tbl[] = {
#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -216,9 +225,13 @@ static struct pci_driver qede_pci_driver = {
static struct qed_eth_cb_ops qede_ll_ops = {
{
+#ifdef CONFIG_RFS_ACCEL
+ .arfs_filter_op = qede_arfs_filter_op,
+#endif
.link_update = qede_link_update,
},
.force_mac = qede_force_mac,
+ .ports_update = qede_udp_ports_update,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -314,122 +327,135 @@ static int qede_close(struct net_device *ndev);
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
+ struct qede_stats_common *p_common = &edev->stats.common;
struct qed_eth_stats stats;
edev->ops->get_vport_stats(edev->cdev, &stats);
- edev->stats.no_buff_discards = stats.no_buff_discards;
- edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
- edev->stats.ttl0_discard = stats.ttl0_discard;
- edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
- edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
- edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
- edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
- edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
- edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
- edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
- edev->stats.mac_filter_discards = stats.mac_filter_discards;
-
- edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
- edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
- edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
- edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
- edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
- edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
- edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
- edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
- edev->stats.coalesced_events = stats.tpa_coalesced_events;
- edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
- edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
- edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
-
- edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
- edev->stats.rx_128_to_255_byte_packets =
- stats.rx_128_to_255_byte_packets;
- edev->stats.rx_256_to_511_byte_packets =
- stats.rx_256_to_511_byte_packets;
- edev->stats.rx_512_to_1023_byte_packets =
- stats.rx_512_to_1023_byte_packets;
- edev->stats.rx_1024_to_1518_byte_packets =
- stats.rx_1024_to_1518_byte_packets;
- edev->stats.rx_1519_to_1522_byte_packets =
- stats.rx_1519_to_1522_byte_packets;
- edev->stats.rx_1519_to_2047_byte_packets =
- stats.rx_1519_to_2047_byte_packets;
- edev->stats.rx_2048_to_4095_byte_packets =
- stats.rx_2048_to_4095_byte_packets;
- edev->stats.rx_4096_to_9216_byte_packets =
- stats.rx_4096_to_9216_byte_packets;
- edev->stats.rx_9217_to_16383_byte_packets =
- stats.rx_9217_to_16383_byte_packets;
- edev->stats.rx_crc_errors = stats.rx_crc_errors;
- edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
- edev->stats.rx_pause_frames = stats.rx_pause_frames;
- edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
- edev->stats.rx_align_errors = stats.rx_align_errors;
- edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
- edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
- edev->stats.rx_jabbers = stats.rx_jabbers;
- edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
- edev->stats.rx_fragments = stats.rx_fragments;
- edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
- edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
- edev->stats.tx_128_to_255_byte_packets =
- stats.tx_128_to_255_byte_packets;
- edev->stats.tx_256_to_511_byte_packets =
- stats.tx_256_to_511_byte_packets;
- edev->stats.tx_512_to_1023_byte_packets =
- stats.tx_512_to_1023_byte_packets;
- edev->stats.tx_1024_to_1518_byte_packets =
- stats.tx_1024_to_1518_byte_packets;
- edev->stats.tx_1519_to_2047_byte_packets =
- stats.tx_1519_to_2047_byte_packets;
- edev->stats.tx_2048_to_4095_byte_packets =
- stats.tx_2048_to_4095_byte_packets;
- edev->stats.tx_4096_to_9216_byte_packets =
- stats.tx_4096_to_9216_byte_packets;
- edev->stats.tx_9217_to_16383_byte_packets =
- stats.tx_9217_to_16383_byte_packets;
- edev->stats.tx_pause_frames = stats.tx_pause_frames;
- edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
- edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
- edev->stats.tx_total_collisions = stats.tx_total_collisions;
- edev->stats.brb_truncates = stats.brb_truncates;
- edev->stats.brb_discards = stats.brb_discards;
- edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+
+ p_common->no_buff_discards = stats.common.no_buff_discards;
+ p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+ p_common->ttl0_discard = stats.common.ttl0_discard;
+ p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
+ p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
+ p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
+ p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
+ p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
+ p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+ p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+ p_common->mac_filter_discards = stats.common.mac_filter_discards;
+
+ p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+ p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+ p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
+ p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
+ p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
+ p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
+ p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
+ p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
+ p_common->coalesced_events = stats.common.tpa_coalesced_events;
+ p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
+ p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
+ p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
+
+ p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
+ p_common->rx_65_to_127_byte_packets =
+ stats.common.rx_65_to_127_byte_packets;
+ p_common->rx_128_to_255_byte_packets =
+ stats.common.rx_128_to_255_byte_packets;
+ p_common->rx_256_to_511_byte_packets =
+ stats.common.rx_256_to_511_byte_packets;
+ p_common->rx_512_to_1023_byte_packets =
+ stats.common.rx_512_to_1023_byte_packets;
+ p_common->rx_1024_to_1518_byte_packets =
+ stats.common.rx_1024_to_1518_byte_packets;
+ p_common->rx_crc_errors = stats.common.rx_crc_errors;
+ p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
+ p_common->rx_pause_frames = stats.common.rx_pause_frames;
+ p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
+ p_common->rx_align_errors = stats.common.rx_align_errors;
+ p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
+ p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
+ p_common->rx_jabbers = stats.common.rx_jabbers;
+ p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
+ p_common->rx_fragments = stats.common.rx_fragments;
+ p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
+ p_common->tx_65_to_127_byte_packets =
+ stats.common.tx_65_to_127_byte_packets;
+ p_common->tx_128_to_255_byte_packets =
+ stats.common.tx_128_to_255_byte_packets;
+ p_common->tx_256_to_511_byte_packets =
+ stats.common.tx_256_to_511_byte_packets;
+ p_common->tx_512_to_1023_byte_packets =
+ stats.common.tx_512_to_1023_byte_packets;
+ p_common->tx_1024_to_1518_byte_packets =
+ stats.common.tx_1024_to_1518_byte_packets;
+ p_common->tx_pause_frames = stats.common.tx_pause_frames;
+ p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
+ p_common->brb_truncates = stats.common.brb_truncates;
+ p_common->brb_discards = stats.common.brb_discards;
+ p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+
+ if (QEDE_IS_BB(edev)) {
+ struct qede_stats_bb *p_bb = &edev->stats.bb;
+
+ p_bb->rx_1519_to_1522_byte_packets =
+ stats.bb.rx_1519_to_1522_byte_packets;
+ p_bb->rx_1519_to_2047_byte_packets =
+ stats.bb.rx_1519_to_2047_byte_packets;
+ p_bb->rx_2048_to_4095_byte_packets =
+ stats.bb.rx_2048_to_4095_byte_packets;
+ p_bb->rx_4096_to_9216_byte_packets =
+ stats.bb.rx_4096_to_9216_byte_packets;
+ p_bb->rx_9217_to_16383_byte_packets =
+ stats.bb.rx_9217_to_16383_byte_packets;
+ p_bb->tx_1519_to_2047_byte_packets =
+ stats.bb.tx_1519_to_2047_byte_packets;
+ p_bb->tx_2048_to_4095_byte_packets =
+ stats.bb.tx_2048_to_4095_byte_packets;
+ p_bb->tx_4096_to_9216_byte_packets =
+ stats.bb.tx_4096_to_9216_byte_packets;
+ p_bb->tx_9217_to_16383_byte_packets =
+ stats.bb.tx_9217_to_16383_byte_packets;
+ p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
+ p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
+ } else {
+ struct qede_stats_ah *p_ah = &edev->stats.ah;
+
+ p_ah->rx_1519_to_max_byte_packets =
+ stats.ah.rx_1519_to_max_byte_packets;
+ p_ah->tx_1519_to_max_byte_packets =
+ stats.ah.tx_1519_to_max_byte_packets;
+ }
}
static void qede_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qede_stats_common *p_common;
qede_fill_by_demand_stats(edev);
+ p_common = &edev->stats.common;
- stats->rx_packets = edev->stats.rx_ucast_pkts +
- edev->stats.rx_mcast_pkts +
- edev->stats.rx_bcast_pkts;
- stats->tx_packets = edev->stats.tx_ucast_pkts +
- edev->stats.tx_mcast_pkts +
- edev->stats.tx_bcast_pkts;
-
- stats->rx_bytes = edev->stats.rx_ucast_bytes +
- edev->stats.rx_mcast_bytes +
- edev->stats.rx_bcast_bytes;
+ stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
- stats->tx_bytes = edev->stats.tx_ucast_bytes +
- edev->stats.tx_mcast_bytes +
- edev->stats.tx_bcast_bytes;
+ stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
- stats->tx_errors = edev->stats.tx_err_drop_pkts;
- stats->multicast = edev->stats.rx_mcast_pkts +
- edev->stats.rx_bcast_pkts;
+ stats->tx_errors = p_common->tx_err_drop_pkts;
+ stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
- stats->rx_fifo_errors = edev->stats.no_buff_discards;
+ stats->rx_fifo_errors = p_common->no_buff_discards;
- stats->collisions = edev->stats.tx_total_collisions;
- stats->rx_crc_errors = edev->stats.rx_crc_errors;
- stats->rx_frame_errors = edev->stats.rx_align_errors;
+ if (QEDE_IS_BB(edev))
+ stats->collisions = edev->stats.bb.tx_total_collisions;
+ stats->rx_crc_errors = p_common->rx_crc_errors;
+ stats->rx_frame_errors = p_common->rx_align_errors;
}
#ifdef CONFIG_QED_SRIOV
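A minimal sketch of how the new common/bb/ah stats split is meant to be consumed (illustrative only; qede_get_collisions() is a hypothetical helper, while QEDE_IS_BB() and the stats layout are taken from the hunks above). Fields that exist on only one ASIC family must sit behind the family check, exactly as qede_get_stats64() now does for collisions:

        /* Hypothetical helper, assuming the qede structures above:
         * BB parts expose tx_total_collisions; AH parts do not.
         */
        static u64 qede_get_collisions(struct qede_dev *edev)
        {
                if (QEDE_IS_BB(edev))
                        return edev->stats.bb.tx_total_collisions;
                return 0;       /* AH has no collision counter */
        }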
@@ -532,6 +558,9 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
.ndo_xdp = qede_xdp,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = qede_rx_flow_steer,
+#endif
};
/* -------------------------------------------------------------------------
@@ -581,7 +610,8 @@ static void qede_init_ndev(struct qede_dev *edev)
{
struct net_device *ndev = edev->ndev;
struct pci_dev *pdev = edev->pdev;
- u32 hw_features;
+ bool udp_tunnel_enable = false;
+ netdev_features_t hw_features;
pci_set_drvdata(pdev, ndev);
@@ -603,16 +633,33 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- /* Encap features*/
- hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_GRE_CSUM;
- ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_TSO6 | NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_GRE_CSUM;
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
+ hw_features |= NETIF_F_NTUPLE;
+
+ if (edev->dev_info.common.vxlan_enable ||
+ edev->dev_info.common.geneve_enable)
+ udp_tunnel_enable = true;
+
+ if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
+ hw_features |= NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+ }
+
+ if (udp_tunnel_enable) {
+ hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ }
+
+ if (edev->dev_info.common.gre_enable) {
+ hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
+ ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM);
+ }
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
@@ -750,7 +797,6 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
- struct qed_dev *cdev = edev->cdev;
__qede_lock(edev);
@@ -758,24 +804,12 @@ static void qede_sp_task(struct work_struct *work)
if (edev->state == QEDE_STATE_OPEN)
qede_config_rx_mode(edev->ndev);
- if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
- struct qed_tunn_params tunn_params;
-
- memset(&tunn_params, 0, sizeof(tunn_params));
- tunn_params.update_vxlan_port = 1;
- tunn_params.vxlan_port = edev->vxlan_dst_port;
- qed_ops->tunn_config(cdev, &tunn_params);
- }
-
- if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
- struct qed_tunn_params tunn_params;
-
- memset(&tunn_params, 0, sizeof(tunn_params));
- tunn_params.update_geneve_port = 1;
- tunn_params.geneve_port = edev->geneve_dst_port;
- qed_ops->tunn_config(cdev, &tunn_params);
+#ifdef CONFIG_RFS_ACCEL
+ if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
+ if (edev->state == QEDE_STATE_OPEN)
+ qede_process_arfs_filters(edev, false);
}
-
+#endif
__qede_unlock(edev);
}
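The aRFS hook reuses the deferred-work pattern that the removed tunnel-port code used: a producer sets a flag and kicks sp_task, and the handler runs under __qede_lock(). A sketch of the producer side, assuming the scheduling call qede uses elsewhere (only the flag and the handler appear in this hunk):

        set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);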
@@ -786,6 +820,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+#ifdef CONFIG_RFS_ACCEL
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
+#endif
qed_ops->common->update_pf_params(cdev, &pf_params);
}
@@ -870,13 +907,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
/* PTP not supported on VFs */
- if (!is_vf) {
- rc = qede_ptp_register_phc(edev);
- if (rc) {
- DP_NOTICE(edev, "Cannot register PHC\n");
- goto err5;
- }
- }
+ if (!is_vf)
+ qede_ptp_enable(edev, true);
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -891,8 +923,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
-err5:
- unregister_netdev(edev->ndev);
err4:
qede_roce_dev_remove(edev);
err3:
@@ -940,11 +970,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
- cancel_delayed_work_sync(&edev->sp_task);
-
unregister_netdev(ndev);
+ cancel_delayed_work_sync(&edev->sp_task);
- qede_ptp_remove(edev);
+ qede_ptp_disable(edev);
qede_roce_dev_remove(edev);
@@ -1165,9 +1194,11 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq->num_rx_buffers = edev->q_num_rx_buffers;
rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+ rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
- if (rxq->rx_buf_size > PAGE_SIZE)
- rxq->rx_buf_size = PAGE_SIZE;
+ /* Make sure that the headroom and payload fit in a single page */
+ if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
	/* Segment size to split a page into multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
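Worked numbers for the headroom clamp, assuming a 4 KiB page and the usual XDP_PACKET_HEADROOM of 256 bytes: with an XDP program loaded, any MTU that pushes rx_buf_size past 3840 bytes gets clamped so headroom plus payload still fit in one page:

        rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0; /* 256 or 0 */
        rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + ndev->mtu;
        if (rx_buf_size + rx_headroom > PAGE_SIZE)  /* e.g. a 9000-byte MTU */
                rx_buf_size = PAGE_SIZE - rx_headroom; /* 4096 - 256 = 3840 */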
@@ -1229,7 +1260,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
/* Free the parallel SW ring */
if (txq->is_xdp)
- kfree(txq->sw_tx_ring.pages);
+ kfree(txq->sw_tx_ring.xdp);
else
kfree(txq->sw_tx_ring.skbs);
@@ -1247,9 +1278,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* Allocate the parallel driver ring for Tx buffers */
if (txq->is_xdp) {
- size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
- txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
- if (!txq->sw_tx_ring.pages)
+ size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+ txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.xdp)
goto err;
} else {
size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
@@ -1466,6 +1497,18 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
}
for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+#ifdef CONFIG_RFS_ACCEL
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
+ rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
+ edev->int_info.msix[i].vector);
+ if (rc) {
+ DP_ERR(edev, "Failed to add CPU rmap\n");
+ qede_free_arfs(edev);
+ }
+ }
+#endif
rc = request_irq(edev->int_info.msix[i].vector,
qede_msix_fp_int, 0, edev->fp_array[i].name,
&edev->fp_array[i]);
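The rmap lookup above assumes the driver allocated a CPU affinity reverse-map earlier in the setup path; that allocation is not part of this hunk, so the sketch below is an assumption based on the standard <linux/cpu_rmap.h> helpers (alloc_irq_cpu_rmap()/free_irq_cpu_rmap()), sized here with the QEDE_QUEUE_CNT() macro seen above:

        ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_QUEUE_CNT(edev));
        if (!ndev->rx_cpu_rmap)
                return -ENOMEM; /* or fall back to running without aRFS */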
@@ -1827,8 +1870,6 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_roce_dev_event_close(edev);
edev->state = QEDE_STATE_CLOSED;
- qede_ptp_stop(edev);
-
/* Close OS Tx */
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
@@ -1847,7 +1888,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
-
+#ifdef CONFIG_RFS_ACCEL
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ qede_poll_for_freeing_arfs_filters(edev);
+ qede_free_arfs(edev);
+ }
+#endif
/* Release the interrupts */
qede_sync_free_irqs(edev);
edev->ops->common->set_fp_int(edev->cdev, 0);
@@ -1899,6 +1945,13 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
if (rc)
goto err2;
+#ifdef CONFIG_RFS_ACCEL
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ rc = qede_alloc_arfs(edev);
+ if (rc)
+ DP_NOTICE(edev, "aRFS memory allocation failed\n");
+ }
+#endif
qede_napi_add_enable(edev);
DP_INFO(edev, "Napi added and enabled\n");
@@ -1925,13 +1978,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
qede_roce_dev_event_open(edev);
- qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL));
-
edev->state = QEDE_STATE_OPEN;
DP_INFO(edev, "Ending successfully qede load\n");
-
goto out;
err4:
qede_sync_free_irqs(edev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 2e62dec09bd7..24f06e2ef43e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -181,6 +181,7 @@ static void qede_ptp_task(struct work_struct *work)
skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
@@ -206,23 +207,10 @@ static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
return phc_cycles;
}
-static void qede_ptp_init_cc(struct qede_dev *edev)
-{
- struct qede_ptp *ptp;
-
- ptp = edev->ptp;
- if (!ptp)
- return;
-
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = qede_ptp_read_cc;
- ptp->cc.mask = CYCLECOUNTER_MASK(64);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-}
-
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
+ enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
+ enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
struct qede_ptp *ptp = edev->ptp;
if (!ptp)
@@ -236,7 +224,12 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
switch (ptp->tx_type) {
case HWTSTAMP_TX_ON:
edev->flags |= QEDE_TX_TIMESTAMPING_EN;
- ptp->ops->hwtstamp_tx_on(edev->cdev);
+ tx_type = QED_PTP_HWTSTAMP_TX_ON;
+ break;
+
+ case HWTSTAMP_TX_OFF:
+ edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
+ tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
@@ -247,42 +240,57 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
spin_lock_bh(&ptp->lock);
switch (ptp->rx_filter) {
case HWTSTAMP_FILTER_NONE:
+ rx_filter = QED_PTP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+ rx_filter = QED_PTP_FILTER_ALL;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4);
+ rx_filter = QED_PTP_FILTER_V1_L4_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);
+ rx_filter = QED_PTP_FILTER_V2_L4_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
/* Initialize PTP detection L2 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2);
+ rx_filter = QED_PTP_FILTER_V2_L2_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
- ptp->ops->cfg_rx_filters(edev->cdev,
- QED_PTP_FILTER_L2_IPV4_IPV6);
+ rx_filter = QED_PTP_FILTER_V2_GEN;
break;
}
+ ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
+
spin_unlock_bh(&ptp->lock);
return 0;
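The net effect of this rework: instead of programming the hardware from inside each switch case via cfg_rx_filters(), the function now only accumulates rx_filter and tx_type and issues a single combined call, so Rx and Tx timestamp configuration can no longer drift apart. Condensed sketch of the resulting flow:

        spin_lock_bh(&ptp->lock);
        /* ... the switch statements above only choose rx_filter/tx_type ... */
        ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
        spin_unlock_bh(&ptp->lock);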
@@ -324,61 +332,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
-/* Called during load, to initialize PTP-related stuff */
-static void qede_ptp_init(struct qede_dev *edev, bool init_tc)
-{
- struct qede_ptp *ptp;
- int rc;
-
- ptp = edev->ptp;
- if (!ptp)
- return;
-
- spin_lock_init(&ptp->lock);
-
- /* Configure PTP in HW */
- rc = ptp->ops->enable(edev->cdev);
- if (rc) {
- DP_ERR(edev, "Stopping PTP initialization\n");
- return;
- }
-
- /* Init work queue for Tx timestamping */
- INIT_WORK(&ptp->work, qede_ptp_task);
-
- /* Init cyclecounter and timecounter. This is done only in the first
- * load. If done in every load, PTP application will fail when doing
- * unload / load (e.g. MTU change) while it is running.
- */
- if (init_tc) {
- qede_ptp_init_cc(edev);
- timecounter_init(&ptp->tc, &ptp->cc,
- ktime_to_ns(ktime_get_real()));
- }
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n");
-}
-
-void qede_ptp_start(struct qede_dev *edev, bool init_tc)
-{
- qede_ptp_init(edev, init_tc);
- qede_ptp_cfg_filters(edev);
-}
-
-void qede_ptp_remove(struct qede_dev *edev)
-{
- struct qede_ptp *ptp;
-
- ptp = edev->ptp;
- if (ptp && ptp->clock) {
- ptp_clock_unregister(ptp->clock);
- ptp->clock = NULL;
- }
-
- kfree(ptp);
- edev->ptp = NULL;
-}
-
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
@@ -417,8 +370,7 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
return 0;
}
-/* Called during unload, to stop PTP-related stuff */
-void qede_ptp_stop(struct qede_dev *edev)
+void qede_ptp_disable(struct qede_dev *edev)
{
struct qede_ptp *ptp;
@@ -426,6 +378,11 @@ void qede_ptp_stop(struct qede_dev *edev)
if (!ptp)
return;
+ if (ptp->clock) {
+ ptp_clock_unregister(ptp->clock);
+ ptp->clock = NULL;
+ }
+
/* Cancel PTP work queue. Should be done after the Tx queues are
* drained to prevent additional scheduling.
*/
@@ -439,11 +396,54 @@ void qede_ptp_stop(struct qede_dev *edev)
spin_lock_bh(&ptp->lock);
ptp->ops->disable(edev->cdev);
spin_unlock_bh(&ptp->lock);
+
+ kfree(ptp);
+ edev->ptp = NULL;
+}
+
+static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+{
+ struct qede_ptp *ptp;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EINVAL;
+
+ spin_lock_init(&ptp->lock);
+
+ /* Configure PTP in HW */
+ rc = ptp->ops->enable(edev->cdev);
+ if (rc) {
+ DP_INFO(edev, "PTP HW enable failed\n");
+ return rc;
+ }
+
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&ptp->work, qede_ptp_task);
+
+ /* Init cyclecounter and timecounter. This is done only in the first
+ * load. If done in every load, PTP application will fail when doing
+ * unload / load (e.g. MTU change) while it is running.
+ */
+ if (init_tc) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+
+ timecounter_init(&ptp->tc, &ptp->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+
+ return rc;
}
-int qede_ptp_register_phc(struct qede_dev *edev)
+int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
{
struct qede_ptp *ptp;
+ int rc;
ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
if (!ptp) {
@@ -454,14 +454,19 @@ int qede_ptp_register_phc(struct qede_dev *edev)
ptp->edev = edev;
ptp->ops = edev->ops->ptp;
if (!ptp->ops) {
- kfree(ptp);
- edev->ptp = NULL;
- DP_ERR(edev, "PTP clock registeration failed\n");
- return -EIO;
+ DP_INFO(edev, "PTP enable failed\n");
+ rc = -EIO;
+ goto err1;
}
edev->ptp = ptp;
+ rc = qede_ptp_init(edev, init_tc);
+ if (rc)
+ goto err1;
+
+ qede_ptp_cfg_filters(edev);
+
/* Fill the ptp_clock_info struct and register PTP clock */
ptp->clock_info.owner = THIS_MODULE;
snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
@@ -478,13 +483,21 @@ int qede_ptp_register_phc(struct qede_dev *edev)
ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
if (IS_ERR(ptp->clock)) {
- ptp->clock = NULL;
- kfree(ptp);
- edev->ptp = NULL;
+ rc = -EINVAL;
DP_ERR(edev, "PTP clock registeration failed\n");
+ goto err2;
}
return 0;
+
+err2:
+ qede_ptp_disable(edev);
+ ptp->clock = NULL;
+err1:
+ kfree(ptp);
+ edev->ptp = NULL;
+
+ return rc;
}
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
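Taken together with the probe and remove hunks earlier in this patch, the PTP lifecycle collapses into one enable/disable pair. A simplified sketch of the resulting call flow (error handling elided):

        /* probe, PF only: HW enable, cyclecounter init, PHC registration */
        if (!is_vf)
                qede_ptp_enable(edev, true);

        /* remove: unregister the PHC, cancel Tx-timestamp work, HW disable */
        qede_ptp_disable(edev);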
@@ -495,6 +508,9 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (!ptp)
return;
+ if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
+ return;
+
if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
DP_NOTICE(edev,
"Tx timestamping was not enabled, this packet will not be timestamped\n");
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index f328f9bba53a..691a14c4b2c5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -40,10 +40,8 @@
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
-void qede_ptp_start(struct qede_dev *edev, bool init_tc);
-void qede_ptp_stop(struct qede_dev *edev);
-void qede_ptp_remove(struct qede_dev *edev);
-int qede_ptp_register_phc(struct qede_dev *edev);
+void qede_ptp_disable(struct qede_dev *edev);
+int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index d7107055ec60..2f656f395f39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -128,6 +128,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
return 0;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
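The guard matters because pci_find_ext_capability() returns 0 when the capability is absent, so the subsequent config reads would otherwise land at bogus offsets near the start of config space. The defensive idiom in general form (sketch; the pdev/offset names are illustrative):

        int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);

        if (!pos)
                return 0;       /* no SR-IOV capability on this function */
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);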
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index e9e647072596..1188d420fe53 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4686,7 +4686,8 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
/*
* Set up the operating parameters.
*/
- qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
+ qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ ndev->name);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
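alloc_ordered_workqueue() treats its first argument as a printf-style format, so passing ndev->name directly misparses any '%' that happens to be in the interface name. The fix pins the format to a constant and passes the name as an argument:

        /* before: the device name is interpreted as a format string */
        qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
        /* after: constant format, name supplied as a vararg */
        qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
                                                  ndev->name);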
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
index f62c215be779..7116be485e61 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
@@ -26,6 +26,7 @@
/* SGMII digital lane registers */
#define EMAC_SGMII_LN_DRVR_CTRL0 0x000C
+#define EMAC_SGMII_LN_DRVR_CTRL1 0x0010
#define EMAC_SGMII_LN_DRVR_TAP_EN 0x0018
#define EMAC_SGMII_LN_TX_MARGINING 0x001C
#define EMAC_SGMII_LN_TX_PRE 0x0020
@@ -48,6 +49,7 @@
#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x02AC
#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x02B8
#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x02C8
+#define EMAC_SGMII_LN_RX_RESECODE_OFFSET 0x02CC
/* SGMII digital lane register values */
#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
@@ -73,6 +75,8 @@
#define CML_GEAR_MODE(x) (((x) & 7) << 3)
#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
+#define RESCODE_OFFSET(x) ((x) & 0x1f)
+
#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
#define MIXER_DATARATE_MODE(x) ((x) & 3)
@@ -159,6 +163,8 @@ static const struct emac_reg_write sgmii_laned[] = {
{EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
{EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)},
{EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)},
+ {EMAC_SGMII_LN_DRVR_CTRL1, RESCODE_OFFSET(7)},
+ {EMAC_SGMII_LN_RX_RESECODE_OFFSET, RESCODE_OFFSET(9)},
{EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
{EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) |
EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0},
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 040b28977ee7..18c184ee1f3c 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -13,6 +13,7 @@
/* Qualcomm Technologies, Inc. EMAC SGMII Controller driver.
*/
+#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/acpi.h>
#include <linux/of_device.h>
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 672f6b696069..72233ab9474b 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1406,27 +1406,29 @@ static int cp_get_sset_count (struct net_device *dev, int sset)
}
}
-static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cp_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
- rc = mii_ethtool_gset(&cp->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
}
-static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cp_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
- rc = mii_ethtool_sset(&cp->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
@@ -1578,8 +1580,6 @@ static const struct ethtool_ops cp_ethtool_ops = {
.get_drvinfo = cp_get_drvinfo,
.get_regs_len = cp_get_regs_len,
.get_sset_count = cp_get_sset_count,
- .get_settings = cp_get_settings,
- .set_settings = cp_set_settings,
.nway_reset = cp_nway_reset,
.get_link = ethtool_op_get_link,
.get_msglevel = cp_get_msglevel,
@@ -1593,6 +1593,8 @@ static const struct ethtool_ops cp_ethtool_ops = {
.get_eeprom = cp_get_eeprom,
.set_eeprom = cp_set_eeprom,
.get_ringparam = cp_get_ringparam,
+ .get_link_ksettings = cp_get_link_ksettings,
+ .set_link_ksettings = cp_set_link_ksettings,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
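The same get_settings/set_settings to get_link_ksettings/set_link_ksettings conversion repeats across the MII-based drivers below (8139too, sis190, sis900, epic100, ioc3, the smc parts). For a driver that delegates to the mii library under its own lock, the whole migration reduces to this shape (foo_* and foo_priv are hypothetical names, not from any driver in this diff):

        static int foo_get_link_ksettings(struct net_device *dev,
                                          struct ethtool_link_ksettings *cmd)
        {
                struct foo_priv *fp = netdev_priv(dev);
                int rc;

                spin_lock_irq(&fp->lock);
                rc = mii_ethtool_get_link_ksettings(&fp->mii, cmd);
                spin_unlock_irq(&fp->lock);
                return rc;
        }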
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 89631753e799..ca22f2898664 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2384,21 +2384,23 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
}
-static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8139_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8139_private *tp = netdev_priv(dev);
spin_lock_irq(&tp->lock);
- mii_ethtool_gset(&tp->mii, cmd);
+ mii_ethtool_get_link_ksettings(&tp->mii, cmd);
spin_unlock_irq(&tp->lock);
return 0;
}
-static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8139_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct rtl8139_private *tp = netdev_priv(dev);
int rc;
spin_lock_irq(&tp->lock);
- rc = mii_ethtool_sset(&tp->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&tp->mii, cmd);
spin_unlock_irq(&tp->lock);
return rc;
}
@@ -2480,8 +2482,6 @@ static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static const struct ethtool_ops rtl8139_ethtool_ops = {
.get_drvinfo = rtl8139_get_drvinfo,
- .get_settings = rtl8139_get_settings,
- .set_settings = rtl8139_set_settings,
.get_regs_len = rtl8139_get_regs_len,
.get_regs = rtl8139_get_regs,
.nway_reset = rtl8139_nway_reset,
@@ -2493,6 +2493,8 @@ static const struct ethtool_ops rtl8139_ethtool_ops = {
.get_strings = rtl8139_get_strings,
.get_sset_count = rtl8139_get_sset_count,
.get_ethtool_stats = rtl8139_get_ethtool_stats,
+ .get_link_ksettings = rtl8139_get_link_ksettings,
+ .set_link_ksettings = rtl8139_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 81f18a833527..0a8f2817ea60 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -817,7 +817,8 @@ struct rtl8169_private {
} csi_ops;
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*get_link_ksettings)(struct net_device *,
+ struct ethtool_link_ksettings *);
void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct net_device *);
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
@@ -2115,41 +2116,49 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
-static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u32 status;
+ u32 supported, advertising;
- cmd->supported =
+ supported =
SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_FIBRE;
status = RTL_R32(TBICSR);
- cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
- cmd->autoneg = !!(status & TBINwEnable);
+ advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
+ cmd->base.autoneg = !!(status & TBINwEnable);
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL; /* Always set */
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL; /* Always set */
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- return mii_ethtool_gset(&tp->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&tp->mii, cmd);
}
-static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8169_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
int rc;
rtl_lock_work(tp);
- rc = tp->get_settings(dev, cmd);
+ rc = tp->get_link_ksettings(dev, cmd);
rtl_unlock_work(tp);
return rc;
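Drivers that build the supported/advertising masks by hand (the TBI path here, sc92031 and smc91c92 below) cannot simply assign them anymore, because link_ksettings stores link modes as bitmaps rather than a single u32. The helper pair used throughout this series bridges the two representations:

        /* legacy u32 mask -> link-mode bitmap (get paths) */
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        /* link-mode bitmap -> legacy u32 mask (set paths) */
        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);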
@@ -2356,7 +2365,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
- .get_settings = rtl8169_get_settings,
.set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
@@ -2368,6 +2376,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
+ .get_link_ksettings = rtl8169_get_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -8351,14 +8360,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rtl_tbi_enabled(tp)) {
tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_settings = rtl8169_gset_tbi;
+ tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
tp->phy_reset_enable = rtl8169_tbi_reset_enable;
tp->phy_reset_pending = rtl8169_tbi_reset_pending;
tp->link_ok = rtl8169_tbi_link_ok;
tp->do_ioctl = rtl_tbi_ioctl;
} else {
tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_settings = rtl8169_gset_xmii;
+ tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
tp->phy_reset_enable = rtl8169_xmii_reset_enable;
tp->phy_reset_pending = rtl8169_xmii_reset_pending;
tp->link_ok = rtl8169_xmii_link_ok;
@@ -8444,9 +8453,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
~(RxBOVF | RxFOVF) : ~0;
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long) dev;
- tp->timer.function = rtl8169_phy_timer;
+ setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev);
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 0f63a44a955d..bab13613b138 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -33,6 +33,7 @@
#include <net/rtnetlink.h>
#include <net/netevent.h>
#include <net/arp.h>
+#include <net/fib_rules.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>
@@ -1115,7 +1116,7 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
const struct rocker_desc_info *desc_info,
void *priv)
{
- struct ethtool_cmd *ecmd = priv;
+ struct ethtool_link_ksettings *ecmd = priv;
const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
u32 speed;
@@ -1137,13 +1138,14 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = SUPPORTED_TP;
- ecmd->phy_address = 0xff;
- ecmd->port = PORT_TP;
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
- ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+
+ ecmd->base.phy_address = 0xff;
+ ecmd->base.port = PORT_TP;
+ ecmd->base.speed = speed;
+ ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
@@ -1250,7 +1252,7 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- struct ethtool_cmd *ecmd = priv;
+ struct ethtool_link_ksettings *ecmd = priv;
struct rocker_tlv *cmd_info;
if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1263,13 +1265,13 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
rocker_port->pport))
return -EMSGSIZE;
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
- ethtool_cmd_speed(ecmd)))
+ ecmd->base.speed))
return -EMSGSIZE;
if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
- ecmd->duplex))
+ ecmd->base.duplex))
return -EMSGSIZE;
if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
- ecmd->autoneg))
+ ecmd->base.autoneg))
return -EMSGSIZE;
rocker_tlv_nest_end(desc_info, cmd_info);
return 0;
@@ -1347,8 +1349,9 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
return 0;
}
-static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
- struct ethtool_cmd *ecmd)
+static int
+rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_link_ksettings *ecmd)
{
return rocker_cmd_exec(rocker_port, false,
rocker_cmd_get_port_settings_prep, NULL,
@@ -1373,12 +1376,17 @@ static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
rocker_cmd_get_port_settings_mode_proc, p_mode);
}
-static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
- struct ethtool_cmd *ecmd)
+static int
+rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
+ const struct ethtool_link_ksettings *ecmd)
{
+ struct ethtool_link_ksettings copy_ecmd;
+
+ memcpy(&copy_ecmd, ecmd, sizeof(copy_ecmd));
+
return rocker_cmd_exec(rocker_port, false,
rocker_cmd_set_port_settings_ethtool_prep,
- ecmd, NULL, NULL);
+ &copy_ecmd, NULL, NULL);
}
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
@@ -2168,7 +2176,10 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
struct rocker_fib_event_work {
struct work_struct work;
- struct fib_entry_notifier_info fen_info;
+ union {
+ struct fib_entry_notifier_info fen_info;
+ struct fib_rule_notifier_info fr_info;
+ };
struct rocker *rocker;
unsigned long event;
};
@@ -2178,6 +2189,7 @@ static void rocker_router_fib_event_work(struct work_struct *work)
struct rocker_fib_event_work *fib_work =
container_of(work, struct rocker_fib_event_work, work);
struct rocker *rocker = fib_work->rocker;
+ struct fib_rule *rule;
int err;
/* Protect internal structures from changes */
@@ -2195,7 +2207,10 @@ static void rocker_router_fib_event_work(struct work_struct *work)
break;
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- rocker_world_fib4_abort(rocker);
+ rule = fib_work->fr_info.rule;
+ if (!fib4_rule_default(rule))
+ rocker_world_fib4_abort(rocker);
+ fib_rule_put(rule);
break;
}
rtnl_unlock();
@@ -2226,6 +2241,11 @@ static int rocker_router_fib_event(struct notifier_block *nb,
*/
fib_info_hold(fib_work->fen_info.fi);
break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+ fib_rule_get(fib_work->fr_info.rule);
+ break;
}
queue_work(rocker->rocker_owq, &fib_work->work);
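Because the FIB notifier runs in atomic context while the work item runs later under RTNL, the rule must be reference-counted across the handoff; the two hunks above pair up as follows (condensed sketch; fib_rule_get()/fib_rule_put() are the standard <net/fib_rules.h> refcounting helpers):

        /* notifier (atomic): copy the notifier info and pin the rule */
        memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
        fib_rule_get(fib_work->fr_info.rule);
        queue_work(rocker->rocker_owq, &fib_work->work);

        /* work item (under RTNL): only non-default rules force the abort */
        rule = fib_work->fr_info.rule;
        if (!fib4_rule_default(rule))
                rocker_world_fib4_abort(rocker);
        fib_rule_put(rule);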
@@ -2237,16 +2257,18 @@ static int rocker_router_fib_event(struct notifier_block *nb,
* ethtool interface
********************/
-static int rocker_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int
+rocker_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct rocker_port *rocker_port = netdev_priv(dev);
return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}
-static int rocker_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int
+rocker_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct rocker_port *rocker_port = netdev_priv(dev);
@@ -2388,13 +2410,13 @@ static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
}
static const struct ethtool_ops rocker_port_ethtool_ops = {
- .get_settings = rocker_port_get_settings,
- .set_settings = rocker_port_set_settings,
.get_drvinfo = rocker_port_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = rocker_port_get_strings,
.get_ethtool_stats = rocker_port_get_stats,
.get_sset_count = rocker_port_get_sset_count,
+ .get_link_ksettings = rocker_port_get_link_ksettings,
+ .set_link_ksettings = rocker_port_set_link_ksettings,
};
/*****************
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index c60c2d4c646a..78efb2822b86 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -119,6 +119,7 @@ struct efx_ef10_filter_table {
bool mc_promisc;
/* Whether in multicast promiscuous mode when last changed */
bool mc_promisc_last;
+ bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
bool vlan_filter;
struct list_head vlan_list;
};
@@ -5058,6 +5059,7 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
struct netdev_hw_addr *mc;
unsigned int i, addr_count;
+ table->mc_overflow = false;
table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
addr_count = netdev_mc_count(net_dev);
@@ -5065,6 +5067,7 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
table->mc_promisc = true;
+ table->mc_overflow = true;
break;
}
ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
@@ -5469,12 +5472,15 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
}
} else {
/* If we failed to insert promiscuous filters, don't
- * rollback. Regardless, also insert the mc_list
+ * rollback. Regardless, also insert the mc_list,
+ * unless it's incomplete due to overflow
*/
efx_ef10_filter_insert_def(efx, vlan,
EFX_ENCAP_TYPE_NONE,
true, false);
- efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
+ if (!table->mc_overflow)
+ efx_ef10_filter_insert_addr_list(efx, vlan,
+ true, false);
}
} else {
/* If any filters failed to insert, rollback and fall back to
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 104fb15a73f2..f6daf09b8627 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -437,11 +437,13 @@ int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- num_tc = ntc->tc;
+ num_tc = ntc->mqprio->num_tc;
if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
return -EINVAL;
+ ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
if (num_tc == net_dev->num_tc)
return 0;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index ff88d60aa6d5..3bdf87f31087 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -665,11 +665,13 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- num_tc = ntc->tc;
+ num_tc = ntc->mqprio->num_tc;
if (num_tc > EFX_MAX_TX_TC)
return -EINVAL;
+ ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
if (num_tc == net_dev->num_tc)
return 0;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 57e6cef81ebe..52ead5524de7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1558,25 +1558,27 @@ static void ioc3_get_drvinfo (struct net_device *dev,
strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}
-static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ioc3_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
- rc = mii_ethtool_gset(&ip->mii, cmd);
+ rc = mii_ethtool_get_link_ksettings(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
}
-static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ioc3_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ioc3_private *ip = netdev_priv(dev);
int rc;
spin_lock_irq(&ip->ioc3_lock);
- rc = mii_ethtool_sset(&ip->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
spin_unlock_irq(&ip->ioc3_lock);
return rc;
@@ -1608,10 +1610,10 @@ static u32 ioc3_get_link(struct net_device *dev)
static const struct ethtool_ops ioc3_ethtool_ops = {
.get_drvinfo = ioc3_get_drvinfo,
- .get_settings = ioc3_get_settings,
- .set_settings = ioc3_set_settings,
.nway_reset = ioc3_nway_reset,
.get_link = ioc3_get_link,
+ .get_link_ksettings = ioc3_get_link_ksettings,
+ .set_link_ksettings = ioc3_set_link_ksettings,
};
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 6c2e2b311c16..751c81848f35 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1122,14 +1122,16 @@ static void sc92031_poll_controller(struct net_device *dev)
}
#endif
-static int sc92031_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+sc92031_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sc92031_priv *priv = netdev_priv(dev);
void __iomem *port_base = priv->port_base;
u8 phy_address;
u32 phy_ctrl;
u16 output_status;
+ u32 supported, advertising;
spin_lock_bh(&priv->lock);
@@ -1142,68 +1144,77 @@ static int sc92031_ethtool_get_settings(struct net_device *dev,
spin_unlock_bh(&priv->lock);
- cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
+ supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
- cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ advertising = ADVERTISED_TP | ADVERTISED_MII;
if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
- cmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
== (PhyCtrlSpd10 | PhyCtrlDux))
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
== (PhyCtrlSpd100 | PhyCtrlDux))
- cmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (phy_ctrl & PhyCtrlAne)
- cmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
- ethtool_cmd_speed_set(cmd,
- (output_status & 0x2) ? SPEED_100 : SPEED_10);
- cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = PORT_MII;
- cmd->phy_address = phy_address;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
+ cmd->base.duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = phy_address;
+ cmd->base.autoneg = (phy_ctrl & PhyCtrlAne) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int sc92031_ethtool_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+sc92031_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sc92031_priv *priv = netdev_priv(dev);
void __iomem *port_base = priv->port_base;
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
u32 phy_ctrl;
u32 old_phy_ctrl;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (!(speed == SPEED_10 || speed == SPEED_100))
return -EINVAL;
- if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
- return -EINVAL;
- if (!(cmd->port == PORT_MII))
+ if (!(cmd->base.duplex == DUPLEX_HALF ||
+ cmd->base.duplex == DUPLEX_FULL))
return -EINVAL;
- if (!(cmd->phy_address == 0x1f))
+ if (!(cmd->base.port == PORT_MII))
return -EINVAL;
- if (!(cmd->transceiver == XCVR_INTERNAL))
+ if (!(cmd->base.phy_address == 0x1f))
return -EINVAL;
- if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
+ if (!(cmd->base.autoneg == AUTONEG_DISABLE ||
+ cmd->base.autoneg == AUTONEG_ENABLE))
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if (!(cmd->advertising & (ADVERTISED_Autoneg
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(advertising & (ADVERTISED_Autoneg
| ADVERTISED_100baseT_Full
| ADVERTISED_100baseT_Half
| ADVERTISED_10baseT_Full
@@ -1213,15 +1224,15 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
phy_ctrl = PhyCtrlAne;
// FIXME: I'm not sure what the original code was trying to do
- if (cmd->advertising & ADVERTISED_Autoneg)
+ if (advertising & ADVERTISED_Autoneg)
phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
phy_ctrl |= PhyCtrlSpd100;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
- if (cmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
phy_ctrl |= PhyCtrlSpd10;
} else {
// FIXME: Whole branch guessed
@@ -1232,7 +1243,7 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
else /* cmd->speed == SPEED_100 */
phy_ctrl |= PhyCtrlSpd100;
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
phy_ctrl |= PhyCtrlDux;
}
@@ -1368,8 +1379,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops sc92031_ethtool_ops = {
- .get_settings = sc92031_ethtool_get_settings,
- .set_settings = sc92031_ethtool_set_settings,
.get_wol = sc92031_ethtool_get_wol,
.set_wol = sc92031_ethtool_set_wol,
.nway_reset = sc92031_ethtool_nway_reset,
@@ -1377,6 +1386,8 @@ static const struct ethtool_ops sc92031_ethtool_ops = {
.get_strings = sc92031_ethtool_get_strings,
.get_sset_count = sc92031_ethtool_get_sset_count,
.get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
+ .get_link_ksettings = sc92031_ethtool_get_link_ksettings,
+ .set_link_ksettings = sc92031_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 210e35d079dd..02da106c6e04 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1734,18 +1734,20 @@ static void sis190_set_speed_auto(struct net_device *dev)
BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
-static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int sis190_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sis190_private *tp = netdev_priv(dev);
- return mii_ethtool_gset(&tp->mii_if, cmd);
+ return mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
}
-static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int sis190_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sis190_private *tp = netdev_priv(dev);
- return mii_ethtool_sset(&tp->mii_if, cmd);
+ return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd);
}
static void sis190_get_drvinfo(struct net_device *dev,
@@ -1797,8 +1799,6 @@ static void sis190_set_msglevel(struct net_device *dev, u32 value)
}
static const struct ethtool_ops sis190_ethtool_ops = {
- .get_settings = sis190_get_settings,
- .set_settings = sis190_set_settings,
.get_drvinfo = sis190_get_drvinfo,
.get_regs_len = sis190_get_regs_len,
.get_regs = sis190_get_regs,
@@ -1806,6 +1806,8 @@ static const struct ethtool_ops sis190_ethtool_ops = {
.get_msglevel = sis190_get_msglevel,
.set_msglevel = sis190_set_msglevel,
.nway_reset = sis190_nway_reset,
+ .get_link_ksettings = sis190_get_link_ksettings,
+ .set_link_ksettings = sis190_set_link_ksettings,
};
static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 1b6f6171d078..40bd88362e3d 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2035,23 +2035,23 @@ static u32 sis900_get_link(struct net_device *net_dev)
return mii_link_ok(&sis_priv->mii_info);
}
-static int sis900_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int sis900_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
spin_lock_irq(&sis_priv->lock);
- mii_ethtool_gset(&sis_priv->mii_info, cmd);
+ mii_ethtool_get_link_ksettings(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return 0;
}
-static int sis900_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int sis900_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
int rt;
spin_lock_irq(&sis_priv->lock);
- rt = mii_ethtool_sset(&sis_priv->mii_info, cmd);
+ rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return rt;
}
@@ -2129,11 +2129,11 @@ static const struct ethtool_ops sis900_ethtool_ops = {
.get_msglevel = sis900_get_msglevel,
.set_msglevel = sis900_set_msglevel,
.get_link = sis900_get_link,
- .get_settings = sis900_get_settings,
- .set_settings = sis900_set_settings,
.nway_reset = sis900_nway_reset,
.get_wol = sis900_get_wol,
- .set_wol = sis900_set_wol
+ .set_wol = sis900_set_wol,
+ .get_link_ksettings = sis900_get_link_ksettings,
+ .set_link_ksettings = sis900_set_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 5f2737189c72..db6dcb06193d 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1387,25 +1387,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct epic_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_gset(&np->mii, cmd);
+ rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct epic_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
- rc = mii_ethtool_sset(&np->mii, cmd);
+ rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
spin_unlock_irq(&np->lock);
return rc;
@@ -1460,14 +1462,14 @@ static void ethtool_complete(struct net_device *dev)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
.begin = ethtool_begin,
- .complete = ethtool_complete
+ .complete = ethtool_complete,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 4f19c6166182..36307d34f641 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1446,40 +1446,40 @@ static int smc911x_close(struct net_device *dev)
* Ethtool support
*/
static int
-smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc911x_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret, status;
unsigned long flags;
+ u32 supported;
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_gset(&lp->mii, cmd);
+ ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
- cmd->supported = SUPPORTED_10baseT_Half |
+ supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
else if (lp->ctl_rspeed == 100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
-
- cmd->autoneg = AUTONEG_DISABLE;
- if (lp->mii.phy_id==1)
- cmd->transceiver = XCVR_INTERNAL;
- else
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->port = 0;
+ cmd->base.speed = SPEED_100;
+
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = 0;
SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
- cmd->duplex =
+ cmd->base.duplex =
(status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
DUPLEX_FULL : DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+
ret = 0;
}
@@ -1487,7 +1487,8 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
}
static int
-smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc911x_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1495,16 +1496,18 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
if (lp->phy_type != 0) {
spin_lock_irqsave(&lp->lock, flags);
- ret = mii_ethtool_sset(&lp->mii, cmd);
+ ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
spin_unlock_irqrestore(&lp->lock, flags);
} else {
- if (cmd->autoneg != AUTONEG_DISABLE ||
- cmd->speed != SPEED_10 ||
- (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
- (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ if (cmd->base.autoneg != AUTONEG_DISABLE ||
+ cmd->base.speed != SPEED_10 ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL) ||
+ (cmd->base.port != PORT_TP &&
+ cmd->base.port != PORT_AUI))
return -EINVAL;
- lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+ lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
ret = 0;
}
@@ -1686,8 +1689,6 @@ static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
}
static const struct ethtool_ops smc911x_ethtool_ops = {
- .get_settings = smc911x_ethtool_getsettings,
- .set_settings = smc911x_ethtool_setsettings,
.get_drvinfo = smc911x_ethtool_getdrvinfo,
.get_msglevel = smc911x_ethtool_getmsglevel,
.set_msglevel = smc911x_ethtool_setmsglevel,
@@ -1698,6 +1699,8 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
.get_eeprom_len = smc911x_ethtool_geteeprom_len,
.get_eeprom = smc911x_ethtool_geteeprom,
.set_eeprom = smc911x_ethtool_seteeprom,
+ .get_link_ksettings = smc911x_ethtool_get_link_ksettings,
+ .set_link_ksettings = smc911x_ethtool_set_link_ksettings,
};
/*
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 97280daba27f..976aa876789a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1843,56 +1843,60 @@ static int smc_link_ok(struct net_device *dev)
}
}
-static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_netdev_get_ecmd(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
- u16 tmp;
- unsigned int ioaddr = dev->base_addr;
+ u16 tmp;
+ unsigned int ioaddr = dev->base_addr;
+ u32 supported;
- ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI |
- SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
-
- SMC_SELECT_BANK(1);
- tmp = inw(ioaddr + CONFIG);
- ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
- ecmd->transceiver = XCVR_INTERNAL;
- ethtool_cmd_speed_set(ecmd, SPEED_10);
- ecmd->phy_address = ioaddr + MGMT;
+ supported = (SUPPORTED_TP | SUPPORTED_AUI |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
- SMC_SELECT_BANK(0);
- tmp = inw(ioaddr + TCR);
- ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ SMC_SELECT_BANK(1);
+ tmp = inw(ioaddr + CONFIG);
+ ecmd->base.port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
+ ecmd->base.speed = SPEED_10;
+ ecmd->base.phy_address = ioaddr + MGMT;
- return 0;
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ ecmd->base.duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+
+ return 0;
}
-static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_netdev_set_ecmd(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
- u16 tmp;
- unsigned int ioaddr = dev->base_addr;
+ u16 tmp;
+ unsigned int ioaddr = dev->base_addr;
- if (ethtool_cmd_speed(ecmd) != SPEED_10)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
- return -EINVAL;
- if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI)
- return -EINVAL;
- if (ecmd->transceiver != XCVR_INTERNAL)
- return -EINVAL;
+ if (ecmd->base.speed != SPEED_10)
+ return -EINVAL;
+ if (ecmd->base.duplex != DUPLEX_HALF &&
+ ecmd->base.duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->base.port != PORT_TP && ecmd->base.port != PORT_AUI)
+ return -EINVAL;
- if (ecmd->port == PORT_AUI)
- smc_set_xcvr(dev, 1);
- else
- smc_set_xcvr(dev, 0);
+ if (ecmd->base.port == PORT_AUI)
+ smc_set_xcvr(dev, 1);
+ else
+ smc_set_xcvr(dev, 0);
- SMC_SELECT_BANK(0);
- tmp = inw(ioaddr + TCR);
- if (ecmd->duplex == DUPLEX_FULL)
- tmp |= TCR_FDUPLX;
- else
- tmp &= ~TCR_FDUPLX;
- outw(tmp, ioaddr + TCR);
-
- return 0;
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ if (ecmd->base.duplex == DUPLEX_FULL)
+ tmp |= TCR_FDUPLX;
+ else
+ tmp &= ~TCR_FDUPLX;
+ outw(tmp, ioaddr + TCR);
+
+ return 0;
}
static int check_if_running(struct net_device *dev)
@@ -1908,7 +1912,8 @@ static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
@@ -1919,7 +1924,7 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
- ret = mii_ethtool_gset(&smc->mii_if, ecmd);
+ ret = mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd);
else
ret = smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
@@ -1927,7 +1932,8 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return ret;
}
-static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int smc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
@@ -1938,7 +1944,7 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
- ret = mii_ethtool_sset(&smc->mii_if, ecmd);
+ ret = mii_ethtool_set_link_ksettings(&smc->mii_if, ecmd);
else
ret = smc_netdev_set_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
@@ -1982,10 +1988,10 @@ static int smc_nway_reset(struct net_device *dev)
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = smc_get_drvinfo,
- .get_settings = smc_get_settings,
- .set_settings = smc_set_settings,
.get_link = smc_get_link,
.nway_reset = smc_nway_reset,
+ .get_link_ksettings = smc_get_link_ksettings,
+ .set_link_ksettings = smc_set_link_ksettings,
};
static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
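For reference, the ethtool_cmd to ethtool_link_ksettings conversion applied above follows one pattern across all of these drivers: scalar settings move into cmd->base, and legacy u32 link-mode masks are converted with a helper. A minimal get-side sketch, with foo_* names hypothetical and not taken from this patch:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_TP | SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full;

	/* scalar fields now live in cmd->base */
	cmd->base.speed = SPEED_10;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_TP;

	/* legacy u32 masks become link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	return 0;
}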
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 01a8c020d6db..e93c40b4631e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -26,12 +26,15 @@
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)p;
- unsigned int entry = priv->cur_tx;
- struct dma_desc *desc = priv->dma_tx + entry;
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
unsigned int nopaged_len = skb_headlen(skb);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->cur_tx;
unsigned int bmax, des2;
unsigned int i = 1, len;
+ struct dma_desc *desc;
+
+ desc = tx_q->dma_tx + entry;
if (priv->plat->enh_desc)
bmax = BUF_SIZE_8KiB;
@@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = bmax;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
/* do not close the descriptor and do not set own bit */
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
- 0, false);
+ 0, false, skb->len);
while (len != 0) {
- priv->tx_skbuff[entry] = NULL;
+ tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
if (len > bmax) {
des2 = dma_map_single(priv->device,
@@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = bmax;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE, 1,
- false);
+ false, skb->len);
len -= bmax;
i++;
} else {
@@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = len;
/* last descriptor can be set now */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE, 1,
- true);
+ true, skb->len);
len = 0;
}
}
- priv->cur_tx = entry;
+ tx_q->cur_tx = entry;
return entry;
}
@@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+ struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
+ struct stmmac_priv *priv = rx_q->priv_data;
if (priv->hwts_rx_en && !priv->extend_desc)
/* NOTE: Device will overwrite des3 with timestamp value if
* 1588-2002 time stamping is enabled, hence reinitialize it
* to keep explicit chaining in the descriptor.
*/
- p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
- (((priv->dirty_rx) + 1) %
+ p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
+ (((rx_q->dirty_rx) + 1) %
DMA_RX_SIZE) *
sizeof(struct dma_desc)));
}
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
- unsigned int entry = priv->dirty_tx;
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->dirty_tx;
- if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
+ if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
priv->hwts_tx_en)
/* NOTE: Device will overwrite des3 with timestamp value if
* 1588-2002 time stamping is enabled, hence reinitialize it
* to keep explicit chaining in the descriptor.
*/
- p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
- ((priv->dirty_tx + 1) % DMA_TX_SIZE))
+ p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
+ ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
* sizeof(struct dma_desc)));
}
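These chain_mode changes are part of the multi-queue rework: the callbacks now receive a per-queue context instead of the whole private structure and reach the latter back through a priv_data pointer. A simplified sketch of the TX queue structure implied by the accesses above (field list reconstructed from this hunk, not the full upstream definition):

struct stmmac_tx_queue {
	struct stmmac_priv *priv_data;	/* back-pointer, replaces the old cast */
	struct dma_desc *dma_tx;	/* descriptor ring, was priv->dma_tx */
	dma_addr_t dma_tx_phy;		/* was priv->dma_tx_phy */
	struct sk_buff **tx_skbuff;
	struct stmmac_tx_info *tx_skbuff_dma;
	unsigned int cur_tx;
	unsigned int dirty_tx;
};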
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 04d9245b7149..b7ce3fbb5375 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -246,6 +246,15 @@ struct stmmac_extra_stats {
#define STMMAC_TX_MAX_FRAMES 256
#define STMMAC_TX_FRAMES 64
+/* Packet types */
+enum packets_types {
+ PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
+ PACKET_PTPQ = 0x2, /* PTP Packets */
+ PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
+ PACKET_UPQ = 0x4, /* Untagged Packets */
+ PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
+};
+
/* Rx IPC status */
enum rx_frame_status {
good_frame = 0x0,
@@ -324,6 +333,9 @@ struct dma_features {
unsigned int number_tx_queues;
/* Alternate (enhanced) DESC mode */
unsigned int enh_desc;
+ /* TX and RX FIFO sizes */
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -361,7 +373,7 @@ struct stmmac_desc_ops {
/* Invoked by the xmit function to prepare the tx descriptor */
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
- bool ls);
+ bool ls, unsigned int tot_pkt_len);
void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
int len2, bool tx_own, bool ls,
unsigned int tcphdrlen,
@@ -413,6 +425,14 @@ struct stmmac_dma_ops {
int (*reset)(void __iomem *ioaddr);
void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
u32 dma_tx, u32 dma_rx, int atds);
+ void (*init_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan);
+ void (*init_rx_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan);
+ void (*init_tx_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan);
/* Configure the AXI Bus Mode Register */
void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
@@ -421,25 +441,28 @@ struct stmmac_dma_ops {
* An invalid value enables the store-and-forward mode */
void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
int rxfifosz);
+ void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz);
+ void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
- void (*enable_dma_irq) (void __iomem *ioaddr);
- void (*disable_dma_irq) (void __iomem *ioaddr);
- void (*start_tx) (void __iomem *ioaddr);
- void (*stop_tx) (void __iomem *ioaddr);
- void (*start_rx) (void __iomem *ioaddr);
- void (*stop_rx) (void __iomem *ioaddr);
+ void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
+ void (*start_tx)(void __iomem *ioaddr, u32 chan);
+ void (*stop_tx)(void __iomem *ioaddr, u32 chan);
+ void (*start_rx)(void __iomem *ioaddr, u32 chan);
+ void (*stop_rx)(void __iomem *ioaddr, u32 chan);
int (*dma_interrupt) (void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
+ struct stmmac_extra_stats *x, u32 chan);
/* If supported then get the optional core features */
void (*get_hw_feature)(void __iomem *ioaddr,
struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
- void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
- void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
- void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
+ void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
+ void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+ void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
@@ -451,20 +474,44 @@ struct mac_device_info;
struct stmmac_ops {
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, int mtu);
+ /* Enable the MAC RX/TX */
+ void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
int (*rx_ipc)(struct mac_device_info *hw);
/* Enable RX Queues */
- void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
+ void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
+ /* RX Queues Priority */
+ void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+ /* TX Queues Priority */
+ void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+ /* RX Queues Routing */
+ void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
+ u32 queue);
+ /* Program RX Algorithms */
+ void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
+ /* Program TX Algorithms */
+ void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
+ /* Set MTL TX queues weight */
+ void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
+ u32 weight, u32 queue);
+ /* RX MTL queue to RX dma mapping */
+ void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
+ /* Configure AV Algorithm */
+ void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
+ u32 idle_slope, u32 high_credit, u32 low_credit,
+ u32 queue);
/* Dump MAC registers */
void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
/* Handle extra events on specific interrupts hw dependent */
int (*host_irq_status)(struct mac_device_info *hw,
struct stmmac_extra_stats *x);
+ /* Handle MTL interrupts */
+ int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
/* Multicast filter setting */
void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
/* Flow control setting */
void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time);
+ unsigned int fc, unsigned int pause_time, u32 tx_cnt);
/* Set power management mode (e.g. magic frame) */
void (*pmt)(struct mac_device_info *hw, unsigned long mode);
/* Set/Get Unicast MAC addresses */
@@ -477,7 +524,8 @@ struct stmmac_ops {
void (*reset_eee_mode)(struct mac_device_info *hw);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
- void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+ void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues);
/* PCS calls */
void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
bool loopback);
@@ -547,6 +595,11 @@ struct mac_device_info {
unsigned int ps;
};
+struct stmmac_rx_routing {
+ u32 reg_mask;
+ u32 reg_shift;
+};
+
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
int perfect_uc_entries,
int *synopsys_id);
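The common.h changes turn the DMA callbacks into per-channel operations (init_chan, init_rx_chan, init_tx_chan, start_tx and friends all gain a chan argument). A hedged caller-side sketch of how these compose, with the priv/tx_queue layout assumed rather than shown in this hunk:

/* hypothetical helper: bring up every TX channel with the new ops */
static void stmmac_start_all_tx_chans(struct stmmac_priv *priv, u32 tx_cnt)
{
	u32 chan;

	for (chan = 0; chan < tx_cnt; chan++) {
		priv->hw->dma->init_chan(priv->ioaddr,
					 priv->plat->dma_cfg, chan);
		priv->hw->dma->init_tx_chan(priv->ioaddr,
					    priv->plat->dma_cfg,
					    priv->tx_queue[chan].dma_tx_phy,
					    chan);
		priv->hw->dma->start_tx(priv->ioaddr, chan);
	}
}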
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 1a3fa3d9f855..dd6a2f9791cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -14,16 +14,34 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/stmmac.h>
#include "stmmac_platform.h"
+#include "dwmac4.h"
+
+struct tegra_eqos {
+ struct device *dev;
+ void __iomem *regs;
+
+ struct reset_control *rst;
+ struct clk *clk_master;
+ struct clk *clk_slave;
+ struct clk *clk_tx;
+ struct clk *clk_rx;
+
+ struct gpio_desc *reset;
+};
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
@@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
return 0;
}
+static void *dwc_qos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ int err;
+
+ plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(plat_dat->stmmac_clk)) {
+ dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+ return ERR_CAST(plat_dat->stmmac_clk);
+ }
+
+ err = clk_prepare_enable(plat_dat->stmmac_clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(plat_dat->pclk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ err = PTR_ERR(plat_dat->pclk);
+ goto disable;
+ }
+
+ err = clk_prepare_enable(plat_dat->pclk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
+ err);
+ goto disable;
+ }
+
+ return NULL;
+
+disable:
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ return ERR_PTR(err);
+}
+
+static int dwc_qos_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(priv->plat->pclk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+
+ return 0;
+}
+
+#define SDMEMCOMPPADCTRL 0x8800
+#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
+
+#define AUTO_CAL_CONFIG 0x8804
+#define AUTO_CAL_CONFIG_START BIT(31)
+#define AUTO_CAL_CONFIG_ENABLE BIT(29)
+
+#define AUTO_CAL_STATUS 0x880c
+#define AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
+{
+ struct tegra_eqos *eqos = priv;
+ unsigned long rate = 125000000;
+ bool needs_calibration = false;
+ u32 value;
+ int err;
+
+ switch (speed) {
+ case SPEED_1000:
+ needs_calibration = true;
+ rate = 125000000;
+ break;
+
+ case SPEED_100:
+ needs_calibration = true;
+ rate = 25000000;
+ break;
+
+ case SPEED_10:
+ rate = 2500000;
+ break;
+
+ default:
+ dev_err(eqos->dev, "invalid speed %u\n", speed);
+ break;
+ }
+
+ if (needs_calibration) {
+ /* calibrate */
+ value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+ value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+ writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ udelay(1);
+
+ value = readl(eqos->regs + AUTO_CAL_CONFIG);
+ value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
+ writel(value, eqos->regs + AUTO_CAL_CONFIG);
+
+ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+ value,
+ value & AUTO_CAL_STATUS_ACTIVE,
+ 1, 10);
+ if (err < 0) {
+ dev_err(eqos->dev, "calibration did not start\n");
+ goto failed;
+ }
+
+ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+ value,
+ (value & AUTO_CAL_STATUS_ACTIVE) == 0,
+ 20, 200);
+ if (err < 0) {
+ dev_err(eqos->dev, "calibration didn't finish\n");
+ goto failed;
+ }
+
+ failed:
+ value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+ value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+ writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+ } else {
+ value = readl(eqos->regs + AUTO_CAL_CONFIG);
+ value &= ~AUTO_CAL_CONFIG_ENABLE;
+ writel(value, eqos->regs + AUTO_CAL_CONFIG);
+ }
+
+ err = clk_set_rate(eqos->clk_tx, rate);
+ if (err < 0)
+ dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
+}
+
+static int tegra_eqos_init(struct platform_device *pdev, void *priv)
+{
+ struct tegra_eqos *eqos = priv;
+ unsigned long rate;
+ u32 value;
+
+ rate = clk_get_rate(eqos->clk_slave);
+
+ value = (rate / 1000000) - 1;
+ writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
+
+ return 0;
+}
+
+static void *tegra_eqos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res)
+{
+ struct tegra_eqos *eqos;
+ int err;
+
+ eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
+ if (!eqos) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ eqos->dev = &pdev->dev;
+ eqos->regs = res->addr;
+
+ eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
+ if (IS_ERR(eqos->clk_master)) {
+ err = PTR_ERR(eqos->clk_master);
+ goto error;
+ }
+
+ err = clk_prepare_enable(eqos->clk_master);
+ if (err < 0)
+ goto error;
+
+ eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
+ if (IS_ERR(eqos->clk_slave)) {
+ err = PTR_ERR(eqos->clk_slave);
+ goto disable_master;
+ }
+
+ data->stmmac_clk = eqos->clk_slave;
+
+ err = clk_prepare_enable(eqos->clk_slave);
+ if (err < 0)
+ goto disable_master;
+
+ eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(eqos->clk_rx)) {
+ err = PTR_ERR(eqos->clk_rx);
+ goto disable_slave;
+ }
+
+ err = clk_prepare_enable(eqos->clk_rx);
+ if (err < 0)
+ goto disable_slave;
+
+ eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(eqos->clk_tx)) {
+ err = PTR_ERR(eqos->clk_tx);
+ goto disable_rx;
+ }
+
+ err = clk_prepare_enable(eqos->clk_tx);
+ if (err < 0)
+ goto disable_rx;
+
+ eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(eqos->reset)) {
+ err = PTR_ERR(eqos->reset);
+ goto disable_tx;
+ }
+
+ usleep_range(2000, 4000);
+ gpiod_set_value(eqos->reset, 0);
+
+ eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
+ if (IS_ERR(eqos->rst)) {
+ err = PTR_ERR(eqos->rst);
+ goto reset_phy;
+ }
+
+ err = reset_control_assert(eqos->rst);
+ if (err < 0)
+ goto reset_phy;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_deassert(eqos->rst);
+ if (err < 0)
+ goto reset_phy;
+
+ usleep_range(2000, 4000);
+
+ data->fix_mac_speed = tegra_eqos_fix_speed;
+ data->init = tegra_eqos_init;
+ data->bsp_priv = eqos;
+
+ err = tegra_eqos_init(pdev, eqos);
+ if (err < 0)
+ goto reset;
+
+out:
+ return eqos;
+
+reset:
+ reset_control_assert(eqos->rst);
+reset_phy:
+ gpiod_set_value(eqos->reset, 1);
+disable_tx:
+ clk_disable_unprepare(eqos->clk_tx);
+disable_rx:
+ clk_disable_unprepare(eqos->clk_rx);
+disable_slave:
+ clk_disable_unprepare(eqos->clk_slave);
+disable_master:
+ clk_disable_unprepare(eqos->clk_master);
+error:
+ eqos = ERR_PTR(err);
+ goto out;
+}
+
+static int tegra_eqos_remove(struct platform_device *pdev)
+{
+ struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
+
+ reset_control_assert(eqos->rst);
+ gpiod_set_value(eqos->reset, 1);
+ clk_disable_unprepare(eqos->clk_tx);
+ clk_disable_unprepare(eqos->clk_rx);
+ clk_disable_unprepare(eqos->clk_slave);
+ clk_disable_unprepare(eqos->clk_master);
+
+ return 0;
+}
+
+struct dwc_eth_dwmac_data {
+ void *(*probe)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res);
+ int (*remove)(struct platform_device *pdev);
+};
+
+static const struct dwc_eth_dwmac_data dwc_qos_data = {
+ .probe = dwc_qos_probe,
+ .remove = dwc_qos_remove,
+};
+
+static const struct dwc_eth_dwmac_data tegra_eqos_data = {
+ .probe = tegra_eqos_probe,
+ .remove = tegra_eqos_remove,
+};
+
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
{
+ const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct resource *res;
+ void *priv;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+
memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
/**
@@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- ret = PTR_ERR(plat_dat->stmmac_clk);
- plat_dat->stmmac_clk = NULL;
- goto err_remove_config_dt;
- }
- clk_prepare_enable(plat_dat->stmmac_clk);
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- ret = PTR_ERR(plat_dat->pclk);
- plat_dat->pclk = NULL;
- goto err_out_clk_dis_phy;
+ priv = data->probe(pdev, plat_dat, &stmmac_res);
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
+ dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
+ goto remove_config;
}
- clk_prepare_enable(plat_dat->pclk);
ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
if (ret)
- goto err_out_clk_dis_aper;
+ goto remove;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_out_clk_dis_aper;
+ goto remove;
- return 0;
+ return ret;
-err_out_clk_dis_aper:
- clk_disable_unprepare(plat_dat->pclk);
-err_out_clk_dis_phy:
- clk_disable_unprepare(plat_dat->stmmac_clk);
-err_remove_config_dt:
+remove:
+ data->remove(pdev);
+remove_config:
stmmac_remove_config_dt(pdev, plat_dat);
return ret;
@@ -178,11 +479,29 @@ err_remove_config_dt:
static int dwc_eth_dwmac_remove(struct platform_device *pdev)
{
- return stmmac_pltfr_remove(pdev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct dwc_eth_dwmac_data *data;
+ int err;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ err = stmmac_dvr_remove(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+
+ err = data->remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
+
+ stmmac_remove_config_dt(pdev, priv->plat);
+
+ return err;
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
- { .compatible = "snps,dwc-qos-ethernet-4.10", },
+ { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
+ { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
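The probe path above now dispatches through per-compatible match data. The same pattern in isolation, as a generic sketch (foo_* names hypothetical):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_data {
	int (*probe)(struct platform_device *pdev);
	int (*remove)(struct platform_device *pdev);
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_data *data;

	/* returns the .data member of the matching of_device_id */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	return data->probe(pdev);
}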
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index e5db6ac36235..f0df5193f047 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -74,6 +74,10 @@ struct rk_priv_data {
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
+#define DELAY_ENABLE(soc, tx, rx) \
+ (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
+ ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+
#define RK3228_GRF_MAC_CON0 0x0900
#define RK3228_GRF_MAC_CON1 0x0904
@@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RGMII |
RK3228_GMAC_RMII_MODE_CLR |
- RK3228_GMAC_RXCLK_DLY_ENABLE |
- RK3228_GMAC_TXCLK_DLY_ENABLE);
+ DELAY_ENABLE(RK3228, tx_delay, rx_delay));
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
@@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3288_GMAC_PHY_INTF_SEL_RGMII |
RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
- RK3288_GMAC_RXCLK_DLY_ENABLE |
- RK3288_GMAC_TXCLK_DLY_ENABLE |
+ DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
}
@@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3366_GMAC_PHY_INTF_SEL_RGMII |
RK3366_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
- RK3366_GMAC_RXCLK_DLY_ENABLE |
- RK3366_GMAC_TXCLK_DLY_ENABLE |
+ DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
}
@@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3368_GMAC_PHY_INTF_SEL_RGMII |
RK3368_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
- RK3368_GMAC_RXCLK_DLY_ENABLE |
- RK3368_GMAC_TXCLK_DLY_ENABLE |
+ DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
}
@@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3399_GMAC_PHY_INTF_SEL_RGMII |
RK3399_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
- RK3399_GMAC_RXCLK_DLY_ENABLE |
- RK3399_GMAC_TXCLK_DLY_ENABLE |
+ DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
}
@@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
return ret;
/*rmii or rgmii*/
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
+ switch (bsp_priv->phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
dev_info(dev, "init for RGMII\n");
bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
bsp_priv->rx_delay);
- } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ dev_info(dev, "init for RGMII_ID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_info(dev, "init for RGMII_RXID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dev_info(dev, "init for RGMII_TXID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
dev_info(dev, "init for RMII\n");
bsp_priv->ops->set_to_rmii(bsp_priv);
- } else {
+ break;
+ default:
dev_err(dev, "NO interface defined!\n");
}
@@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, unsigned int speed)
struct rk_priv_data *bsp_priv = priv;
struct device *dev = &bsp_priv->pdev->dev;
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
+ switch (bsp_priv->phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
- else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ break;
+ case PHY_INTERFACE_MODE_RMII:
bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
- else
+ break;
+ default:
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
+ }
}
static int rk_gmac_probe(struct platform_device *pdev)
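DELAY_ENABLE() pastes the SoC prefix onto the per-SoC GRF bit names, so each set_to_rgmii() call now honours the requested delays instead of unconditionally enabling both. Expanded by hand for one SoC:

/* DELAY_ENABLE(RK3288, tx_delay, rx_delay) expands to:
 *
 *   ((tx_delay) ? RK3288_GMAC_TXCLK_DLY_ENABLE
 *               : RK3288_GMAC_TXCLK_DLY_DISABLE) |
 *   ((rx_delay) ? RK3288_GMAC_RXCLK_DLY_ENABLE
 *               : RK3288_GMAC_RXCLK_DLY_DISABLE)
 *
 * so passing 0 for a delay, as the RGMII_ID/RXID/TXID cases do,
 * selects the corresponding *_DISABLE bit.
 */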
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 19b9b3087099..f3d9305e5f70 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
/* Set flow such that DZPQ in Mac Register 6 is 0,
@@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
-static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues)
{
u32 value = readl(ioaddr + GMAC_DEBUG);
@@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
static const struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
+ .set_mac = stmmac_set_mac,
.rx_ipc = dwmac1000_rx_ipc_enable,
.dump_regs = dwmac1000_dump_regs,
.host_irq_status = dwmac1000_irq_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index d3654a447046..471a9aa6ac94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
}
-static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
+ u32 number_chan)
{
writel(riwt, ioaddr + DMA_RX_WATCHDOG);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index e370ccec6176..1b3609105484 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct mac_device_info *hw,
}
static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
static const struct stmmac_ops dwmac100_ops = {
.core_init = dwmac100_core_init,
+ .set_mac = stmmac_set_mac,
.rx_ipc = dwmac100_rx_ipc_enable,
.dump_regs = dwmac100_dump_mac_regs,
.host_irq_status = dwmac100_irq_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index db45134fddf0..d74cedf2a397 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -22,9 +22,15 @@
#define GMAC_HASH_TAB_32_63 0x00000014
#define GMAC_RX_FLOW_CTRL 0x00000090
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
+#define GMAC_TXQ_PRTY_MAP0 0x98
+#define GMAC_TXQ_PRTY_MAP1 0x9C
#define GMAC_RXQ_CTRL0 0x000000a0
+#define GMAC_RXQ_CTRL1 0x000000a4
+#define GMAC_RXQ_CTRL2 0x000000a8
+#define GMAC_RXQ_CTRL3 0x000000ac
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
+#define GMAC_1US_TIC_COUNTER 0x000000dc
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
@@ -38,6 +44,22 @@
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+/* RX Queues Routing */
+#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
+#define GMAC_RXQCTRL_AVCPQ_SHIFT 0
+#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4)
+#define GMAC_RXQCTRL_PTPQ_SHIFT 4
+#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8)
+#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8
+#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12)
+#define GMAC_RXQCTRL_UPQ_SHIFT 12
+#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16)
+#define GMAC_RXQCTRL_MCBCQ_SHIFT 16
+#define GMAC_RXQCTRL_MCBCQEN BIT(20)
+#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
+#define GMAC_RXQCTRL_TACPQE BIT(21)
+#define GMAC_RXQCTRL_TACPQE_SHIFT 21
+
/* MAC Packet Filtering */
#define GMAC_PACKET_FILTER_PR BIT(0)
#define GMAC_PACKET_FILTER_HMC BIT(2)
@@ -53,6 +75,14 @@
/* MAC Flow Control RX */
#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
+/* RX Queues Priorities */
+#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
+#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8)
+
+/* TX Queues Priorities */
+#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
+#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8)
+
/* MAC Flow Control TX */
#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
@@ -148,6 +178,8 @@ enum power_event {
/* MAC HW features1 bitmap */
#define GMAC_HW_FEAT_AVSEL BIT(20)
#define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
+#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
/* MAC HW features2 bitmap */
#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
@@ -161,8 +193,25 @@ enum power_event {
#define GMAC_HI_REG_AE BIT(31)
/* MTL registers */
+#define MTL_OPERATION_MODE 0x00000c00
+#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
+#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
+#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
+#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
+#define MTL_OPERATION_SCHALG_SP (0x3 << 5)
+#define MTL_OPERATION_RAA BIT(2)
+#define MTL_OPERATION_RAA_SP (0x0 << 2)
+#define MTL_OPERATION_RAA_WSP (0x1 << 2)
+
#define MTL_INT_STATUS 0x00000c20
-#define MTL_INT_Q0 BIT(0)
+#define MTL_INT_QX(x) BIT(x)
+
+#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
+#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
+#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
+#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
+#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
+#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
#define MTL_CHAN_BASE_ADDR 0x00000d00
#define MTL_CHAN_BASE_OFFSET 0x40
@@ -180,6 +229,7 @@ enum power_event {
#define MTL_OP_MODE_TSF BIT(1)
#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
+#define MTL_OP_MODE_TQS_SHIFT 16
#define MTL_OP_MODE_TTC_MASK 0x70
#define MTL_OP_MODE_TTC_SHIFT 4
@@ -193,6 +243,17 @@ enum power_event {
#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
+#define MTL_OP_MODE_RQS_SHIFT 20
+
+#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
+#define MTL_OP_MODE_RFD_SHIFT 14
+
+#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
+#define MTL_OP_MODE_RFA_SHIFT 8
+
+#define MTL_OP_MODE_EHFC BIT(7)
+
#define MTL_OP_MODE_RTC_MASK 0x18
#define MTL_OP_MODE_RTC_SHIFT 3
@@ -201,6 +262,46 @@ enum power_event {
#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+/* MTL ETS Control register */
+#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
+#define MTL_ETS_CTRL_BASE_OFFSET 0x40
+#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
+ ((x) * MTL_ETS_CTRL_BASE_OFFSET))
+
+#define MTL_ETS_CTRL_CC BIT(3)
+#define MTL_ETS_CTRL_AVALG BIT(2)
+
+/* MTL Queue Quantum Weight */
+#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
+#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
+#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
+ ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
+#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
+
+/* MTL sendSlopeCredit register */
+#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
+#define MTL_SEND_SLP_CRED_OFFSET 0x40
+#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
+ ((x) * MTL_SEND_SLP_CRED_OFFSET))
+
+#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
+
+/* MTL hiCredit register */
+#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
+#define MTL_HIGH_CRED_OFFSET 0x40
+#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
+ ((x) * MTL_HIGH_CRED_OFFSET))
+
+#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
+
+/* MTL loCredit register */
+#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
+#define MTL_LOW_CRED_OFFSET 0x40
+#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
+ ((x) * MTL_LOW_CRED_OFFSET))
+
+#define MTL_LOW_CRED_LC_MASK GENMASK(28, 0)
+
/* MTL debug */
#define MTL_DEBUG_TXSTSFSTS BIT(5)
#define MTL_DEBUG_TXFSTS BIT(4)
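The new per-queue priority fields are 8 bits wide and packed four to a register, which the GENMASK() helpers encode directly. Worked out for one queue (arithmetic only, queues 0-3 shown):

/* GMAC_RXQCTRL_PSRQX_MASK(2)  == GENMASK(7 + 2 * 8, 2 * 8)
 *                              == GENMASK(23, 16)
 * GMAC_RXQCTRL_PSRQX_SHIFT(2) == 16
 *
 * Queues 0-3 live in GMAC_RXQ_CTRL2 and queues 4-7 in GMAC_RXQ_CTRL3,
 * which is why the core code below selects the base register with
 * (queue < 4).
 */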
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 1e79e6529c4a..48793f2e9307 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
writel(value, ioaddr + GMAC_INT_EN);
}
-static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
+static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
+ u8 mode, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
value &= GMAC_RX_QUEUE_CLEAR(queue);
- value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
writel(value, ioaddr + GMAC_RXQ_CTRL0);
}
+static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
+ u32 prio, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 base_register;
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+
+ value = readl(ioaddr + base_register);
+
+ value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
+ value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ GMAC_RXQCTRL_PSRQX_MASK(queue);
+ writel(value, ioaddr + base_register);
+}
+
+static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
+ u32 prio, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 base_register;
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+
+ value = readl(ioaddr + base_register);
+
+ value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
+ value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
+ GMAC_TXQCTRL_PSTQX_MASK(queue);
+
+ writel(value, ioaddr + base_register);
+}
+
+static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
+ u8 packet, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ const struct stmmac_rx_routing route_possibilities[] = {
+ { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
+ { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
+ { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
+ { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
+ { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
+ };
+
+ value = readl(ioaddr + GMAC_RXQ_CTRL1);
+
+ /* routing configuration */
+ value &= ~route_possibilities[packet - 1].reg_mask;
+ value |= (queue << route_possibilities[packet - 1].reg_shift) &
+ route_possibilities[packet - 1].reg_mask;
+
+ /* some packets require extra ops */
+ if (packet == PACKET_AVCPQ) {
+ value &= ~GMAC_RXQCTRL_TACPQE;
+ value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
+ } else if (packet == PACKET_MCBCQ) {
+ value &= ~GMAC_RXQCTRL_MCBCQEN;
+ value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
+ }
+
+ writel(value, ioaddr + GMAC_RXQ_CTRL1);
+}
+
+static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+ u32 rx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+ value &= ~MTL_OPERATION_RAA;
+ switch (rx_alg) {
+ case MTL_RX_ALGORITHM_SP:
+ value |= MTL_OPERATION_RAA_SP;
+ break;
+ case MTL_RX_ALGORITHM_WSP:
+ value |= MTL_OPERATION_RAA_WSP;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+ u32 tx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+ value &= ~MTL_OPERATION_SCHALG_MASK;
+ switch (tx_alg) {
+ case MTL_TX_ALGORITHM_WRR:
+ value |= MTL_OPERATION_SCHALG_WRR;
+ break;
+ case MTL_TX_ALGORITHM_WFQ:
+ value |= MTL_OPERATION_SCHALG_WFQ;
+ break;
+ case MTL_TX_ALGORITHM_DWRR:
+ value |= MTL_OPERATION_SCHALG_DWRR;
+ break;
+ case MTL_TX_ALGORITHM_SP:
+ value |= MTL_OPERATION_SCHALG_SP;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+
+ value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
+ value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
+ writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+}
+
+static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (queue < 4)
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
+ else
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
+
+ if (queue == 0 || queue == 4) {
+ value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
+ value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+ } else {
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
+ }
+
+ if (queue < 4)
+ writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
+ else
+ writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
+}
+
+static void dwmac4_config_cbs(struct mac_device_info *hw,
+ u32 send_slope, u32 idle_slope,
+ u32 high_credit, u32 low_credit, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
+ pr_debug("\tsend_slope: 0x%08x\n", send_slope);
+ pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
+ pr_debug("\thigh_credit: 0x%08x\n", high_credit);
+ pr_debug("\tlow_credit: 0x%08x\n", low_credit);
+
+ /* enable AV algorithm */
+ value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+ value |= MTL_ETS_CTRL_AVALG;
+ value |= MTL_ETS_CTRL_CC;
+ writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+
+ /* configure send slope */
+ value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
+ value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
+ writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+
+ /* configure idle slope (same register as tx weight) */
+ dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
+
+ /* configure high credit */
+ value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_HIGH_CRED_HC_MASK;
+ value |= high_credit & MTL_HIGH_CRED_HC_MASK;
+ writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+
+ /* configure low credit */
+ value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_LOW_CRED_LC_MASK;
+ value |= low_credit & MTL_LOW_CRED_LC_MASK;
+ writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+}
+
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
void __iomem *ioaddr = hw->pcsr;
@@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
}
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
{
void __iomem *ioaddr = hw->pcsr;
- u32 channel = STMMAC_CHAN0; /* FIXME */
unsigned int flow = 0;
+ u32 queue = 0;
pr_debug("GMAC Flow-Control:\n");
if (fc & FLOW_RX) {
@@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
if (fc & FLOW_TX) {
pr_debug("\tTransmit Flow-Control ON\n");
- flow |= GMAC_TX_FLOW_CTRL_TFE;
- writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
- if (duplex) {
+ if (duplex)
pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
- flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
- writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
+ for (queue = 0; queue < tx_cnt; queue++) {
+ flow |= GMAC_TX_FLOW_CTRL_TFE;
+
+ if (duplex)
+ flow |=
+ (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
}
}
@@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
}
}
+static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 mtl_int_qx_status;
+ int ret = 0;
+
+ mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+
+ /* Check MTL Interrupt */
+ if (mtl_int_qx_status & MTL_INT_QX(chan)) {
+ /* read Queue x Interrupt status */
+ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
+
+ if (status & MTL_RX_OVERFLOW_INT) {
+ /* clear Interrupt */
+ writel(status | MTL_RX_OVERFLOW_INT,
+ ioaddr + MTL_CHAN_INT_CTRL(chan));
+ ret = CORE_IRQ_MTL_RX_OVERFLOW;
+ }
+ }
+
+ return ret;
+}
+
static int dwmac4_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
void __iomem *ioaddr = hw->pcsr;
- u32 mtl_int_qx_status;
u32 intr_status;
int ret = 0;
@@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_receive_pmt_irq_n++;
}
- mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
- /* Check MTL Interrupt: Currently only one queue is used: Q0. */
- if (mtl_int_qx_status & MTL_INT_Q0) {
- /* read Queue 0 Interrupt status */
- u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
-
- if (status & MTL_RX_OVERFLOW_INT) {
- /* clear Interrupt */
- writel(status | MTL_RX_OVERFLOW_INT,
- ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
- ret = CORE_IRQ_MTL_RX_OVERFLOW;
- }
- }
-
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
if (intr_status & PCS_RGSMIIIS_IRQ)
dwmac4_phystatus(ioaddr, x);
@@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues)
{
u32 value;
-
- /* Currently only channel 0 is supported */
- value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
-
- if (value & MTL_DEBUG_TXSTSFSTS)
- x->mtl_tx_status_fifo_full++;
- if (value & MTL_DEBUG_TXFSTS)
- x->mtl_tx_fifo_not_empty++;
- if (value & MTL_DEBUG_TWCSTS)
- x->mmtl_fifo_ctrl++;
- if (value & MTL_DEBUG_TRCSTS_MASK) {
- u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
- >> MTL_DEBUG_TRCSTS_SHIFT;
- if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
- x->mtl_tx_fifo_read_ctrl_write++;
- else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
- x->mtl_tx_fifo_read_ctrl_wait++;
- else if (trcsts == MTL_DEBUG_TRCSTS_READ)
- x->mtl_tx_fifo_read_ctrl_read++;
- else
- x->mtl_tx_fifo_read_ctrl_idle++;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues; queue++) {
+ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
+
+ if (value & MTL_DEBUG_TXSTSFSTS)
+ x->mtl_tx_status_fifo_full++;
+ if (value & MTL_DEBUG_TXFSTS)
+ x->mtl_tx_fifo_not_empty++;
+ if (value & MTL_DEBUG_TWCSTS)
+ x->mmtl_fifo_ctrl++;
+ if (value & MTL_DEBUG_TRCSTS_MASK) {
+ u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+ >> MTL_DEBUG_TRCSTS_SHIFT;
+ if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+ x->mtl_tx_fifo_read_ctrl_write++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+ x->mtl_tx_fifo_read_ctrl_wait++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+ x->mtl_tx_fifo_read_ctrl_read++;
+ else
+ x->mtl_tx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_TXPAUSED)
+ x->mac_tx_in_pause++;
}
- if (value & MTL_DEBUG_TXPAUSED)
- x->mac_tx_in_pause++;
- value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+ for (queue = 0; queue < rx_queues; queue++) {
+ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
- if (value & MTL_DEBUG_RXFSTS_MASK) {
- u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
- >> MTL_DEBUG_RRCSTS_SHIFT;
+ if (value & MTL_DEBUG_RXFSTS_MASK) {
+ u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+ >> MTL_DEBUG_RRCSTS_SHIFT;
- if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
- x->mtl_rx_fifo_fill_level_full++;
- else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
- x->mtl_rx_fifo_fill_above_thresh++;
- else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
- x->mtl_rx_fifo_fill_below_thresh++;
- else
- x->mtl_rx_fifo_fill_level_empty++;
- }
- if (value & MTL_DEBUG_RRCSTS_MASK) {
- u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
- MTL_DEBUG_RRCSTS_SHIFT;
-
- if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
- x->mtl_rx_fifo_read_ctrl_flush++;
- else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
- x->mtl_rx_fifo_read_ctrl_read_data++;
- else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
- x->mtl_rx_fifo_read_ctrl_status++;
- else
- x->mtl_rx_fifo_read_ctrl_idle++;
+ if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+ x->mtl_rx_fifo_fill_level_full++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+ x->mtl_rx_fifo_fill_above_thresh++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+ x->mtl_rx_fifo_fill_below_thresh++;
+ else
+ x->mtl_rx_fifo_fill_level_empty++;
+ }
+ if (value & MTL_DEBUG_RRCSTS_MASK) {
+ u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+ MTL_DEBUG_RRCSTS_SHIFT;
+
+ if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+ x->mtl_rx_fifo_read_ctrl_flush++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+ x->mtl_rx_fifo_read_ctrl_read_data++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+ x->mtl_rx_fifo_read_ctrl_status++;
+ else
+ x->mtl_rx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_RWCSTS)
+ x->mtl_rx_fifo_ctrl_active++;
}
- if (value & MTL_DEBUG_RWCSTS)
- x->mtl_rx_fifo_ctrl_active++;
/* GMAC debug */
value = readl(ioaddr + GMAC_DEBUG);
@@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
static const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
+ .set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
+ .rx_queue_prio = dwmac4_rx_queue_priority,
+ .tx_queue_prio = dwmac4_tx_queue_priority,
+ .rx_queue_routing = dwmac4_rx_queue_routing,
+ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwmac4_map_mtl_dma,
+ .config_cbs = dwmac4_config_cbs,
.dump_regs = dwmac4_dump_regs,
.host_irq_status = dwmac4_irq_status,
+ .host_mtl_irq_status = dwmac4_irq_mtl_status,
+ .flow_ctrl = dwmac4_flow_ctrl,
+ .pmt = dwmac4_pmt,
+ .set_umac_addr = dwmac4_set_umac_addr,
+ .get_umac_addr = dwmac4_get_umac_addr,
+ .set_eee_mode = dwmac4_set_eee_mode,
+ .reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_timer = dwmac4_set_eee_timer,
+ .set_eee_pls = dwmac4_set_eee_pls,
+ .pcs_ctrl_ane = dwmac4_ctrl_ane,
+ .pcs_rane = dwmac4_rane,
+ .pcs_get_adv_lp = dwmac4_get_adv_lp,
+ .debug = dwmac4_debug,
+ .set_filter = dwmac4_set_filter,
+};
+
+static const struct stmmac_ops dwmac410_ops = {
+ .core_init = dwmac4_core_init,
+ .set_mac = stmmac_dwmac4_set_mac,
+ .rx_ipc = dwmac4_rx_ipc_enable,
+ .rx_queue_enable = dwmac4_rx_queue_enable,
+ .rx_queue_prio = dwmac4_rx_queue_priority,
+ .tx_queue_prio = dwmac4_tx_queue_priority,
+ .rx_queue_routing = dwmac4_rx_queue_routing,
+ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwmac4_map_mtl_dma,
+ .config_cbs = dwmac4_config_cbs,
+ .dump_regs = dwmac4_dump_regs,
+ .host_irq_status = dwmac4_irq_status,
+ .host_mtl_irq_status = dwmac4_irq_mtl_status,
.flow_ctrl = dwmac4_flow_ctrl,
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
@@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- mac->mac = &dwmac4_ops;
-
mac->link.port = GMAC_CONFIG_PS;
mac->link.duplex = GMAC_CONFIG_DM;
mac->link.speed = GMAC_CONFIG_FES;
@@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
else
mac->dma = &dwmac4_dma_ops;
+ if (*synopsys_id >= DWMAC_CORE_4_00)
+ mac->mac = &dwmac410_ops;
+ else
+ mac->mac = &dwmac4_ops;
+
return mac;
}
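With MTL queue interrupts factored into host_mtl_irq_status(), the top-level interrupt handler can poll each channel individually. A hypothetical caller fragment (field names assumed, not part of this hunk):

static void stmmac_poll_mtl_irqs(struct stmmac_priv *priv, u32 rx_cnt)
{
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		int status = priv->hw->mac->host_mtl_irq_status(priv->hw,
								queue);

		/* on RX FIFO overflow, nudge the tail pointer so the
		 * DMA restarts fetching descriptors
		 */
		if (status & CORE_IRQ_MTL_RX_OVERFLOW)
			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
					priv->rx_queue[queue].rx_tail_addr,
					queue);
	}
}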
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 843ec69222ea..aa6476439aee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -304,12 +304,13 @@ static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
- bool ls)
+ bool ls, unsigned int tot_pkt_len)
{
unsigned int tdes3 = le32_to_cpu(p->des3);
p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
+ tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
if (is_fs)
tdes3 |= TDES3_FIRST_DESCRIPTOR;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index f97b0d5d9987..eec8463057fd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
-static void dwmac4_dma_init_channel(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 dma_rx_phy,
- u32 channel)
+void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
{
u32 value;
- int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
- int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
- /* set PBL for each channels. Currently we affect same configuration
- * on each channel
- */
- value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
- if (dma_cfg->pblx8)
- value = value | DMA_BUS_MODE_PBL;
- writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+ value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+ writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+}
- value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ u32 value;
+ u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
- value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
- value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+ writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+}
- /* Mask interrupts by writing to CSR7 */
- writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
+void dwmac4_dma_init_channel(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value;
+
+ /* common channel control register config */
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (dma_cfg->pblx8)
+ value = value | DMA_BUS_MODE_PBL;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
- writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
- writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_CHAN_INTR_DEFAULT_MASK,
+ ioaddr + DMA_CHAN_INTR_ENA(chan));
}
static void dwmac4_dma_init(void __iomem *ioaddr,
@@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
- int i;
/* Set the Fixed burst mode */
if (dma_cfg->fixed_burst)
@@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
value |= DMA_SYS_BUS_AAL;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
-
- for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
- dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
}
static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
@@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
_dwmac4_dump_dma_regs(ioaddr, i, reg_space);
}
-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
{
- int i;
+ u32 chan;
- for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
- writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+ for (chan = 0; chan < number_chan; chan++)
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
}
-static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
- int rxmode, u32 channel)
+static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz)
{
- u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
+ unsigned int rqs = fifosz / 256 - 1;
+ u32 mtl_rx_op, mtl_rx_int;
- /* Following code only done for channel 0, other channels not yet
- * supported.
- */
- mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ mtl_rx_op |= MTL_OP_MODE_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+ mtl_rx_op &= ~MTL_OP_MODE_RSF;
+ mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+ if (mode <= 32)
+ mtl_rx_op |= MTL_OP_MODE_RTC_32;
+ else if (mode <= 64)
+ mtl_rx_op |= MTL_OP_MODE_RTC_64;
+ else if (mode <= 96)
+ mtl_rx_op |= MTL_OP_MODE_RTC_96;
+ else
+ mtl_rx_op |= MTL_OP_MODE_RTC_128;
+ }
+
+ mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
+ mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
+
+ /* enable flow control only if each channel gets 4 KiB or more FIFO */
+ if (fifosz >= 4096) {
+ unsigned int rfd, rfa;
+
+ mtl_rx_op |= MTL_OP_MODE_EHFC;
+
+ /* Set Threshold for Activating Flow Control to min 2 frames,
+ * i.e. 1500 * 2 = 3000 bytes.
+ *
+ * Set Threshold for Deactivating Flow Control to min 1 frame,
+ * i.e. 1500 bytes.
+ */
+ switch (fifosz) {
+ case 4096:
+			/* This violates the above formula because of the FIFO
+			 * size limit, so overflow may still occur in spite of
+			 * it.
+			 */
+ rfd = 0x03; /* Full-2.5K */
+ rfa = 0x01; /* Full-1.5K */
+ break;
+
+ case 8192:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x0a; /* Full-6K */
+ break;
+
+ case 16384:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x12; /* Full-10K */
+ break;
+
+ default:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x1e; /* Full-16K */
+ break;
+ }
+
+ mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
+ mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
- if (txmode == SF_DMA_MODE) {
+ mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
+ mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
+ }
+
+ writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+	/* Enable the MTL RX overflow interrupt */
+ mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+ writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+ ioaddr + MTL_CHAN_INT_CTRL(channel));
+}
+
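
The RQS field above encodes the RX queue size in 256-byte blocks minus one, and hardware flow control is only enabled from 4 KiB per channel upwards. A small worked sketch of both rules:

#include <stdio.h>

int main(void)
{
	unsigned int sizes[] = { 2048, 4096, 8192, 16384 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int fifosz = sizes[i];
		unsigned int rqs = fifosz / 256 - 1;	/* e.g. 4096 -> 15 */

		printf("fifosz=%5u -> RQS=%2u, flow control %s\n",
		       fifosz, rqs, fifosz >= 4096 ? "on" : "off");
	}
	return 0;
}
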
+static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+ u32 channel)
+{
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
mtl_tx_op |= MTL_OP_MODE_TSF;
} else {
- pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
mtl_tx_op &= ~MTL_OP_MODE_TSF;
mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
/* Set the transmit threshold */
- if (txmode <= 32)
+ if (mode <= 32)
mtl_tx_op |= MTL_OP_MODE_TTC_32;
- else if (txmode <= 64)
+ else if (mode <= 64)
mtl_tx_op |= MTL_OP_MODE_TTC_64;
- else if (txmode <= 96)
+ else if (mode <= 96)
mtl_tx_op |= MTL_OP_MODE_TTC_96;
- else if (txmode <= 128)
+ else if (mode <= 128)
mtl_tx_op |= MTL_OP_MODE_TTC_128;
- else if (txmode <= 192)
+ else if (mode <= 192)
mtl_tx_op |= MTL_OP_MODE_TTC_192;
- else if (txmode <= 256)
+ else if (mode <= 256)
mtl_tx_op |= MTL_OP_MODE_TTC_256;
- else if (txmode <= 384)
+ else if (mode <= 384)
mtl_tx_op |= MTL_OP_MODE_TTC_384;
else
mtl_tx_op |= MTL_OP_MODE_TTC_512;
@@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
*/
mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
-
- mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-
- if (rxmode == SF_DMA_MODE) {
- pr_debug("GMAC: enable RX store and forward mode\n");
- mtl_rx_op |= MTL_OP_MODE_RSF;
- } else {
- pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
- mtl_rx_op &= ~MTL_OP_MODE_RSF;
- mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
- if (rxmode <= 32)
- mtl_rx_op |= MTL_OP_MODE_RTC_32;
- else if (rxmode <= 64)
- mtl_rx_op |= MTL_OP_MODE_RTC_64;
- else if (rxmode <= 96)
- mtl_rx_op |= MTL_OP_MODE_RTC_96;
- else
- mtl_rx_op |= MTL_OP_MODE_RTC_128;
- }
-
- writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
-
- /* Enable MTL RX overflow */
- mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
- writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
- ioaddr + MTL_CHAN_INT_CTRL(channel));
-}
-
-static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
-{
- /* Only Channel 0 is actually configured and used */
- dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
}
static void dwmac4_get_hw_feature(void __iomem *ioaddr,
@@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
+ * shifting and store the sizes in bytes.
+ */
+ dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
+ dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
/* MAC HW feature2 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
/* TX and RX number of channels */
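
The decode added above inverts the hardware encoding: the raw field is log2(bytes / 128), so shifting 128 left by the field value recovers the FIFO size in bytes. A quick check of the mapping (field range assumed):

#include <stdio.h>

int main(void)
{
	for (unsigned int field = 0; field <= 7; field++)
		printf("field=%u -> %u bytes\n", field, 128u << field);
	/* e.g. field=5 -> 4096, field=7 -> 16384 */
	return 0;
}
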
@@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
+ .init_chan = dwmac4_dma_init_channel,
+ .init_rx_chan = dwmac4_dma_init_rx_chan,
+ .init_tx_chan = dwmac4_dma_init_tx_chan,
.axi = dwmac4_dma_axi,
.dump_regs = dwmac4_dump_dma_regs,
- .dma_mode = dwmac4_dma_operation_mode,
+ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
.enable_dma_irq = dwmac4_enable_dma_irq,
.disable_dma_irq = dwmac4_disable_dma_irq,
.start_tx = dwmac4_dma_start_tx,
@@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
const struct stmmac_dma_ops dwmac410_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
+ .init_chan = dwmac4_dma_init_channel,
+ .init_rx_chan = dwmac4_dma_init_rx_chan,
+ .init_tx_chan = dwmac4_dma_init_tx_chan,
.axi = dwmac4_dma_axi,
.dump_regs = dwmac4_dump_dma_regs,
- .dma_mode = dwmac4_dma_operation_mode,
+ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
.enable_dma_irq = dwmac410_enable_dma_irq,
.disable_dma_irq = dwmac4_disable_dma_irq,
.start_tx = dwmac4_dma_start_tx,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 1b06df749e2b..8474bf961dd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -185,17 +185,17 @@
int dwmac4_dma_reset(void __iomem *ioaddr);
void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
-void dwmac4_enable_dma_irq(void __iomem *ioaddr);
-void dwmac410_enable_dma_irq(void __iomem *ioaddr);
-void dwmac4_disable_dma_irq(void __iomem *ioaddr);
-void dwmac4_dma_start_tx(void __iomem *ioaddr);
-void dwmac4_dma_stop_tx(void __iomem *ioaddr);
-void dwmac4_dma_start_rx(void __iomem *ioaddr);
-void dwmac4_dma_stop_rx(void __iomem *ioaddr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
int dwmac4_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
+ struct stmmac_extra_stats *x, u32 chan);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index c7326d5b2f43..49f5687879df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
{
- writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
+ writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
}
void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
{
- writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
+ writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
}
-void dwmac4_dma_start_tx(void __iomem *ioaddr)
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = readl(ioaddr + GMAC_CONFIG);
value |= GMAC_CONFIG_TE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_dma_stop_tx(void __iomem *ioaddr)
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = readl(ioaddr + GMAC_CONFIG);
value &= ~GMAC_CONFIG_TE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_dma_start_rx(void __iomem *ioaddr)
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
value = readl(ioaddr + GMAC_CONFIG);
value |= GMAC_CONFIG_RE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_dma_stop_rx(void __iomem *ioaddr)
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
value = readl(ioaddr + GMAC_CONFIG);
value &= ~GMAC_CONFIG_RE;
writel(value, ioaddr + GMAC_CONFIG);
}
-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
{
- writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
+ writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
}
-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
{
- writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
+ writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
}
-void dwmac4_enable_dma_irq(void __iomem *ioaddr)
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
{
writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
- DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ DMA_CHAN_INTR_ENA(chan));
}
-void dwmac410_enable_dma_irq(void __iomem *ioaddr)
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
{
writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
- ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ ioaddr + DMA_CHAN_INTR_ENA(chan));
}
-void dwmac4_disable_dma_irq(void __iomem *ioaddr)
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
{
- writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
int dwmac4_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x)
+ struct stmmac_extra_stats *x, u32 chan)
{
int ret = 0;
- u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
u32 value;
- value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
x->rx_normal_irq_n++;
@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
 * status [21-0] except reserved bits [5-3]
*/
writel((intr_status & 0x3fffc7),
- ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+ ioaddr + DMA_CHAN_STATUS(chan));
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 56e485f79077..9091df86723a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -137,13 +137,14 @@
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-void dwmac_enable_dma_irq(void __iomem *ioaddr);
-void dwmac_disable_dma_irq(void __iomem *ioaddr);
-void dwmac_dma_start_tx(void __iomem *ioaddr);
-void dwmac_dma_stop_tx(void __iomem *ioaddr);
-void dwmac_dma_start_rx(void __iomem *ioaddr);
-void dwmac_dma_stop_rx(void __iomem *ioaddr);
-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 chan);
int dwmac_dma_reset(void __iomem *ioaddr);
#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e60bfca2a763..38f94305aab5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr)
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}
-void dwmac_enable_dma_irq(void __iomem *ioaddr)
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
{
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}
-void dwmac_disable_dma_irq(void __iomem *ioaddr)
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
{
writel(0, ioaddr + DMA_INTR_ENA);
}
-void dwmac_dma_start_tx(void __iomem *ioaddr)
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_tx(void __iomem *ioaddr)
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_start_rx(void __iomem *ioaddr)
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_rx(void __iomem *ioaddr)
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_SR;
@@ -156,7 +156,7 @@ static void show_rx_process_state(unsigned int status)
#endif
int dwmac_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x)
+ struct stmmac_extra_stats *x, u32 chan)
{
int ret = 0;
/* read the status register (CSR5) */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 323b59ec74a3..7546b3664113 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
- bool ls)
+ bool ls, unsigned int tot_pkt_len)
{
unsigned int tdes0 = le32_to_cpu(p->des0);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index efb818ebd55e..f817f8f36569 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
- bool ls)
+ bool ls, unsigned int tot_pkt_len)
{
unsigned int tdes1 = le32_to_cpu(p->des1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 452f256ff03f..28e4b5d50ce6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -26,16 +26,17 @@
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)p;
- unsigned int entry = priv->cur_tx;
- struct dma_desc *desc;
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
unsigned int nopaged_len = skb_headlen(skb);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->cur_tx;
unsigned int bmax, len, des2;
+ struct dma_desc *desc;
if (priv->extend_desc)
- desc = (struct dma_desc *)(priv->dma_etx + entry);
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
if (priv->plat->enh_desc)
bmax = BUF_SIZE_8KiB;
@@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = bmax;
- priv->tx_skbuff_dma[entry].is_jumbo = true;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
- STMMAC_RING_MODE, 0, false);
- priv->tx_skbuff[entry] = NULL;
+ STMMAC_RING_MODE, 0,
+ false, skb->len);
+ tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (priv->extend_desc)
- desc = (struct dma_desc *)(priv->dma_etx + entry);
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
des2 = dma_map_single(priv->device, skb->data + bmax, len,
DMA_TO_DEVICE);
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = len;
- priv->tx_skbuff_dma[entry].is_jumbo = true;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
- STMMAC_RING_MODE, 1, true);
+ STMMAC_RING_MODE, 1,
+ true, skb->len);
} else {
des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
desc->des2 = cpu_to_le32(des2);
if (dma_mapping_error(priv->device, des2))
return -1;
- priv->tx_skbuff_dma[entry].buf = des2;
- priv->tx_skbuff_dma[entry].len = nopaged_len;
- priv->tx_skbuff_dma[entry].is_jumbo = true;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = nopaged_len;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE, 0, true);
+ STMMAC_RING_MODE, 0,
+ true, skb->len);
}
- priv->cur_tx = entry;
+ tx_q->cur_tx = entry;
return entry;
}
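
For context, the jumbo path above splits a linear area larger than bmax across two descriptors and records skb->len as the total on both. A standalone sketch of the length split, assuming the 8 KiB enhanced-descriptor limit:

#include <stdio.h>

#define BUF_SIZE_8KiB 8192

int main(void)
{
	unsigned int head_len = 9000;	/* hypothetical jumbo linear length */
	unsigned int bmax = BUF_SIZE_8KiB;

	if (head_len > bmax)
		printf("desc0: %u bytes, desc1: %u bytes\n",
		       bmax, head_len - bmax);
	else
		printf("single desc: %u bytes\n", head_len);
	return 0;
}
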
@@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma_desc *p)
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
- unsigned int entry = priv->dirty_tx;
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->dirty_tx;
/* des3 is only used for jumbo frames tx or time stamping */
- if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
- (priv->tx_skbuff_dma[entry].last_segment &&
+ if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
+ (tx_q->tx_skbuff_dma[entry].last_segment &&
!priv->extend_desc && priv->hwts_tx_en)))
p->des3 = 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index cd8fb619b1e9..33efe7038cab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -46,38 +46,51 @@ struct stmmac_tx_info {
bool is_jumbo;
};
-struct stmmac_priv {
- /* Frequently used values are kept adjacent for cache effect */
+/* Frequently used values are kept adjacent for cache effect */
+struct stmmac_tx_queue {
+ u32 queue_index;
+ struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
struct dma_desc *dma_tx;
struct sk_buff **tx_skbuff;
+ struct stmmac_tx_info *tx_skbuff_dma;
unsigned int cur_tx;
unsigned int dirty_tx;
+ dma_addr_t dma_tx_phy;
+ u32 tx_tail_addr;
+};
+
+struct stmmac_rx_queue {
+ u32 queue_index;
+ struct stmmac_priv *priv_data;
+ struct dma_extended_desc *dma_erx;
+ struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *rx_skbuff_dma;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ u32 rx_zeroc_thresh;
+ dma_addr_t dma_rx_phy;
+ u32 rx_tail_addr;
+ struct napi_struct napi ____cacheline_aligned_in_smp;
+};
+
+struct stmmac_priv {
+ /* Frequently used values are kept adjacent for cache effect */
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
- struct stmmac_tx_info *tx_skbuff_dma;
- dma_addr_t dma_tx_phy;
+
int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
struct timer_list txtimer;
bool tso;
- struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
- struct dma_extended_desc *dma_erx;
- struct sk_buff **rx_skbuff;
- unsigned int cur_rx;
- unsigned int dirty_rx;
unsigned int dma_buf_sz;
unsigned int rx_copybreak;
- unsigned int rx_zeroc_thresh;
u32 rx_riwt;
int hwts_rx_en;
- dma_addr_t *rx_skbuff_dma;
- dma_addr_t dma_rx_phy;
-
- struct napi_struct napi ____cacheline_aligned_in_smp;
void __iomem *ioaddr;
struct net_device *dev;
@@ -85,6 +98,12 @@ struct stmmac_priv {
struct mac_device_info *hw;
spinlock_t lock;
+ /* RX Queue */
+ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+
+ /* TX Queue */
+ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+
int oldlink;
int speed;
int oldduplex;
@@ -119,8 +138,6 @@ struct stmmac_priv {
spinlock_t ptp_lock;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
- u32 rx_tail_addr;
- u32 tx_tail_addr;
u32 mss;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 85d64114e159..16808e48ca1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
struct phy_device *phy = netdev->phydev;
int new_pause = FLOW_OFF;
@@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
- priv->pause);
+ priv->pause, tx_cnt);
return 0;
}
@@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
int i, j = 0;
/* Update the DMA HW counters for dwmac10/100 */
@@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
if ((priv->hw->mac->debug) &&
(priv->synopsys_id >= DWMAC_CORE_3_50))
priv->hw->mac->debug(priv->ioaddr,
- (void *)&priv->xstats);
+ (void *)&priv->xstats,
+ rx_queues_count, tx_queues_count);
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int rx_riwt;
/* Check not supported parameters */
@@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
priv->tx_coal_frames = ec->tx_max_coalesced_frames;
priv->tx_coal_timer = ec->tx_coalesce_usecs;
priv->rx_riwt = rx_riwt;
- priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+ priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4498a3861aa3..cd8c60132390 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -139,6 +139,64 @@ static void stmmac_verify_args(void)
}
/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_queues_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ napi_disable(&rx_q->napi);
+ }
+}
+
+/**
+ * stmmac_enable_all_queues - Enable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_queues_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ napi_enable(&rx_q->napi);
+ }
+}
+
+/**
+ * stmmac_stop_all_queues - Stop all queues
+ * @priv: driver private structure
+ */
+static void stmmac_stop_all_queues(struct stmmac_priv *priv)
+{
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_cnt; queue++)
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
+/**
+ * stmmac_start_all_queues - Start all queues
+ * @priv: driver private structure
+ */
+static void stmmac_start_all_queues(struct stmmac_priv *priv)
+{
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_cnt; queue++)
+ netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
+}
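
These four helpers split queue control along two axes: NAPI on the RX queues and the netdev TX queues. A hedged sketch of how an open/close path would presumably pair them; the stubs stand in for the driver calls:

#include <stdio.h>

static void enable_all_queues(void)  { puts("napi_enable on every RX queue"); }
static void start_all_queues(void)   { puts("netif_tx_start_queue on every TX queue"); }
static void stop_all_queues(void)    { puts("netif_tx_stop_queue on every TX queue"); }
static void disable_all_queues(void) { puts("napi_disable on every RX queue"); }

int main(void)
{
	/* open path */
	enable_all_queues();
	start_all_queues();
	/* close path: quiesce TX first, then stop RX NAPI */
	stop_all_queues();
	disable_all_queues();
	return 0;
}
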
+
+/**
* stmmac_clk_csr_set - dynamically set the MDC clock
* @priv: driver private structure
* Description: this is to dynamically set the MDC clock according to the csr
@@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf, int len)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
-static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
u32 avail;
- if (priv->dirty_tx > priv->cur_tx)
- avail = priv->dirty_tx - priv->cur_tx - 1;
+ if (tx_q->dirty_tx > tx_q->cur_tx)
+ avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
+ avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
-static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
+/**
+ * stmmac_rx_dirty - Get the number of dirty entries in an RX queue
+ * @priv: driver private structure
+ * @queue: RX queue index
+ */
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
u32 dirty;
- if (priv->dirty_rx <= priv->cur_rx)
- dirty = priv->cur_rx - priv->dirty_rx;
+ if (rx_q->dirty_rx <= rx_q->cur_rx)
+ dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
+ dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
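
Both helpers above treat cur and dirty as modulo indices into a ring and always keep one slot unused. A standalone sketch of the wrap-around arithmetic, with the ring size assumed to be 512 as in the mainline defaults:

#include <stdio.h>

#define DMA_TX_SIZE 512	/* assumed ring size */

static unsigned int tx_avail(unsigned int dirty, unsigned int cur)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return DMA_TX_SIZE - cur + dirty - 1;
}

int main(void)
{
	printf("%u\n", tx_avail(0, 0));		/* empty ring: 511 free */
	printf("%u\n", tx_avail(10, 500));	/* producer wrapped: 21 free */
	return 0;
}
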
@@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
*/
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+	/* Check whether all TX queues have finished their work */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ return; /* still unfinished work */
+ }
+
/* Check and enter in LPI mode */
- if ((priv->dirty_tx == priv->cur_tx) &&
- (priv->tx_path_in_lpi_mode == false))
+ if (!priv->tx_path_in_lpi_mode)
priv->hw->mac->set_eee_mode(priv->hw,
priv->plat->en_tx_lpi_clockgating);
}
@@ -673,6 +748,19 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
}
/**
+ * stmmac_mac_flow_ctrl - Configure flow control in all queues
+ * @priv: driver private structure
+ * Description: It is used for configuring the flow control in all queues
+ */
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+ priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
+}
+
+/**
* stmmac_adjust_link - adjusts the link parameters
* @dev: net device structure
* Description: this is the helper called by the physical abstraction layer
@@ -687,7 +775,6 @@ static void stmmac_adjust_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
unsigned long flags;
int new_state = 0;
- unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
if (!phydev)
return;
@@ -709,8 +796,7 @@ static void stmmac_adjust_link(struct net_device *dev)
}
/* Flow Control operation */
if (phydev->pause)
- priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
- fc, pause_time);
+ stmmac_mac_flow_ctrl(priv, phydev->duplex);
if (phydev->speed != priv->speed) {
new_state = 1;
@@ -878,22 +964,56 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
- void *head_rx, *head_tx;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ void *head_rx;
+ u32 queue;
- if (priv->extend_desc) {
- head_rx = (void *)priv->dma_erx;
- head_tx = (void *)priv->dma_etx;
- } else {
- head_rx = (void *)priv->dma_rx;
- head_tx = (void *)priv->dma_tx;
+ /* Display RX rings */
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ pr_info("\tRX Queue %u rings\n", queue);
+
+ if (priv->extend_desc)
+ head_rx = (void *)rx_q->dma_erx;
+ else
+ head_rx = (void *)rx_q->dma_rx;
+
+ /* Display RX ring */
+ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+ }
+}
+
+static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ void *head_tx;
+ u32 queue;
+
+ /* Display TX rings */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		pr_info("\tTX Queue %u rings\n", queue);
+
+ if (priv->extend_desc)
+ head_tx = (void *)tx_q->dma_etx;
+ else
+ head_tx = (void *)tx_q->dma_tx;
+
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}
+}
+
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ /* Display RX ring */
+ stmmac_display_rx_rings(priv);
- /* Display Rx ring */
- priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
- /* Display Tx ring */
- priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+ /* Display TX ring */
+ stmmac_display_tx_rings(priv);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -913,48 +1033,88 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
}
/**
- * stmmac_clear_descriptors - clear descriptors
+ * stmmac_clear_rx_descriptors - clear RX descriptors
* @priv: driver private structure
- * Description: this function is called to clear the tx and rx descriptors
+ * @queue: RX queue index
+ * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are in use.
*/
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int i;
- /* Clear the Rx/Tx descriptors */
+ /* Clear the RX descriptors */
for (i = 0; i < DMA_RX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
+ priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
(i == DMA_RX_SIZE - 1));
else
- priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
+ priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
(i == DMA_RX_SIZE - 1));
+}
+
+/**
+ * stmmac_clear_tx_descriptors - clear TX descriptors
+ * @priv: driver private structure
+ * @queue: TX queue index.
+ * Description: this function is called to clear the TX descriptors
+ * whether basic or extended descriptors are in use.
+ */
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int i;
+
+ /* Clear the TX descriptors */
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
priv->mode,
(i == DMA_TX_SIZE - 1));
else
- priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
priv->mode,
(i == DMA_TX_SIZE - 1));
}
/**
+ * stmmac_clear_descriptors - clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX descriptors
+ * whether basic or extended descriptors are in use.
+ */
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+ u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* Clear the RX descriptors */
+ for (queue = 0; queue < rx_queue_cnt; queue++)
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ /* Clear the TX descriptors */
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ stmmac_clear_tx_descriptors(priv, queue);
+}
+
+/**
* stmmac_init_rx_buffers - init the RX descriptor buffer.
* @priv: driver private structure
* @p: descriptor pointer
* @i: descriptor index
- * @flags: gfp flag.
+ * @flags: gfp flag
+ * @queue: RX queue index
* Description: this function is called to allocate a receive buffer, perform
* the DMA mapping and init the descriptor.
*/
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
- int i, gfp_t flags)
+ int i, gfp_t flags, u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct sk_buff *skb;
skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
@@ -963,20 +1123,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
"%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
}
- priv->rx_skbuff[i] = skb;
- priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ rx_q->rx_skbuff[i] = skb;
+ rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz,
DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+ if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
dev_kfree_skb_any(skb);
return -EINVAL;
}
if (priv->synopsys_id >= DWMAC_CORE_4_00)
- p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+ p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
else
- p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+ p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -985,30 +1145,71 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return 0;
}
-static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+/**
+ * stmmac_free_rx_buffer - free an RX dma buffer
+ * @priv: private structure
+ * @queue: RX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
- if (priv->rx_skbuff[i]) {
- dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ if (rx_q->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_skbuff[i]);
+ dev_kfree_skb_any(rx_q->rx_skbuff[i]);
}
- priv->rx_skbuff[i] = NULL;
+ rx_q->rx_skbuff[i] = NULL;
}
/**
- * init_dma_desc_rings - init the RX/TX descriptor rings
+ * stmmac_free_tx_buffer - free RX dma buffers
+ * stmmac_free_tx_buffer - free a TX dma buffer
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ if (tx_q->tx_skbuff_dma[i].buf) {
+ if (tx_q->tx_skbuff_dma[i].map_as_page)
+ dma_unmap_page(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ }
+
+ if (tx_q->tx_skbuff[i]) {
+ dev_kfree_skb_any(tx_q->tx_skbuff[i]);
+ tx_q->tx_skbuff[i] = NULL;
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ }
+}
+
+/**
+ * init_dma_rx_desc_rings - init the RX descriptor rings
* @dev: net device structure
* @flags: gfp flag.
- * Description: this function initializes the DMA RX/TX descriptors
+ * Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
- int i;
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
unsigned int bfsize = 0;
int ret = -ENOMEM;
+ u32 queue;
+ int i;
if (priv->hw->mode->set_16kib_bfsize)
bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
@@ -1018,235 +1219,409 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
priv->dma_buf_sz = bfsize;
- netif_dbg(priv, probe, priv->dev,
- "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
- __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
-
/* RX INITIALIZATION */
netif_dbg(priv, probe, priv->dev,
"SKB addresses:\nskb\t\tskb data\tdma data\n");
- for (i = 0; i < DMA_RX_SIZE; i++) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((priv->dma_erx + i)->basic);
- else
- p = priv->dma_rx + i;
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- ret = stmmac_init_rx_buffers(priv, p, i, flags);
- if (ret)
- goto err_init_rx_buffers;
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_rx_phy=0x%08x\n", __func__,
+ (u32)rx_q->dma_rx_phy);
- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
- priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
- (unsigned int)priv->rx_skbuff_dma[i]);
+ for (i = 0; i < DMA_RX_SIZE; i++) {
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = &((rx_q->dma_erx + i)->basic);
+ else
+ p = rx_q->dma_rx + i;
+
+ ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ queue);
+ if (ret)
+ goto err_init_rx_buffers;
+
+ netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
+ rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
+ (unsigned int)rx_q->rx_skbuff_dma[i]);
+ }
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ priv->hw->mode->init(rx_q->dma_erx,
+ rx_q->dma_rx_phy,
+ DMA_RX_SIZE, 1);
+ else
+ priv->hw->mode->init(rx_q->dma_rx,
+ rx_q->dma_rx_phy,
+ DMA_RX_SIZE, 0);
+ }
}
- priv->cur_rx = 0;
- priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
buf_sz = bfsize;
- /* Setup the chained descriptor addresses */
- if (priv->mode == STMMAC_CHAIN_MODE) {
- if (priv->extend_desc) {
- priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
- DMA_RX_SIZE, 1);
- priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
- DMA_TX_SIZE, 1);
- } else {
- priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
- DMA_RX_SIZE, 0);
- priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
- DMA_TX_SIZE, 0);
- }
+ return 0;
+
+err_init_rx_buffers:
+ while (queue >= 0) {
+ while (--i >= 0)
+ stmmac_free_rx_buffer(priv, queue, i);
+
+ if (queue == 0)
+ break;
+
+ i = DMA_RX_SIZE;
+ queue--;
}
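
The error path above unwinds in reverse: it frees the partially filled queue down from the failing index, then every fully filled queue below it. Since queue is a u32, the `while (queue >= 0)` test is always true and the loop terminates only through the explicit `queue == 0` break. A standalone sketch of the same unwind with a signed counter; DMA_RX_SIZE and free_rx_buffer() are stand-ins for the driver's own:

#include <stdio.h>

#define DMA_RX_SIZE 512	/* assumed ring size */

static void free_rx_buffer(int q, int i) { printf("free q%d[%d]\n", q, i); }

static void unwind_rx(int failed_queue, int failed_buf)
{
	for (int q = failed_queue; q >= 0; q--) {
		int last = (q == failed_queue) ? failed_buf : DMA_RX_SIZE;

		for (int i = last - 1; i >= 0; i--)
			free_rx_buffer(q, i);
	}
}

int main(void)
{
	unwind_rx(1, 3);	/* queue 1 failed at buffer index 3 */
	return 0;
}
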
- /* TX INITIALIZATION */
- for (i = 0; i < DMA_TX_SIZE; i++) {
- struct dma_desc *p;
- if (priv->extend_desc)
- p = &((priv->dma_etx + i)->basic);
- else
- p = priv->dma_tx + i;
+ return ret;
+}
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- p->des0 = 0;
- p->des1 = 0;
- p->des2 = 0;
- p->des3 = 0;
- } else {
- p->des2 = 0;
+/**
+ * init_dma_tx_desc_rings - init the TX descriptor rings
+ * @dev: net device structure.
+ * Description: this function initializes the DMA TX descriptors and
+ * resets the per-queue state. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+ int i;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_tx_phy=0x%08x\n", __func__,
+ (u32)tx_q->dma_tx_phy);
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ priv->hw->mode->init(tx_q->dma_etx,
+ tx_q->dma_tx_phy,
+ DMA_TX_SIZE, 1);
+ else
+ priv->hw->mode->init(tx_q->dma_tx,
+ tx_q->dma_tx_phy,
+ DMA_TX_SIZE, 0);
+ }
+
+ for (i = 0; i < DMA_TX_SIZE; i++) {
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((tx_q->dma_etx + i)->basic);
+ else
+ p = tx_q->dma_tx + i;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+ } else {
+ p->des2 = 0;
+ }
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ tx_q->tx_skbuff_dma[i].len = 0;
+ tx_q->tx_skbuff_dma[i].last_segment = false;
+ tx_q->tx_skbuff[i] = NULL;
}
- priv->tx_skbuff_dma[i].buf = 0;
- priv->tx_skbuff_dma[i].map_as_page = false;
- priv->tx_skbuff_dma[i].len = 0;
- priv->tx_skbuff_dma[i].last_segment = false;
- priv->tx_skbuff[i] = NULL;
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
- priv->dirty_tx = 0;
- priv->cur_tx = 0;
- netdev_reset_queue(priv->dev);
+ return 0;
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = init_dma_rx_desc_rings(dev, flags);
+ if (ret)
+ return ret;
+
+ ret = init_dma_tx_desc_rings(dev);
stmmac_clear_descriptors(priv);
if (netif_msg_hw(priv))
stmmac_display_rings(priv);
- return 0;
-err_init_rx_buffers:
- while (--i >= 0)
- stmmac_free_rx_buffers(priv, i);
return ret;
}
-static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+/**
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
for (i = 0; i < DMA_RX_SIZE; i++)
- stmmac_free_rx_buffers(priv, i);
+ stmmac_free_rx_buffer(priv, queue, i);
}
-static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+/**
+ * dma_free_tx_skbufs - free TX dma buffers
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
- for (i = 0; i < DMA_TX_SIZE; i++) {
- if (priv->tx_skbuff_dma[i].buf) {
- if (priv->tx_skbuff_dma[i].map_as_page)
- dma_unmap_page(priv->device,
- priv->tx_skbuff_dma[i].buf,
- priv->tx_skbuff_dma[i].len,
- DMA_TO_DEVICE);
- else
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[i].buf,
- priv->tx_skbuff_dma[i].len,
- DMA_TO_DEVICE);
- }
+ for (i = 0; i < DMA_TX_SIZE; i++)
+ stmmac_free_tx_buffer(priv, queue, i);
+}
- if (priv->tx_skbuff[i]) {
- dev_kfree_skb_any(priv->tx_skbuff[i]);
- priv->tx_skbuff[i] = NULL;
- priv->tx_skbuff_dma[i].buf = 0;
- priv->tx_skbuff_dma[i].map_as_page = false;
- }
+/**
+ * free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ /* Free RX queue resources */
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ /* Release the DMA RX socket buffers */
+ dma_free_rx_skbufs(priv, queue);
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc)
+ dma_free_coherent(priv->device,
+ DMA_RX_SIZE * sizeof(struct dma_desc),
+ rx_q->dma_rx, rx_q->dma_rx_phy);
+ else
+ dma_free_coherent(priv->device, DMA_RX_SIZE *
+ sizeof(struct dma_extended_desc),
+ rx_q->dma_erx, rx_q->dma_rx_phy);
+
+ kfree(rx_q->rx_skbuff_dma);
+ kfree(rx_q->rx_skbuff);
}
}
/**
- * alloc_dma_desc_resources - alloc TX/RX resources.
+ * free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue = 0;
+
+ /* Free TX queue resources */
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ /* Release the DMA TX socket buffers */
+ dma_free_tx_skbufs(priv, queue);
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc)
+ dma_free_coherent(priv->device,
+ DMA_TX_SIZE * sizeof(struct dma_desc),
+ tx_q->dma_tx, tx_q->dma_tx_phy);
+ else
+ dma_free_coherent(priv->device, DMA_TX_SIZE *
+ sizeof(struct dma_extended_desc),
+ tx_q->dma_etx, tx_q->dma_tx_phy);
+
+ kfree(tx_q->tx_skbuff_dma);
+ kfree(tx_q->tx_skbuff);
+ }
+}
+
+/**
+ * alloc_dma_rx_desc_resources - alloc RX resources.
* @priv: private structure
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
+ u32 rx_count = priv->plat->rx_queues_to_use;
int ret = -ENOMEM;
+ u32 queue;
- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->rx_skbuff_dma)
- return -ENOMEM;
+ /* RX queues buffers and DMA */
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->rx_skbuff)
- goto err_rx_skbuff;
-
- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
- sizeof(*priv->tx_skbuff_dma),
- GFP_KERNEL);
- if (!priv->tx_skbuff_dma)
- goto err_tx_skbuff_dma;
-
- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->tx_skbuff)
- goto err_tx_skbuff;
-
- if (priv->extend_desc) {
- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_erx)
- goto err_dma;
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
+ rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
+ sizeof(dma_addr_t),
GFP_KERNEL);
- if (!priv->dma_etx) {
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- goto err_dma;
- }
- } else {
- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_rx)
- goto err_dma;
+ if (!rx_q->rx_skbuff_dma)
+ return -ENOMEM;
- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_tx) {
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
+ rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!rx_q->rx_skbuff)
goto err_dma;
+
+ if (priv->extend_desc) {
+ rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+ DMA_RX_SIZE *
+ sizeof(struct
+ dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_erx)
+ goto err_dma;
+
+ } else {
+ rx_q->dma_rx = dma_zalloc_coherent(priv->device,
+ DMA_RX_SIZE *
+ sizeof(struct
+ dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_rx)
+ goto err_dma;
}
}
return 0;
err_dma:
- kfree(priv->tx_skbuff);
-err_tx_skbuff:
- kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
- kfree(priv->rx_skbuff);
-err_rx_skbuff:
- kfree(priv->rx_skbuff_dma);
+ free_dma_rx_desc_resources(priv);
+
return ret;
}
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+/**
+ * alloc_dma_tx_desc_resources - alloc TX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for the TX path only: the skb
+ * bookkeeping arrays and the descriptor ring of each TX queue.
+ */
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
- /* Release the DMA TX/RX socket buffers */
- dma_free_rx_skbufs(priv);
- dma_free_tx_skbufs(priv);
-
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc) {
- dma_free_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- } else {
- dma_free_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ int ret = -ENOMEM;
+ u32 queue;
+
+ /* TX queues buffers and DMA */
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ tx_q->queue_index = queue;
+ tx_q->priv_data = priv;
+
+ tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
+ sizeof(*tx_q->tx_skbuff_dma),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff_dma)
+ return -ENOMEM;
+
+ tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff)
+ goto err_dma_buffers;
+
+ if (priv->extend_desc) {
+ tx_q->dma_etx = dma_zalloc_coherent(priv->device,
+ DMA_TX_SIZE *
+ sizeof(struct
+ dma_extended_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
+ if (!tx_q->dma_etx)
+ goto err_dma_buffers;
+ } else {
+ tx_q->dma_tx = dma_zalloc_coherent(priv->device,
+ DMA_TX_SIZE *
+ sizeof(struct
+ dma_desc),
+ &tx_q->dma_tx_phy,
+ GFP_KERNEL);
+ if (!tx_q->dma_tx)
+ goto err_dma_buffers;
+ }
}
- kfree(priv->rx_skbuff_dma);
- kfree(priv->rx_skbuff);
- kfree(priv->tx_skbuff_dma);
- kfree(priv->tx_skbuff);
+
+ return 0;
+
+err_dma_buffers:
+ free_dma_tx_desc_resources(priv);
+
+ return ret;
+}
+
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for the TX and RX paths. In case of
+ * reception, for example, it pre-allocates the RX socket buffers in order to
+ * allow the zero-copy mechanism.
+ */
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* RX Allocation */
+ int ret = alloc_dma_rx_desc_resources(priv);
+
+ if (ret)
+ return ret;
+
+ ret = alloc_dma_tx_desc_resources(priv);
+
+ return ret;
+}
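
The combined allocator above simply composes the two per-direction allocators; each unwinds its own queues on failure, so only the first error needs forwarding. A standalone sketch of the pattern, with one caveat noted in the comments:

#include <stdio.h>

static int alloc_rx(void) { return 0; }	/* 0 on success */
static int alloc_tx(void) { return 0; }

static int alloc_all(void)
{
	int ret = alloc_rx();

	if (ret)
		return ret;	/* the RX side cleaned up after itself */

	/* If this fails, the RX resources stay allocated; the caller's
	 * error path is presumably expected to release them. */
	return alloc_tx();
}

int main(void)
{
	printf("alloc_all() = %d\n", alloc_all());
	return 0;
}
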
+
+/**
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* Release the RX queue resources */
+ free_dma_rx_desc_resources(priv);
+
+	/* Release the TX queue resources */
+ free_dma_tx_desc_resources(priv);
}
/**
@@ -1256,19 +1631,104 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
- int rx_count = priv->dma_cap.number_rx_queues;
- int queue = 0;
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ int queue;
+ u8 mode;
- /* If GMAC does not have multiple queues, then this is not necessary*/
- if (rx_count == 1)
- return;
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+ priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
+ }
+}
- /**
- * If the core is synthesized with multiple rx queues / multiple
- * dma channels, then rx queues will be disabled by default.
- * For now only rx queue 0 is enabled.
- */
- priv->hw->mac->rx_queue_enable(priv->hw, queue);
+/**
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts a RX DMA channel
+ */
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+ priv->hw->dma->start_rx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This starts a TX DMA channel
+ */
+static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+ priv->hw->dma->start_tx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This stops a RX DMA channel
+ */
+static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+ priv->hw->dma->stop_rx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This stops a TX DMA channel
+ */
+static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+ priv->hw->dma->stop_tx(priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_start_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_start_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
+ */
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_stop_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_stop_tx_dma(priv, chan);
}
/**
@@ -1279,11 +1739,20 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
int rxfifosz = priv->plat->rx_fifo_size;
+ u32 txmode = 0;
+ u32 rxmode = 0;
+ u32 chan = 0;
- if (priv->plat->force_thresh_dma_mode)
- priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
- else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
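+ /* if the platform does not provide an RX FIFO size, fall back to
+ * the value reported by the HW capability register
+ */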
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+
+ if (priv->plat->force_thresh_dma_mode) {
+ txmode = tc;
+ rxmode = tc;
+ } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
@@ -1291,37 +1760,53 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* 2) There is no bugged Jumbo frame support
* that needs to not insert csum in the TDES.
*/
- priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
- rxfifosz);
+ txmode = SF_DMA_MODE;
+ rxmode = SF_DMA_MODE;
priv->xstats.threshold = SF_DMA_MODE;
- } else
- priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
+ } else {
+ txmode = tc;
+ rxmode = SF_DMA_MODE;
+ }
+
+ /* configure all channels */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ for (chan = 0; chan < rx_channels_count; chan++)
+ priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+ rxfifosz);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+ } else {
+ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
rxfifosz);
+ }
}
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
+ * @queue: TX queue index
* Description: it reclaims the transmit resources after transmission completes.
*/
-static void stmmac_tx_clean(struct stmmac_priv *priv)
+static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry = priv->dirty_tx;
+ unsigned int entry = tx_q->dirty_tx;
netif_tx_lock(priv->dev);
priv->xstats.tx_clean++;
- while (entry != priv->cur_tx) {
- struct sk_buff *skb = priv->tx_skbuff[entry];
+ while (entry != tx_q->cur_tx) {
+ struct sk_buff *skb = tx_q->tx_skbuff[entry];
struct dma_desc *p;
int status;
if (priv->extend_desc)
- p = (struct dma_desc *)(priv->dma_etx + entry);
+ p = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- p = priv->dma_tx + entry;
+ p = tx_q->dma_tx + entry;
status = priv->hw->desc->tx_status(&priv->dev->stats,
&priv->xstats, p,
@@ -1342,48 +1827,51 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
stmmac_get_tx_hwtstamp(priv, p, skb);
}
- if (likely(priv->tx_skbuff_dma[entry].buf)) {
- if (priv->tx_skbuff_dma[entry].map_as_page)
+ if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
+ if (tx_q->tx_skbuff_dma[entry].map_as_page)
dma_unmap_page(priv->device,
- priv->tx_skbuff_dma[entry].buf,
- priv->tx_skbuff_dma[entry].len,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
else
dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[entry].buf,
- priv->tx_skbuff_dma[entry].len,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry].buf = 0;
- priv->tx_skbuff_dma[entry].len = 0;
- priv->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->tx_skbuff_dma[entry].len = 0;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
}
if (priv->hw->mode->clean_desc3)
- priv->hw->mode->clean_desc3(priv, p);
+ priv->hw->mode->clean_desc3(tx_q, p);
- priv->tx_skbuff_dma[entry].last_segment = false;
- priv->tx_skbuff_dma[entry].is_jumbo = false;
+ tx_q->tx_skbuff_dma[entry].last_segment = false;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
if (likely(skb != NULL)) {
pkts_compl++;
bytes_compl += skb->len;
dev_consume_skb_any(skb);
- priv->tx_skbuff[entry] = NULL;
+ tx_q->tx_skbuff[entry] = NULL;
}
priv->hw->desc->release_tx_desc(p, priv->mode);
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
}
- priv->dirty_tx = entry;
+ tx_q->dirty_tx = entry;
- netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+ netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
+ pkts_compl, bytes_compl);
+
+ if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
+ queue))) &&
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
- if (unlikely(netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
netif_dbg(priv, tx_done, priv->dev,
"%s: restart transmit\n", __func__);
- netif_wake_queue(priv->dev);
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
}
if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
@@ -1393,45 +1881,76 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
netif_tx_unlock(priv->dev);
}
-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
+static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
{
- priv->hw->dma->enable_dma_irq(priv->ioaddr);
+ priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
}
-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
+static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
{
- priv->hw->dma->disable_dma_irq(priv->ioaddr);
+ priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
}
/**
* stmmac_tx_err - to manage the tx error
* @priv: driver private structure
+ * @chan: channel index
* Description: it cleans the descriptors and restarts the transmission
* in case of transmission errors.
*/
-static void stmmac_tx_err(struct stmmac_priv *priv)
+static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int i;
- netif_stop_queue(priv->dev);
- priv->hw->dma->stop_tx(priv->ioaddr);
- dma_free_tx_skbufs(priv);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
+
+ stmmac_stop_tx_dma(priv, chan);
+ dma_free_tx_skbufs(priv, chan);
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
- priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
priv->mode,
(i == DMA_TX_SIZE - 1));
else
- priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
priv->mode,
(i == DMA_TX_SIZE - 1));
- priv->dirty_tx = 0;
- priv->cur_tx = 0;
- netdev_reset_queue(priv->dev);
- priv->hw->dma->start_tx(priv->ioaddr);
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+ stmmac_start_tx_dma(priv, chan);
priv->dev->stats.tx_errors++;
- netif_wake_queue(priv->dev);
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
+}
+
+/**
+ * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
+ * @priv: driver private structure
+ * @txmode: TX operating mode
+ * @rxmode: RX operating mode
+ * @chan: channel index
+ * Description: it is used for configuring the DMA operation mode at runtime
+ * in order to program the tx/rx DMA thresholds or Store-And-Forward mode.
+ */
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan)
+{
+ int rxfifosz = priv->plat->rx_fifo_size;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+ rxfifosz);
+ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+ } else {
+ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+ rxfifosz);
+ }
}
/**
@@ -1443,31 +1962,43 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
*/
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
int status;
- int rxfifosz = priv->plat->rx_fifo_size;
+ u32 chan;
- status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
- if (likely((status & handle_rx)) || (status & handle_tx)) {
- if (likely(napi_schedule_prep(&priv->napi))) {
- stmmac_disable_dma_irq(priv);
- __napi_schedule(&priv->napi);
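+ /* each DMA channel schedules the NAPI instance of the RX queue
+ * that shares its index
+ */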
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+
+ status = priv->hw->dma->dma_interrupt(priv->ioaddr,
+ &priv->xstats, chan);
+ if (likely((status & handle_rx)) || (status & handle_tx)) {
+ if (likely(napi_schedule_prep(&rx_q->napi))) {
+ stmmac_disable_dma_irq(priv, chan);
+ __napi_schedule(&rx_q->napi);
+ }
}
- }
- if (unlikely(status & tx_hard_error_bump_tc)) {
- /* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- (tc <= 256)) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
- rxfifosz);
- else
- priv->hw->dma->dma_mode(priv->ioaddr, tc,
- SF_DMA_MODE, rxfifosz);
- priv->xstats.threshold = tc;
+
+ if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+ (tc <= 256)) {
+ tc += 64;
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv,
+ tc,
+ tc,
+ chan);
+ else
+ stmmac_set_dma_operation_mode(priv,
+ tc,
+ SF_DMA_MODE,
+ chan);
+ priv->xstats.threshold = tc;
+ }
+ } else if (unlikely(status == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
}
- } else if (unlikely(status == tx_hard_error))
- stmmac_tx_err(priv);
+ }
}
/**
@@ -1574,6 +2105,13 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
*/
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 dummy_dma_rx_phy = 0;
+ u32 dummy_dma_tx_phy = 0;
+ u32 chan = 0;
int atds = 0;
int ret = 0;
@@ -1591,19 +2129,49 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
- priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
- priv->dma_tx_phy, priv->dma_rx_phy, atds);
-
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->rx_tail_addr = priv->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
- STMMAC_CHAN0);
+ /* DMA Configuration */
+ priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+ dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ priv->hw->dma->init_rx_chan(priv->ioaddr,
+ priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+ rx_q->rx_tail_addr,
+ chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ priv->hw->dma->init_chan(priv->ioaddr,
+ priv->plat->dma_cfg,
+ chan);
+
+ priv->hw->dma->init_tx_chan(priv->ioaddr,
+ priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
- priv->tx_tail_addr = priv->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
- STMMAC_CHAN0);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
+ tx_q->tx_tail_addr,
+ chan);
+ }
+ } else {
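+ /* cores older than 4.00 are configured with a single DMA channel
+ * (index 0)
+ */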
+ rx_q = &priv->rx_queue[chan];
+ tx_q = &priv->tx_queue[chan];
+ priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
}
if (priv->plat->axi && priv->hw->dma->axi)
@@ -1621,8 +2189,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer(unsigned long data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue;
- stmmac_tx_clean(priv);
+ /* let's scan all the tx queues */
+ for (queue = 0; queue < tx_queues_count; queue++)
+ stmmac_tx_clean(priv, queue);
}
/**
@@ -1644,6 +2216,196 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
add_timer(&priv->txtimer);
}
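+/**
+ * stmmac_set_rings_length - set the DMA ring lengths
+ * @priv: driver private structure
+ * Description: It is used for setting the TX and RX descriptor ring length
+ * for all channels
+ */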
+static void stmmac_set_rings_length(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan;
+
+ /* set TX ring length */
+ if (priv->hw->dma->set_tx_ring_len) {
+ for (chan = 0; chan < tx_channels_count; chan++)
+ priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+ (DMA_TX_SIZE - 1), chan);
+ }
+
+ /* set RX ring length */
+ if (priv->hw->dma->set_rx_ring_len) {
+ for (chan = 0; chan < rx_channels_count; chan++)
+ priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+ (DMA_RX_SIZE - 1), chan);
+ }
+}
+
+/**
+ * stmmac_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting the TX queue weights
+ */
+static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 weight;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ weight = priv->plat->tx_queues_cfg[queue].weight;
+ priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
+ }
+}
+
+/**
+ * stmmac_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void stmmac_configure_cbs(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 mode_to_use;
+ u32 queue;
+
+ /* queue 0 is reserved for legacy traffic */
+ for (queue = 1; queue < tx_queues_count; queue++) {
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB)
+ continue;
+
+ priv->hw->mac->config_cbs(priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ }
+}
+
+/**
+ * stmmac_rx_queue_dma_chan_map - Map RX queues to RX DMA channels
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX DMA channels
+ */
+static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 chan;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ chan = priv->plat->rx_queues_cfg[queue].chan;
+ priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX Queue Priority
+ */
+static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ if (!priv->plat->rx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->rx_queues_cfg[queue].prio;
+ priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the TX Queue Priority
+ */
+static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ if (!priv->plat->tx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->tx_queues_cfg[queue].prio;
+ priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX queue routing
+ */
+static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u8 packet;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ /* no specific packet type routing specified for the queue */
+ if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+ continue;
+
+ packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+ priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
+ }
+}
+
+/**
+ * stmmac_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring the MTL
+ */
+static void stmmac_mtl_configuration(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+ if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
+ stmmac_set_tx_queue_weight(priv);
+
+ /* Configure MTL RX algorithms */
+ if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
+ priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
+ priv->plat->rx_sched_algorithm);
+
+ /* Configure MTL TX algorithms */
+ if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
+ priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
+ priv->plat->tx_sched_algorithm);
+
+ /* Configure CBS in AVB TX queues */
+ if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
+ stmmac_configure_cbs(priv);
+
+ /* Map RX MTL to DMA channels */
+ if (priv->hw->mac->map_mtl_to_dma)
+ stmmac_rx_queue_dma_chan_map(priv);
+
+ /* Enable MAC RX Queues */
+ if (priv->hw->mac->rx_queue_enable)
+ stmmac_mac_enable_rx_queues(priv);
+
+ /* Set RX priorities */
+ if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
+ stmmac_mac_config_rx_queues_prio(priv);
+
+ /* Set TX priorities */
+ if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
+ stmmac_mac_config_tx_queues_prio(priv);
+
+ /* Set RX routing */
+ if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
+ stmmac_mac_config_rx_queues_routing(priv);
+}
+
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
@@ -1659,6 +2421,9 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 chan;
int ret;
/* DMA initialization and SW reset */
@@ -1688,9 +2453,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
- /* Initialize MAC RX Queues */
- if (priv->hw->mac->rx_queue_enable)
- stmmac_mac_enable_rx_queues(priv);
+ /* Initialize MTL */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ stmmac_mtl_configuration(priv);
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
@@ -1700,10 +2465,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Enable the MAC Rx/Tx */
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- stmmac_dwmac4_set_mac(priv->ioaddr, true);
- else
- stmmac_set_mac(priv->ioaddr, true);
+ priv->hw->mac->set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1711,6 +2473,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
if (init_ptp) {
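+ /* the PTP ref clock is now enabled here rather than at probe
+ * time; stmmac_hw_teardown() disables it again
+ */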
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
+
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -1725,35 +2491,37 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
__func__);
#endif
/* Start the ball rolling... */
- netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
+ stmmac_start_all_dma(priv);
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
priv->rx_riwt = MAX_DMA_RIWT;
- priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+ priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
}
if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
- /* set TX ring length */
- if (priv->hw->dma->set_tx_ring_len)
- priv->hw->dma->set_tx_ring_len(priv->ioaddr,
- (DMA_TX_SIZE - 1));
- /* set RX ring length */
- if (priv->hw->dma->set_rx_ring_len)
- priv->hw->dma->set_rx_ring_len(priv->ioaddr,
- (DMA_RX_SIZE - 1));
+ /* set TX and RX rings length */
+ stmmac_set_rings_length(priv);
+
/* Enable TSO */
- if (priv->tso)
- priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+ if (priv->tso) {
+ for (chan = 0; chan < tx_cnt; chan++)
+ priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
+ }
return 0;
}
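+/**
+ * stmmac_hw_teardown - undo the HW setup
+ * @dev : pointer to the device structure
+ * Description: it releases what stmmac_hw_setup acquired, currently
+ * the PTP reference clock
+ */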
+static void stmmac_hw_teardown(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+}
+
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
@@ -1821,7 +2589,7 @@ static int stmmac_open(struct net_device *dev)
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
- goto init_error;
+ goto irq_error;
}
/* Request the Wake IRQ in case of another line is used for WoL */
@@ -1848,8 +2616,8 @@ static int stmmac_open(struct net_device *dev)
}
}
- napi_enable(&priv->napi);
- netif_start_queue(dev);
+ stmmac_enable_all_queues(priv);
+ stmmac_start_all_queues(priv);
return 0;
@@ -1858,7 +2626,12 @@ lpiirq_error:
free_irq(priv->wol_irq, dev);
wolirq_error:
free_irq(dev->irq, dev);
+irq_error:
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+ del_timer_sync(&priv->txtimer);
+ stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
@@ -1887,9 +2660,9 @@ static int stmmac_release(struct net_device *dev)
phy_disconnect(dev->phydev);
}
- netif_stop_queue(dev);
+ stmmac_stop_all_queues(priv);
- napi_disable(&priv->napi);
+ stmmac_disable_all_queues(priv);
del_timer_sync(&priv->txtimer);
@@ -1901,14 +2674,13 @@ static int stmmac_release(struct net_device *dev)
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- priv->hw->dma->stop_tx(priv->ioaddr);
- priv->hw->dma->stop_rx(priv->ioaddr);
+ stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
/* Disable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, false);
+ priv->hw->mac->set_mac(priv->ioaddr, false);
netif_carrier_off(dev);
@@ -1927,22 +2699,24 @@ static int stmmac_release(struct net_device *dev)
* @des: buffer start address
* @total_len: total length to fill in descriptors
* @last_segment: condition for the last descriptor
+ * @queue: TX queue index
* Description:
* This function fills descriptor and request new descriptors according to
* buffer length to fill
*/
static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
- int total_len, bool last_segment)
+ int total_len, bool last_segment, u32 queue)
{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
struct dma_desc *desc;
- int tmp_len;
u32 buff_size;
+ int tmp_len;
tmp_len = total_len;
while (tmp_len > 0) {
- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
- desc = priv->dma_tx + priv->cur_tx;
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ desc = tx_q->dma_tx + tx_q->cur_tx;
desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
@@ -1986,23 +2760,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
*/
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 pay_len, mss;
- int tmp_pay_len = 0;
+ struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
+ u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, des;
- struct dma_desc *desc, *first, *mss_desc = NULL;
+ struct stmmac_tx_queue *tx_q;
+ int tmp_pay_len = 0;
+ u32 pay_len, mss;
u8 proto_hdr_len;
int i;
+ tx_q = &priv->tx_queue[queue];
+
/* Compute header lengths */
proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
/* Desc availability based on threshold should be enough safe */
- if (unlikely(stmmac_tx_avail(priv) <
+ if (unlikely(stmmac_tx_avail(priv, queue) <
(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
@@ -2017,10 +2796,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* set new MSS value if needed */
if (mss != priv->mss) {
- mss_desc = priv->dma_tx + priv->cur_tx;
+ mss_desc = tx_q->dma_tx + tx_q->cur_tx;
priv->hw->desc->set_mss(mss_desc, mss);
priv->mss = mss;
- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
}
if (netif_msg_tx_queued(priv)) {
@@ -2030,9 +2809,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
- first_entry = priv->cur_tx;
+ first_entry = tx_q->cur_tx;
- desc = priv->dma_tx + first_entry;
+ desc = tx_q->dma_tx + first_entry;
first = desc;
/* first descriptor: fill Headers on Buf1 */
@@ -2041,9 +2820,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- priv->tx_skbuff_dma[first_entry].buf = des;
- priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- priv->tx_skbuff[first_entry] = skb;
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ tx_q->tx_skbuff[first_entry] = skb;
first->des0 = cpu_to_le32(des);
@@ -2054,7 +2833,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
@@ -2063,24 +2842,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
des = skb_frag_dma_map(priv->device, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
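+ /* bail out if the fragment could not be DMA-mapped */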
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
stmmac_tso_allocator(priv, des, skb_frag_size(frag),
- (i == nfrags - 1));
+ (i == nfrags - 1), queue);
- priv->tx_skbuff_dma[priv->cur_tx].buf = des;
- priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
- priv->tx_skbuff[priv->cur_tx] = NULL;
- priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
+ tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
}
- priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
- priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
- if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
__func__);
- netif_stop_queue(dev);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
dev->stats.tx_bytes += skb->len;
@@ -2112,7 +2893,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->prepare_tso_tx_desc(first, 1,
proto_hdr_len,
pay_len,
- 1, priv->tx_skbuff_dma[first_entry].last_segment,
+ 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
@@ -2127,20 +2908,20 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
- __func__, priv->cur_tx, priv->dirty_tx, first_entry,
- priv->cur_tx, first, nfrags);
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+ tx_q->cur_tx, first, nfrags);
- priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+ priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
0);
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb_headlen(skb));
}
- netdev_sent_queue(dev, skb->len);
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
- STMMAC_CHAN0);
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
+ queue);
return NETDEV_TX_OK;
@@ -2164,21 +2945,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
+ u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
unsigned int entry, first_entry;
struct dma_desc *desc, *first;
+ struct stmmac_tx_queue *tx_q;
unsigned int enh_desc;
unsigned int des;
+ tx_q = &priv->tx_queue[queue];
+
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
return stmmac_tso_xmit(skb, dev);
}
- if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
@@ -2190,19 +2976,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
- entry = priv->cur_tx;
+ entry = tx_q->cur_tx;
first_entry = entry;
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
if (likely(priv->extend_desc))
- desc = (struct dma_desc *)(priv->dma_etx + entry);
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
first = desc;
- priv->tx_skbuff[first_entry] = skb;
+ tx_q->tx_skbuff[first_entry] = skb;
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
@@ -2211,7 +2997,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(is_jumbo) && likely(priv->synopsys_id <
DWMAC_CORE_4_00)) {
- entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
}
@@ -2224,48 +3010,49 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (likely(priv->extend_desc))
- desc = (struct dma_desc *)(priv->dma_etx + entry);
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else
- desc = priv->dma_tx + entry;
+ desc = tx_q->dma_tx + entry;
des = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, des))
goto dma_map_err; /* should reuse desc w/o issues */
- priv->tx_skbuff[entry] = NULL;
+ tx_q->tx_skbuff[entry] = NULL;
- priv->tx_skbuff_dma[entry].buf = des;
+ tx_q->tx_skbuff_dma[entry].buf = des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
desc->des0 = cpu_to_le32(des);
else
desc->des2 = cpu_to_le32(des);
- priv->tx_skbuff_dma[entry].map_as_page = true;
- priv->tx_skbuff_dma[entry].len = len;
- priv->tx_skbuff_dma[entry].last_segment = last_segment;
+ tx_q->tx_skbuff_dma[entry].map_as_page = true;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
/* Prepare the descriptor and set the own bit too */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
- priv->mode, 1, last_segment);
+ priv->mode, 1, last_segment,
+ skb->len);
}
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
- priv->cur_tx = entry;
+ tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
void *tx_head;
netdev_dbg(priv->dev,
"%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
- __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
entry, first, nfrags);
if (priv->extend_desc)
- tx_head = (void *)priv->dma_etx;
+ tx_head = (void *)tx_q->dma_etx;
else
- tx_head = (void *)priv->dma_tx;
+ tx_head = (void *)tx_q->dma_tx;
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
@@ -2273,10 +3060,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
print_pkt(skb->data, skb->len);
}
- if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
__func__);
- netif_stop_queue(dev);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
dev->stats.tx_bytes += skb->len;
@@ -2311,14 +3098,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- priv->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
first->des0 = cpu_to_le32(des);
else
first->des2 = cpu_to_le32(des);
- priv->tx_skbuff_dma[first_entry].len = nopaged_len;
- priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
+ tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
+ tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
@@ -2330,7 +3117,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Prepare the first descriptor setting the OWN bit too */
priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
csum_insertion, priv->mode, 1,
- last_segment);
+ last_segment, skb->len);
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
@@ -2339,13 +3126,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_wmb();
}
- netdev_sent_queue(dev, skb->len);
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
if (priv->synopsys_id < DWMAC_CORE_4_00)
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
else
- priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
- STMMAC_CHAN0);
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
+ queue);
return NETDEV_TX_OK;
@@ -2373,9 +3160,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
}
-static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
+static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
{
- if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
+ if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
return 0;
return 1;
@@ -2384,30 +3171,33 @@ static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
+ * @queue: RX queue index
* Description : this is to reallocate the skb for the reception process
* that is based on zero-copy.
*/
-static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int entry = rx_q->dirty_rx;
+
int bfsize = priv->dma_buf_sz;
- unsigned int entry = priv->dirty_rx;
- int dirty = stmmac_rx_dirty(priv);
while (dirty-- > 0) {
struct dma_desc *p;
if (priv->extend_desc)
- p = (struct dma_desc *)(priv->dma_erx + entry);
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
- p = priv->dma_rx + entry;
+ p = rx_q->dma_rx + entry;
- if (likely(priv->rx_skbuff[entry] == NULL)) {
+ if (likely(!rx_q->rx_skbuff[entry])) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
if (unlikely(!skb)) {
/* so for a while no zero-copy! */
- priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
+ rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
if (unlikely(net_ratelimit()))
dev_err(priv->device,
"fail to alloc skb entry %d\n",
@@ -2415,28 +3205,28 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
break;
}
- priv->rx_skbuff[entry] = skb;
- priv->rx_skbuff_dma[entry] =
+ rx_q->rx_skbuff[entry] = skb;
+ rx_q->rx_skbuff_dma[entry] =
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->device,
- priv->rx_skbuff_dma[entry])) {
+ rx_q->rx_skbuff_dma[entry])) {
netdev_err(priv->dev, "Rx DMA map failed\n");
dev_kfree_skb(skb);
break;
}
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+ p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
p->des1 = 0;
} else {
- p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+ p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
}
if (priv->hw->mode->refill_desc3)
- priv->hw->mode->refill_desc3(priv, p);
+ priv->hw->mode->refill_desc3(rx_q, p);
- if (priv->rx_zeroc_thresh > 0)
- priv->rx_zeroc_thresh--;
+ if (rx_q->rx_zeroc_thresh > 0)
+ rx_q->rx_zeroc_thresh--;
netif_dbg(priv, rx_status, priv->dev,
"refill entry #%d\n", entry);
@@ -2452,31 +3242,33 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
- priv->dirty_rx = entry;
+ rx_q->dirty_rx = entry;
}
/**
* stmmac_rx - manage the receive process
* @priv: driver private structure
- * @limit: napi bugget.
+ * @limit: napi budget
+ * @queue: RX queue index.
* Description : this is the function called by the napi poll method.
* It gets all the frames inside the ring.
*/
-static int stmmac_rx(struct stmmac_priv *priv, int limit)
+static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
- unsigned int entry = priv->cur_rx;
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ unsigned int entry = rx_q->cur_rx;
+ int coe = priv->hw->rx_csum;
unsigned int next_entry;
unsigned int count = 0;
- int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
void *rx_head;
netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
- rx_head = (void *)priv->dma_erx;
+ rx_head = (void *)rx_q->dma_erx;
else
- rx_head = (void *)priv->dma_rx;
+ rx_head = (void *)rx_q->dma_rx;
priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
@@ -2486,9 +3278,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
struct dma_desc *np;
if (priv->extend_desc)
- p = (struct dma_desc *)(priv->dma_erx + entry);
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
- p = priv->dma_rx + entry;
+ p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
status = priv->hw->desc->rx_status(&priv->dev->stats,
@@ -2499,20 +3291,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
count++;
- priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
- next_entry = priv->cur_rx;
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+ next_entry = rx_q->cur_rx;
if (priv->extend_desc)
- np = (struct dma_desc *)(priv->dma_erx + next_entry);
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
else
- np = priv->dma_rx + next_entry;
+ np = rx_q->dma_rx + next_entry;
prefetch(np);
if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
priv->hw->desc->rx_extended_status(&priv->dev->stats,
&priv->xstats,
- priv->dma_erx +
+ rx_q->dma_erx +
entry);
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
@@ -2522,9 +3314,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
* them in stmmac_rx_refill() function so that
* device can reuse it.
*/
- priv->rx_skbuff[entry] = NULL;
+ rx_q->rx_skbuff[entry] = NULL;
dma_unmap_single(priv->device,
- priv->rx_skbuff_dma[entry],
+ rx_q->rx_skbuff_dma[entry],
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
@@ -2572,7 +3364,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
*/
if (unlikely(!priv->plat->has_gmac4 &&
((frame_len < priv->rx_copybreak) ||
- stmmac_rx_threshold_count(priv)))) {
+ stmmac_rx_threshold_count(rx_q)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
@@ -2584,21 +3376,21 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
}
dma_sync_single_for_cpu(priv->device,
- priv->rx_skbuff_dma
+ rx_q->rx_skbuff_dma
[entry], frame_len,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
- priv->
+ rx_q->
rx_skbuff[entry]->data,
frame_len);
skb_put(skb, frame_len);
dma_sync_single_for_device(priv->device,
- priv->rx_skbuff_dma
+ rx_q->rx_skbuff_dma
[entry], frame_len,
DMA_FROM_DEVICE);
} else {
- skb = priv->rx_skbuff[entry];
+ skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
@@ -2607,12 +3399,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
break;
}
prefetch(skb->data - NET_IP_ALIGN);
- priv->rx_skbuff[entry] = NULL;
- priv->rx_zeroc_thresh++;
+ rx_q->rx_skbuff[entry] = NULL;
+ rx_q->rx_zeroc_thresh++;
skb_put(skb, frame_len);
dma_unmap_single(priv->device,
- priv->rx_skbuff_dma[entry],
+ rx_q->rx_skbuff_dma[entry],
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
@@ -2634,7 +3426,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&priv->napi, skb);
+ napi_gro_receive(&rx_q->napi, skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
@@ -2642,7 +3434,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
entry = next_entry;
}
- stmmac_rx_refill(priv);
+ stmmac_rx_refill(priv, queue);
priv->xstats.rx_pkt_n += count;
@@ -2659,16 +3451,24 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
*/
static int stmmac_poll(struct napi_struct *napi, int budget)
{
- struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+ struct stmmac_rx_queue *rx_q =
+ container_of(napi, struct stmmac_rx_queue, napi);
+ struct stmmac_priv *priv = rx_q->priv_data;
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 chan = rx_q->queue_index;
int work_done = 0;
+ u32 queue;
priv->xstats.napi_poll++;
- stmmac_tx_clean(priv);
- work_done = stmmac_rx(priv, budget);
+ /* check all the queues */
+ for (queue = 0; queue < tx_count; queue++)
+ stmmac_tx_clean(priv, queue);
+
+ work_done = stmmac_rx(priv, budget, rx_q->queue_index);
if (work_done < budget) {
napi_complete_done(napi, work_done);
- stmmac_enable_dma_irq(priv);
+ stmmac_enable_dma_irq(priv, chan);
}
return work_done;
}
@@ -2684,9 +3484,12 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
static void stmmac_tx_timeout(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 chan;
/* Clear Tx resources and restart transmitting again */
- stmmac_tx_err(priv);
+ for (chan = 0; chan < tx_count; chan++)
+ stmmac_tx_err(priv, chan);
}
/**
@@ -2795,6 +3598,12 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queues_count;
+ u32 queue;
+
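+ /* the MTL interrupt status must be checked for all queues, RX and TX */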
+ queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
@@ -2808,16 +3617,30 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
+
if (unlikely(status)) {
/* For LPI we need to save the tx status */
if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
- if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
- priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
- priv->rx_tail_addr,
- STMMAC_CHAN0);
+ }
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ for (queue = 0; queue < queues_count; queue++) {
+ struct stmmac_rx_queue *rx_q =
+ &priv->rx_queue[queue];
+
+ status |=
+ priv->hw->mac->host_mtl_irq_status(priv->hw,
+ queue);
+
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
+ priv->hw->dma->set_rx_tail_ptr)
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+ rx_q->rx_tail_addr,
+ queue);
+ }
}
/* PCS link status */
@@ -2915,17 +3738,40 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
- if (priv->extend_desc) {
- seq_printf(seq, "Extended RX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
- seq_printf(seq, "Extended TX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
- } else {
- seq_printf(seq, "RX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
- seq_printf(seq, "TX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ seq_printf(seq, "RX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_erx,
+ DMA_RX_SIZE, 1, seq);
+ } else {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_rx,
+ DMA_RX_SIZE, 0, seq);
+ }
+ }
+
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ seq_printf(seq, "TX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_etx,
+ DMA_TX_SIZE, 1, seq);
+ } else {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_tx,
+ DMA_TX_SIZE, 0, seq);
+ }
}
return 0;
@@ -3208,11 +4054,14 @@ int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
- int ret = 0;
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
+ int ret = 0;
+ u32 queue;
- ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
+ MTL_MAX_TX_QUEUES,
+ MTL_MAX_RX_QUEUES);
if (!ndev)
return -ENOMEM;
@@ -3254,6 +4103,10 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -3303,7 +4156,12 @@ int stmmac_dvr_probe(struct device *device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
- netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
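+ /* one NAPI context per RX queue; the weight scales with the queue count */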
+ for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
+ (8 * priv->plat->rx_queues_to_use));
+ }
spin_lock_init(&priv->lock);
@@ -3348,7 +4206,11 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
- netif_napi_del(&priv->napi);
+ for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ netif_napi_del(&rx_q->napi);
+ }
error_hw_init:
free_netdev(ndev);
@@ -3369,10 +4231,9 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
- priv->hw->dma->stop_rx(priv->ioaddr);
- priv->hw->dma->stop_tx(priv->ioaddr);
+ stmmac_stop_all_dma(priv);
- stmmac_set_mac(priv->ioaddr, false);
+ priv->hw->mac->set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->plat->stmmac_rst)
@@ -3411,20 +4272,19 @@ int stmmac_suspend(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
netif_device_detach(ndev);
- netif_stop_queue(ndev);
+ stmmac_stop_all_queues(priv);
- napi_disable(&priv->napi);
+ stmmac_disable_all_queues(priv);
/* Stop TX/RX DMA */
- priv->hw->dma->stop_tx(priv->ioaddr);
- priv->hw->dma->stop_rx(priv->ioaddr);
+ stmmac_stop_all_dma(priv);
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
- stmmac_set_mac(priv->ioaddr, false);
+ priv->hw->mac->set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable(priv->plat->pclk);
@@ -3440,6 +4300,31 @@ int stmmac_suspend(struct device *dev)
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
+ * stmmac_reset_queues_param - reset queue parameters
+ * @priv: driver private structure
+ */
+static void stmmac_reset_queues_param(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+ }
+
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ }
+}
+
+/**
* stmmac_resume - resume callback
* @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
@@ -3479,10 +4364,8 @@ int stmmac_resume(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
- priv->cur_rx = 0;
- priv->dirty_rx = 0;
- priv->dirty_tx = 0;
- priv->cur_tx = 0;
+ stmmac_reset_queues_param(priv);
+
/* reset private mss value to force mss context settings at
* next tso xmit (only used for gmac4).
*/
@@ -3494,9 +4377,9 @@ int stmmac_resume(struct device *dev)
stmmac_init_tx_coalesce(priv);
stmmac_set_rx_mode(ndev);
- napi_enable(&priv->napi);
+ stmmac_enable_all_queues(priv);
- netif_start_queue(ndev);
+ stmmac_start_all_queues(priv);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 5c9e462276b9..39be96779145 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -32,6 +32,7 @@
*/
struct stmmac_pci_dmi_data {
const char *name;
+ const char *asset_tag;
unsigned int func;
int phy_addr;
};
@@ -46,6 +47,7 @@ struct stmmac_pci_info {
static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
{
const char *name = dmi_get_system_info(DMI_BOARD_NAME);
+ const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
unsigned int func = PCI_FUNC(info->pdev->devfn);
struct stmmac_pci_dmi_data *dmi;
@@ -57,8 +59,12 @@ static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
return 1;
for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
- if (!strcmp(dmi->name, name) && dmi->func == func)
+ if (!strcmp(dmi->name, name) && dmi->func == func) {
+ /* If asset tag is provided, match on it as well. */
+ if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
+ continue;
return dmi->phy_addr;
+ }
}
return -ENODEV;
@@ -88,6 +94,17 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[0].use_prio = false;
+ plat->rx_queues_cfg[0].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
}
static int quark_default_data(struct plat_stmmacenet_data *plat,
@@ -142,6 +159,24 @@ static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
.func = 6,
.phy_addr = 1,
},
+ {
+ .name = "SIMATIC IOT2000",
+ .asset_tag = "6ES7647-0AA00-0YA2",
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .name = "SIMATIC IOT2000",
+ .asset_tag = "6ES7647-0AA00-1YA2",
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .name = "SIMATIC IOT2000",
+ .asset_tag = "6ES7647-0AA00-1YA2",
+ .func = 7,
+ .phy_addr = 1,
+ },
{}
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 433a84239a68..7fc3a1ef395a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
if (!np)
return NULL;
- axi = kzalloc(sizeof(*axi), GFP_KERNEL);
+ axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
if (!axi) {
of_node_put(np);
return ERR_PTR(-ENOMEM);
@@ -132,6 +132,155 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
}
/**
+ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
+ * @pdev: platform device
+ * @plat: driver platform data to fill with the parsed queue configuration
+ */
+static void stmmac_mtl_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *q_node;
+ struct device_node *rx_node;
+ struct device_node *tx_node;
+ u8 queue = 0;
+
+ /* For backwards-compatibility with device trees that don't have any
+ * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
+ * to a single RX queue and a single TX queue.
+ */
+ plat->rx_queues_to_use = 1;
+ plat->tx_queues_to_use = 1;
+
+ rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
+ if (!rx_node)
+ return;
+
+ tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
+ if (!tx_node) {
+ of_node_put(rx_node);
+ return;
+ }
+
+ /* Processing RX queues common config */
+ if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use))
+ plat->rx_queues_to_use = 1;
+
+ if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+ else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
+ else
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ /* Processing individual RX queue config */
+ for_each_child_of_node(rx_node, q_node) {
+ if (queue >= plat->rx_queues_to_use)
+ break;
+
+ if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ else
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+
+ if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan))
+ plat->rx_queues_cfg[queue].chan = queue;
+ /* TODO: Dynamic mapping to be included in the future */
+
+ if (of_property_read_u32(q_node, "snps,priority",
+ &plat->rx_queues_cfg[queue].prio)) {
+ plat->rx_queues_cfg[queue].prio = 0;
+ plat->rx_queues_cfg[queue].use_prio = false;
+ } else {
+ plat->rx_queues_cfg[queue].use_prio = true;
+ }
+
+ /* RX queue specific packet type routing */
+ if (of_property_read_bool(q_node, "snps,route-avcp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
+ else if (of_property_read_bool(q_node, "snps,route-ptp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
+ else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
+ else if (of_property_read_bool(q_node, "snps,route-up"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
+ else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
+ else
+ plat->rx_queues_cfg[queue].pkt_route = 0x0;
+
+ queue++;
+ }
+
+ /* Processing TX queues common config */
+ if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use))
+ plat->tx_queues_to_use = 1;
+
+ if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+ else
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+
+ queue = 0;
+
+ /* Processing individual TX queue config */
+ for_each_child_of_node(tx_node, q_node) {
+ if (queue >= plat->tx_queues_to_use)
+ break;
+
+ if (of_property_read_u8(q_node, "snps,weight",
+ &plat->tx_queues_cfg[queue].weight))
+ plat->tx_queues_cfg[queue].weight = 0x10 + queue;
+
+ if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ } else if (of_property_read_bool(q_node,
+ "snps,avb-algorithm")) {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+
+ /* Credit-Based Shaper parameters used by AVB */
+ if (of_property_read_u32(q_node, "snps,send_slope",
+ &plat->tx_queues_cfg[queue].send_slope))
+ plat->tx_queues_cfg[queue].send_slope = 0x0;
+ if (of_property_read_u32(q_node, "snps,idle_slope",
+ &plat->tx_queues_cfg[queue].idle_slope))
+ plat->tx_queues_cfg[queue].idle_slope = 0x0;
+ if (of_property_read_u32(q_node, "snps,high_credit",
+ &plat->tx_queues_cfg[queue].high_credit))
+ plat->tx_queues_cfg[queue].high_credit = 0x0;
+ if (of_property_read_u32(q_node, "snps,low_credit",
+ &plat->tx_queues_cfg[queue].low_credit))
+ plat->tx_queues_cfg[queue].low_credit = 0x0;
+ } else {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ }
+
+ if (of_property_read_u32(q_node, "snps,priority",
+ &plat->tx_queues_cfg[queue].prio)) {
+ plat->tx_queues_cfg[queue].prio = 0;
+ plat->tx_queues_cfg[queue].use_prio = false;
+ } else {
+ plat->tx_queues_cfg[queue].use_prio = true;
+ }
+
+ queue++;
+ }
+
+ of_node_put(rx_node);
+ of_node_put(tx_node);
+ of_node_put(q_node);
+}
+
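+/* An illustrative device-tree layout consumed by stmmac_mtl_setup();
+ * the node names and values here are hypothetical, but the properties
+ * are exactly the ones parsed above:
+ *
+ *	ethernet {
+ *		snps,mtl-rx-config = <&mtl_rx_setup>;
+ *		snps,mtl-tx-config = <&mtl_tx_setup>;
+ *	};
+ *
+ *	mtl_rx_setup: rx-queues-config {
+ *		snps,rx-queues-to-use = <1>;
+ *		snps,rx-sched-sp;
+ *		queue0 {
+ *			snps,dcb-algorithm;
+ *			snps,map-to-dma-channel = <0x0>;
+ *			snps,priority = <0x0>;
+ *		};
+ *	};
+ *
+ *	mtl_tx_setup: tx-queues-config {
+ *		snps,tx-queues-to-use = <1>;
+ *		snps,tx-sched-sp;
+ *		queue0 {
+ *			snps,weight = <0x10>;
+ *			snps,dcb-algorithm;
+ *		};
+ *	};
+ */
+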
+/**
* stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
* @plat: driver data platform structure
* @np: device tree node
@@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->axi = stmmac_axi_setup(pdev);
+ stmmac_mtl_setup(pdev, plat);
+
/* clock setup */
plat->stmmac_clk = devm_clk_get(&pdev->dev,
STMMAC_RESOURCE_NAME);
@@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
clk_prepare_enable(plat->pclk);
/* Fall-back to main clock in case of no PTP ref is passed */
- plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
+ plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
if (IS_ERR(plat->clk_ptp_ref)) {
plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
plat->clk_ptp_ref = NULL;
dev_warn(&pdev->dev, "PTP uses main clock\n");
} else {
- clk_prepare_enable(plat->clk_ptp_ref);
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
}
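The hunk above switches the optional PTP reference clock to the standard
"ptp_ref" name and no longer enables it at probe time. As a minimal sketch
of the named-optional-clock idiom used here (everything except the
"ptp_ref" name is illustrative):

	struct clk *ptp_clk;
	unsigned long rate;

	ptp_clk = devm_clk_get(&pdev->dev, "ptp_ref");
	if (IS_ERR(ptp_clk)) {
		/* no dedicated PTP clock: time-stamp off the main clock */
		rate = clk_get_rate(main_clk);
		ptp_clk = NULL;
	} else {
		rate = clk_get_rate(ptp_clk);
	}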
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0e8e89f17dbb..382993c1561c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -691,7 +691,8 @@ static void cas_mif_poll(struct cas *cp, const int enable)
}
/* Must be invoked under cp->lock */
-static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
+static void cas_begin_auto_negotiation(struct cas *cp,
+ const struct ethtool_link_ksettings *ep)
{
u16 ctl;
#if 1
@@ -704,16 +705,16 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
if (!ep)
goto start_aneg;
lcntl = cp->link_cntl;
- if (ep->autoneg == AUTONEG_ENABLE)
+ if (ep->base.autoneg == AUTONEG_ENABLE) {
cp->link_cntl = BMCR_ANENABLE;
- else {
- u32 speed = ethtool_cmd_speed(ep);
+ } else {
+ u32 speed = ep->base.speed;
cp->link_cntl = 0;
if (speed == SPEED_100)
cp->link_cntl |= BMCR_SPEED100;
else if (speed == SPEED_1000)
cp->link_cntl |= CAS_BMCR_SPEED1000;
- if (ep->duplex == DUPLEX_FULL)
+ if (ep->base.duplex == DUPLEX_FULL)
cp->link_cntl |= BMCR_FULLDPLX;
}
#if 1
@@ -4528,19 +4529,21 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
-static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cas_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct cas *cp = netdev_priv(dev);
u16 bmcr;
int full_duplex, speed, pause;
unsigned long flags;
enum link_state linkstate = link_up;
+ u32 supported, advertising;
- cmd->advertising = 0;
- cmd->supported = SUPPORTED_Autoneg;
+ advertising = 0;
+ supported = SUPPORTED_Autoneg;
if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
- cmd->supported |= SUPPORTED_1000baseT_Full;
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
/* Record PHY settings if HW is on. */
@@ -4548,17 +4551,15 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bmcr = 0;
linkstate = cp->lstate;
if (CAS_PHY_MII(cp->phy_type)) {
- cmd->port = PORT_MII;
- cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
- cmd->phy_address = cp->phy_addr;
- cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = cp->phy_addr;
+ advertising |= ADVERTISED_TP | ADVERTISED_MII |
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- cmd->supported |=
+ supported |=
(SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -4574,11 +4575,10 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
} else {
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->phy_address = 0;
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.phy_address = 0;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
if (cp->hw_running) {
/* pcs uses the same bits as mii */
@@ -4590,21 +4590,20 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_unlock_irqrestore(&cp->lock, flags);
if (bmcr & BMCR_ANENABLE) {
- cmd->advertising |= ADVERTISED_Autoneg;
- cmd->autoneg = AUTONEG_ENABLE;
- ethtool_cmd_speed_set(cmd, ((speed == 10) ?
+ advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.speed = ((speed == 10) ?
SPEED_10 :
((speed == 1000) ?
- SPEED_1000 : SPEED_100)));
- cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ SPEED_1000 : SPEED_100));
+ cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
SPEED_1000 :
((bmcr & BMCR_SPEED100) ?
- SPEED_100 : SPEED_10)));
- cmd->duplex =
- (bmcr & BMCR_FULLDPLX) ?
+ SPEED_100 : SPEED_10));
+ cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
if (linkstate != link_up) {
@@ -4619,39 +4618,46 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
* settings that we configured.
*/
if (cp->link_cntl & BMCR_ANENABLE) {
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = 0xff;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0xff;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
if (cp->link_cntl & BMCR_SPEED100) {
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
}
- cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+ cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
}
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int cas_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct cas *cp = netdev_priv(dev);
unsigned long flags;
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Apply settings and restart link process. */
@@ -4753,8 +4759,6 @@ static void cas_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops cas_ethtool_ops = {
.get_drvinfo = cas_get_drvinfo,
- .get_settings = cas_get_settings,
- .set_settings = cas_set_settings,
.nway_reset = cas_nway_reset,
.get_link = cas_get_link,
.get_msglevel = cas_get_msglevel,
@@ -4764,6 +4768,8 @@ static const struct ethtool_ops cas_ethtool_ops = {
.get_sset_count = cas_get_sset_count,
.get_strings = cas_get_strings,
.get_ethtool_stats = cas_get_ethtool_stats,
+ .get_link_ksettings = cas_get_link_ksettings,
+ .set_link_ksettings = cas_set_link_ksettings,
};
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
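The cassini conversion follows the standard get_link_ksettings recipe: keep
building the legacy SUPPORTED_*/ADVERTISED_* u32 masks as before, fill in
cmd->base, and only at the end translate the masks into the link-mode
bitmaps. A minimal sketch of that recipe (the foo_* names are hypothetical;
the helpers are the real ethtool ones):

	static int foo_get_link_ksettings(struct net_device *dev,
					  struct ethtool_link_ksettings *cmd)
	{
		u32 supported = SUPPORTED_100baseT_Full | SUPPORTED_Autoneg;
		u32 advertising = ADVERTISED_100baseT_Full | ADVERTISED_Autoneg;

		cmd->base.speed = SPEED_100;
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.autoneg = AUTONEG_ENABLE;
		cmd->base.port = PORT_TP;

		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
							supported);
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
							advertising);
		return 0;
	}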
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 89952deae47f..5a90fed06260 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -1,6 +1,6 @@
/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver.
*
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,8 +41,8 @@
static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
#define DRV_MODULE_NAME "ldmvsw"
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "February 3, 2017"
+#define DRV_MODULE_VERSION "1.2"
+#define DRV_MODULE_RELDATE "March 4, 2017"
static char version[] =
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
@@ -123,6 +123,20 @@ static void vsw_set_rx_mode(struct net_device *dev)
return sunvnet_set_rx_mode_common(dev, port->vp);
}
+int ldmvsw_open(struct net_device *dev)
+{
+ struct vnet_port *port = netdev_priv(dev);
+ struct vio_driver_state *vio = &port->vio;
+
+ /* reset the channel */
+ vio_link_state_change(vio, LDC_EVENT_RESET);
+ vnet_port_reset(port);
+ vio_port_up(vio);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ldmvsw_open);
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vsw_poll_controller(struct net_device *dev)
{
@@ -133,7 +147,7 @@ static void vsw_poll_controller(struct net_device *dev)
#endif
static const struct net_device_ops vsw_ops = {
- .ndo_open = sunvnet_open_common,
+ .ndo_open = ldmvsw_open,
.ndo_stop = sunvnet_close_common,
.ndo_set_rx_mode = vsw_set_rx_mode,
.ndo_set_mac_address = sunvnet_set_mac_addr_common,
@@ -365,6 +379,11 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
napi_enable(&port->napi);
vio_port_up(&port->vio);
+ /* ensure no carrier until we receive an LDC_EVENT_UP,
+ * even if the vsw config script tries to force us up
+ */
+ netif_carrier_off(dev);
+
netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr);
pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 57978056b336..2dcca249eb9c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6813,7 +6813,8 @@ static void niu_get_drvinfo(struct net_device *dev,
sizeof(info->bus_info));
}
-static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct niu *np = netdev_priv(dev);
struct niu_link_config *lp;
@@ -6821,28 +6822,30 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
lp = &np->link_config;
memset(cmd, 0, sizeof(*cmd));
- cmd->phy_address = np->phy_addr;
- cmd->supported = lp->supported;
- cmd->advertising = lp->active_advertising;
- cmd->autoneg = lp->active_autoneg;
- ethtool_cmd_speed_set(cmd, lp->active_speed);
- cmd->duplex = lp->active_duplex;
- cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
- cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
- XCVR_EXTERNAL : XCVR_INTERNAL;
+ cmd->base.phy_address = np->phy_addr;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ lp->supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ lp->active_advertising);
+ cmd->base.autoneg = lp->active_autoneg;
+ cmd->base.speed = lp->active_speed;
+ cmd->base.duplex = lp->active_duplex;
+ cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
return 0;
}
-static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int niu_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct niu *np = netdev_priv(dev);
struct niu_link_config *lp = &np->link_config;
- lp->advertising = cmd->advertising;
- lp->speed = ethtool_cmd_speed(cmd);
- lp->duplex = cmd->duplex;
- lp->autoneg = cmd->autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
+ cmd->link_modes.advertising);
+ lp->speed = cmd->base.speed;
+ lp->duplex = cmd->base.duplex;
+ lp->autoneg = cmd->base.autoneg;
return niu_init_link(np);
}
@@ -7902,14 +7905,14 @@ static const struct ethtool_ops niu_ethtool_ops = {
.nway_reset = niu_nway_reset,
.get_eeprom_len = niu_get_eeprom_len,
.get_eeprom = niu_get_eeprom,
- .get_settings = niu_get_settings,
- .set_settings = niu_set_settings,
.get_strings = niu_get_strings,
.get_sset_count = niu_get_sset_count,
.get_ethtool_stats = niu_get_ethtool_stats,
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_link_ksettings = niu_get_link_ksettings,
+ .set_link_ksettings = niu_set_link_ksettings,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
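The set path is the mirror image: the link-mode bitmap coming from user
space is collapsed back into a legacy u32 before it reaches code that still
thinks in ADVERTISED_* bits, as niu_set_link_ksettings() does above. A
sketch with hypothetical names:

	static int foo_set_link_ksettings(struct net_device *dev,
					  const struct ethtool_link_ksettings *cmd)
	{
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(&advertising,
							cmd->link_modes.advertising);
		/* program the legacy advertising mask into the PHY here */
		return 0;
	}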
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index c4caf486cbef..3189722110c2 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -169,7 +169,7 @@ static void bigmac_stop(struct bigmac *bp)
static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
{
- struct net_device_stats *stats = &bp->enet_stats;
+ struct net_device_stats *stats = &bp->dev->stats;
stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
sbus_writel(0, bregs + BMAC_RCRCECTR);
@@ -774,8 +774,8 @@ static void bigmac_tx(struct bigmac *bp)
if (this->tx_flags & TXD_OWN)
break;
skb = bp->tx_skbs[elem];
- bp->enet_stats.tx_packets++;
- bp->enet_stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dma_unmap_single(&bp->bigmac_op->dev,
this->tx_addr, skb->len,
DMA_TO_DEVICE);
@@ -811,12 +811,12 @@ static void bigmac_rx(struct bigmac *bp)
/* Check for errors. */
if (len < ETH_ZLEN) {
- bp->enet_stats.rx_errors++;
- bp->enet_stats.rx_length_errors++;
+ bp->dev->stats.rx_errors++;
+ bp->dev->stats.rx_length_errors++;
drop_it:
/* Return it to the BigMAC. */
- bp->enet_stats.rx_dropped++;
+ bp->dev->stats.rx_dropped++;
this->rx_flags =
(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
goto next;
@@ -875,8 +875,8 @@ static void bigmac_rx(struct bigmac *bp)
/* No checksums done by the BigMAC ;-( */
skb->protocol = eth_type_trans(skb, bp->dev);
netif_rx(skb);
- bp->enet_stats.rx_packets++;
- bp->enet_stats.rx_bytes += len;
+ bp->dev->stats.rx_packets++;
+ bp->dev->stats.rx_bytes += len;
next:
elem = NEXT_RX(elem);
this = &rxbase[elem];
@@ -987,7 +987,7 @@ static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
struct bigmac *bp = netdev_priv(dev);
bigmac_get_counters(bp, bp->bregs);
- return &bp->enet_stats;
+ return &dev->stats;
}
static void bigmac_set_multicast(struct net_device *dev)
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
index 532fc56830cf..ee56930475a8 100644
--- a/drivers/net/ethernet/sun/sunbmac.h
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -311,7 +311,6 @@ struct bigmac {
enum bigmac_timer_state timer_state;
unsigned int timer_ticks;
- struct net_device_stats enet_stats;
struct platform_device *qec_op;
struct platform_device *bigmac_op;
struct net_device *dev;
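With the private enet_stats member gone, the counters live in the
net_device itself and bigmac_get_stats() can simply return &dev->stats. The
general shape of that pattern, as a sketch (the foo_* names are
hypothetical):

	static struct net_device_stats *foo_get_stats(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		/* fold the hardware MIB registers into dev->stats first */
		foo_refresh_hw_counters(priv);
		return &dev->stats;
	}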
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 5c5952e782cd..fa607d062cb3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1250,12 +1250,18 @@ static void gem_stop_dma(struct gem *gp)
// XXX dbl check what that function should do when called on PCS PHY
-static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
+static void gem_begin_auto_negotiation(struct gem *gp,
+ const struct ethtool_link_ksettings *ep)
{
u32 advertise, features;
int autoneg;
int speed;
int duplex;
+ u32 advertising;
+
+ if (ep)
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, ep->link_modes.advertising);
if (gp->phy_type != phy_mii_mdio0 &&
gp->phy_type != phy_mii_mdio1)
@@ -1278,13 +1284,13 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
/* Setup link parameters */
if (!ep)
goto start_aneg;
- if (ep->autoneg == AUTONEG_ENABLE) {
- advertise = ep->advertising;
+ if (ep->base.autoneg == AUTONEG_ENABLE) {
+ advertise = advertising;
autoneg = 1;
} else {
autoneg = 0;
- speed = ethtool_cmd_speed(ep);
- duplex = ep->duplex;
+ speed = ep->base.speed;
+ duplex = ep->base.duplex;
}
start_aneg:
@@ -2515,85 +2521,96 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
-static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gem_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct gem *gp = netdev_priv(dev);
+ u32 supported, advertising;
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1) {
if (gp->phy_mii.def)
- cmd->supported = gp->phy_mii.def->features;
+ supported = gp->phy_mii.def->features;
else
- cmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full);
/* XXX hardcoded stuff for now */
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->phy_address = 0; /* XXX fixed PHYAD */
+ cmd->base.port = PORT_MII;
+ cmd->base.phy_address = 0; /* XXX fixed PHYAD */
/* Return current PHY settings */
- cmd->autoneg = gp->want_autoneg;
- ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
- cmd->duplex = gp->phy_mii.duplex;
- cmd->advertising = gp->phy_mii.advertising;
+ cmd->base.autoneg = gp->want_autoneg;
+ cmd->base.speed = gp->phy_mii.speed;
+ cmd->base.duplex = gp->phy_mii.duplex;
+ advertising = gp->phy_mii.advertising;
/* If we started with a forced mode, we don't have a default
* advertise set, we need to return something sensible so
* userland can re-enable autoneg properly.
*/
- if (cmd->advertising == 0)
- cmd->advertising = cmd->supported;
+ if (advertising == 0)
+ advertising = supported;
} else { // XXX PCS ?
- cmd->supported =
+ supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg);
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = cmd->port = cmd->phy_address =
- cmd->transceiver = cmd->autoneg = 0;
+ advertising = supported;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0;
+ cmd->base.port = 0;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = 0;
/* serdes means usually a Fibre connector, with most fixed */
if (gp->phy_type == phy_serdes) {
- cmd->port = PORT_FIBRE;
- cmd->supported = (SUPPORTED_1000baseT_Half |
+ cmd->base.port = PORT_FIBRE;
+ supported = (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE | SUPPORTED_Autoneg |
SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- cmd->advertising = cmd->supported;
- cmd->transceiver = XCVR_INTERNAL;
+ advertising = supported;
if (gp->lstate == link_up)
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->autoneg = 1;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = 1;
}
}
- cmd->maxtxpkt = cmd->maxrxpkt = 0;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int gem_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct gem *gp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE &&
- cmd->advertising == 0)
+ if (cmd->base.autoneg == AUTONEG_ENABLE &&
+ advertising == 0)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Apply settings and restart link process. */
@@ -2666,13 +2683,13 @@ static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops gem_ethtool_ops = {
.get_drvinfo = gem_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_settings = gem_get_settings,
- .set_settings = gem_set_settings,
.nway_reset = gem_nway_reset,
.get_msglevel = gem_get_msglevel,
.set_msglevel = gem_set_msglevel,
.get_wol = gem_get_wol,
.set_wol = gem_set_wol,
+ .get_link_ksettings = gem_get_link_ksettings,
+ .set_link_ksettings = gem_set_link_ksettings,
};
static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72ff05cd3ed8..9e983e1d8249 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -933,7 +933,7 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
/* hp->happy_lock must be held */
static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
{
- struct net_device_stats *stats = &hp->net_stats;
+ struct net_device_stats *stats = &hp->dev->stats;
stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
@@ -1294,9 +1294,10 @@ static void happy_meal_init_rings(struct happy_meal *hp)
}
/* hp->happy_lock must be held */
-static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
- void __iomem *tregs,
- struct ethtool_cmd *ep)
+static void
+happy_meal_begin_auto_negotiation(struct happy_meal *hp,
+ void __iomem *tregs,
+ const struct ethtool_link_ksettings *ep)
{
int timeout;
@@ -1309,7 +1310,7 @@ static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
- if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
/* Advertise everything we can support. */
if (hp->sw_bmsr & BMSR_10HALF)
hp->sw_advertise |= (ADVERTISE_10HALF);
@@ -1384,14 +1385,14 @@ force_link:
/* Disable auto-negotiation in BMCR, enable the duplex and
* speed setting, init the timer state machine, and fire it off.
*/
- if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
hp->sw_bmcr = BMCR_SPEED100;
} else {
- if (ethtool_cmd_speed(ep) == SPEED_100)
+ if (ep->base.speed == SPEED_100)
hp->sw_bmcr = BMCR_SPEED100;
else
hp->sw_bmcr = 0;
- if (ep->duplex == DUPLEX_FULL)
+ if (ep->base.duplex == DUPLEX_FULL)
hp->sw_bmcr |= BMCR_FULLDPLX;
}
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1856,7 +1857,7 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
if (status & GREG_STAT_TXLERR)
printk("LateError ");
if (status & GREG_STAT_TXPERR)
- printk("ParityErro ");
+ printk("ParityError ");
if (status & GREG_STAT_TXTERR)
printk("TagBotch ");
printk("]\n");
@@ -1946,7 +1947,7 @@ static void happy_meal_tx(struct happy_meal *hp)
break;
}
hp->tx_skbs[elem] = NULL;
- hp->net_stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
dma_addr = hme_read_desc32(hp, &this->tx_addr);
@@ -1963,7 +1964,7 @@ static void happy_meal_tx(struct happy_meal *hp)
}
dev_kfree_skb_irq(skb);
- hp->net_stats.tx_packets++;
+ dev->stats.tx_packets++;
}
hp->tx_old = elem;
TXD((">"));
@@ -2008,17 +2009,17 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
/* Check for errors. */
if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
RXD(("ERR(%08x)]", flags));
- hp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
if (len < ETH_ZLEN)
- hp->net_stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (len & (RXFLAG_OVERFLOW >> 16)) {
- hp->net_stats.rx_over_errors++;
- hp->net_stats.rx_fifo_errors++;
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_fifo_errors++;
}
/* Return it to the Happy meal. */
drop_it:
- hp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
dma_addr);
@@ -2083,8 +2084,8 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- hp->net_stats.rx_packets++;
- hp->net_stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
next:
elem = NEXT_RX(elem);
this = &rxbase[elem];
@@ -2395,7 +2396,7 @@ static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
happy_meal_get_counters(hp, hp->bigmacregs);
spin_unlock_irq(&hp->happy_lock);
- return &hp->net_stats;
+ return &dev->stats;
}
static void happy_meal_set_multicast(struct net_device *dev)
@@ -2434,20 +2435,21 @@ static void happy_meal_set_multicast(struct net_device *dev)
}
/* Ethtool support... */
-static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int hme_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct happy_meal *hp = netdev_priv(dev);
u32 speed;
+ u32 supported;
- cmd->supported =
+ supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
/* XXX hardcoded stuff for now */
- cmd->port = PORT_TP; /* XXX no MII support */
- cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
- cmd->phy_address = 0; /* XXX fixed PHYAD */
+ cmd->base.port = PORT_TP; /* XXX no MII support */
+ cmd->base.phy_address = 0; /* XXX fixed PHYAD */
/* Record PHY settings. */
spin_lock_irq(&hp->happy_lock);
@@ -2456,41 +2458,45 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
spin_unlock_irq(&hp->happy_lock);
if (hp->sw_bmcr & BMCR_ANENABLE) {
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
SPEED_100 : SPEED_10);
if (speed == SPEED_100)
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_lpa & (LPA_100FULL)) ?
DUPLEX_FULL : DUPLEX_HALF;
else
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_lpa & (LPA_10FULL)) ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
- cmd->duplex =
+ cmd->base.duplex =
(hp->sw_bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
- ethtool_cmd_speed_set(cmd, speed);
+ cmd->base.speed = speed;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+
return 0;
}
-static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int hme_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct happy_meal *hp = netdev_priv(dev);
/* Verify the settings we care about. */
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
- ((ethtool_cmd_speed(cmd) != SPEED_100 &&
- ethtool_cmd_speed(cmd) != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)))
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ ((cmd->base.speed != SPEED_100 &&
+ cmd->base.speed != SPEED_10) ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
return -EINVAL;
/* Ok, do it to it. */
@@ -2537,10 +2543,10 @@ static u32 hme_get_link(struct net_device *dev)
}
static const struct ethtool_ops hme_ethtool_ops = {
- .get_settings = hme_get_settings,
- .set_settings = hme_set_settings,
.get_drvinfo = hme_get_drvinfo,
.get_link = hme_get_link,
+ .get_link_ksettings = hme_get_link_ksettings,
+ .set_link_ksettings = hme_set_link_ksettings,
};
static int hme_version_printed;
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 4a8d5b18dfd5..3af540adb3c5 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -418,8 +418,6 @@ struct happy_meal {
int rx_new, tx_new, rx_old, tx_old;
- struct net_device_stats net_stats; /* Statistical counters */
-
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
u32 (*read32)(void __iomem *);
void (*write32)(void __iomem *, u32);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 4cc2571f71c6..0b95105f7060 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1,7 +1,7 @@
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -77,11 +77,125 @@ static void vnet_set_msglevel(struct net_device *dev, u32 value)
vp->msg_enable = value;
}
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "rx_packets" },
+ { "tx_packets" },
+ { "rx_bytes" },
+ { "tx_bytes" },
+ { "rx_errors" },
+ { "tx_errors" },
+ { "rx_dropped" },
+ { "tx_dropped" },
+ { "multicast" },
+ { "rx_length_errors" },
+ { "rx_frame_errors" },
+ { "rx_missed_errors" },
+ { "tx_carrier_errors" },
+ { "nports" },
+};
+
+static int vnet_get_sset_count(struct net_device *dev, int sset)
+{
+ struct vnet *vp = (struct vnet *)netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stats_keys)
+ + (NUM_VNET_PORT_STATS * vp->nports);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct vnet *vp = (struct vnet *)netdev_priv(dev);
+ struct vnet_port *port;
+ char *p = (char *)buf;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ p += sizeof(ethtool_stats_keys);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
+ snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
+ port->q_index, port->switch_port ? "s" : "q",
+ port->raddr);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
+ port->q_index);
+ p += ETH_GSTRING_LEN;
+ }
+ rcu_read_unlock();
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void vnet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct vnet *vp = (struct vnet *)netdev_priv(dev);
+ struct vnet_port *port;
+ int i = 0;
+
+ data[i++] = dev->stats.rx_packets;
+ data[i++] = dev->stats.tx_packets;
+ data[i++] = dev->stats.rx_bytes;
+ data[i++] = dev->stats.tx_bytes;
+ data[i++] = dev->stats.rx_errors;
+ data[i++] = dev->stats.tx_errors;
+ data[i++] = dev->stats.rx_dropped;
+ data[i++] = dev->stats.tx_dropped;
+ data[i++] = dev->stats.multicast;
+ data[i++] = dev->stats.rx_length_errors;
+ data[i++] = dev->stats.rx_frame_errors;
+ data[i++] = dev->stats.rx_missed_errors;
+ data[i++] = dev->stats.tx_carrier_errors;
+ data[i++] = vp->nports;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
+ data[i++] = port->q_index;
+ data[i++] = port->stats.rx_packets;
+ data[i++] = port->stats.tx_packets;
+ data[i++] = port->stats.rx_bytes;
+ data[i++] = port->stats.tx_bytes;
+ data[i++] = port->stats.event_up;
+ data[i++] = port->stats.event_reset;
+ }
+ rcu_read_unlock();
+}
+
static const struct ethtool_ops vnet_ethtool_ops = {
.get_drvinfo = vnet_get_drvinfo,
.get_msglevel = vnet_get_msglevel,
.set_msglevel = vnet_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_sset_count = vnet_get_sset_count,
+ .get_strings = vnet_get_strings,
+ .get_ethtool_stats = vnet_get_ethtool_stats,
};
static LIST_HEAD(vnet_list);
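The three new callbacks form a single contract: get_sset_count() declares
how many statistics exist, get_strings() names them, and
get_ethtool_stats() must fill exactly that many u64 values in the same
order. A self-contained sketch of the contract (names hypothetical):

	static const char foo_stat_names[][ETH_GSTRING_LEN] = {
		"rx_ok",
		"tx_ok",
	};

	static int foo_get_sset_count(struct net_device *dev, int sset)
	{
		return sset == ETH_SS_STATS ? ARRAY_SIZE(foo_stat_names)
					    : -EOPNOTSUPP;
	}

	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *buf)
	{
		if (sset == ETH_SS_STATS)
			memcpy(buf, foo_stat_names, sizeof(foo_stat_names));
	}

	static void foo_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats, u64 *data)
	{
		data[0] = dev->stats.rx_packets;	/* matches "rx_ok" */
		data[1] = dev->stats.tx_packets;	/* matches "tx_ok" */
	}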
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index fa2d11ca9b81..9e86833249d4 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1,7 +1,7 @@
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
- * Copyright (C) 2016 Oracle. All rights reserved.
+ * Copyright (C) 2016-2017 Oracle. All rights reserved.
*/
#include <linux/module.h>
@@ -43,7 +43,6 @@ MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");
static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
-static void vnet_port_reset(struct vnet_port *port);
static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
@@ -410,8 +409,12 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
+ if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest)))
+ dev->stats.multicast++;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
+ port->stats.rx_packets++;
+ port->stats.rx_bytes += len;
napi_gro_receive(&port->napi, skb);
return 0;
@@ -747,6 +750,13 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
/* RESET takes precedent over any other event */
if (port->rx_event & LDC_EVENT_RESET) {
+ /* a link went down */
+
+ if (port->vsw == 1) {
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ }
+
vio_link_state_change(vio, LDC_EVENT_RESET);
vnet_port_reset(port);
vio_port_up(vio);
@@ -762,12 +772,21 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
maybe_tx_wakeup(port);
port->rx_event = 0;
+ port->stats.event_reset++;
return 0;
}
if (port->rx_event & LDC_EVENT_UP) {
+ /* a link came up */
+
+ if (port->vsw == 1) {
+ netif_carrier_on(port->dev);
+ netif_tx_start_all_queues(port->dev);
+ }
+
vio_link_state_change(vio, LDC_EVENT_UP);
port->rx_event = 0;
+ port->stats.event_up++;
return 0;
}
@@ -1417,6 +1436,8 @@ ldc_start_done:
dev->stats.tx_packets++;
dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
+ port->stats.tx_packets++;
+ port->stats.tx_bytes += port->tx_bufs[txi].skb->len;
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
@@ -1631,7 +1652,7 @@ void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
-static void vnet_port_reset(struct vnet_port *port)
+void vnet_port_reset(struct vnet_port *port)
{
del_timer(&port->clean_timer);
sunvnet_port_free_tx_bufs_common(port);
@@ -1639,6 +1660,7 @@ static void vnet_port_reset(struct vnet_port *port)
port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
port->tsolen = 0;
}
+EXPORT_SYMBOL_GPL(vnet_port_reset);
static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
@@ -1708,20 +1730,32 @@ EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
void sunvnet_port_add_txq_common(struct vnet_port *port)
{
struct vnet *vp = port->vp;
- int n;
+ int smallest = 0;
+ int i;
+
+ /* find the first least-used queue.
+ * When there are more ldoms than queues, we start to
+ * double up on ports per queue.
+ */
+ for (i = 0; i < VNET_MAX_TXQS; i++) {
+ if (vp->q_used[i] == 0) {
+ smallest = i;
+ break;
+ }
+ if (vp->q_used[i] < vp->q_used[smallest])
+ smallest = i;
+ }
- n = vp->nports++;
- n = n & (VNET_MAX_TXQS - 1);
- port->q_index = n;
- netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
- port->q_index));
+ vp->nports++;
+ vp->q_used[smallest]++;
+ port->q_index = smallest;
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);
void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
port->vp->nports--;
- netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
- port->q_index));
+ port->vp->q_used[port->q_index]--;
+ port->q_index = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
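A short walk through the queue selection above, with illustrative q_used
contents:

	/*
	 * q_used = {2, 1, 0, ...}: i = 2 hits q_used[i] == 0 and the loop
	 * breaks, so the port lands on the first completely free queue.
	 *
	 * q_used = {2, 1, 1, 1, ...}: no entry is zero, so "smallest"
	 * tracks the first minimum seen and index 1 is chosen, doubling
	 * up ports on the least-loaded queue.
	 */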
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index ce5c824128a3..b20d6fa7ef25 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -35,6 +35,19 @@ struct vnet_tx_entry {
struct vnet;
+struct vnet_port_stats {
+ /* keep them all the same size */
+ u32 rx_bytes;
+ u32 tx_bytes;
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 event_up;
+ u32 event_reset;
+ u32 q_placeholder;
+};
+
+#define NUM_VNET_PORT_STATS (sizeof(struct vnet_port_stats) / sizeof(u32))
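+/* Note: NUM_VNET_PORT_STATS evaluates to 7 - the six counters above plus
+ * q_placeholder. The spare slot keeps the per-port stride in step with
+ * vnet_get_ethtool_stats(), which reports q_index ahead of the six
+ * counters, and with the seven strings emitted per port in
+ * vnet_get_strings().
+ */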
+
/* Structure to describe a vnet-port or vsw-port in the MD.
* If the vsw bit is set, this structure represents a vswitch
* port, and the net_device can be found from ->dev. If the
@@ -44,6 +57,8 @@ struct vnet;
struct vnet_port {
struct vio_driver_state vio;
+ struct vnet_port_stats stats;
+
struct hlist_node hash;
u8 raddr[ETH_ALEN];
unsigned switch_port:1;
@@ -97,22 +112,15 @@ struct vnet_mcast_entry {
};
struct vnet {
- /* Protects port_list and port_hash. */
- spinlock_t lock;
-
+ spinlock_t lock; /* Protects port_list and port_hash. */
struct net_device *dev;
-
u32 msg_enable;
-
+ u8 q_used[VNET_MAX_TXQS];
struct list_head port_list;
-
struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
-
struct vnet_mcast_entry *mcast_list;
-
struct list_head list;
u64 local_mac;
-
int nports;
};
@@ -139,6 +147,7 @@ int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg);
void sunvnet_handshake_complete_common(struct vio_driver_state *vio);
int sunvnet_poll_common(struct napi_struct *napi, int budget);
void sunvnet_port_free_tx_bufs_common(struct vnet_port *port);
+void vnet_port_reset(struct vnet_port *port);
bool sunvnet_port_is_up_common(struct vnet_port *vnet);
void sunvnet_port_add_txq_common(struct vnet_port *port);
void sunvnet_port_rm_txq_common(struct vnet_port *port);
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 000000000000..a9503884e1c2
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,41 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+ bool "Synopsys devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Synopsys devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config DWC_XLGMAC
+ tristate "Synopsys DWC Enterprise Ethernet (XLGMAC) driver support"
+ depends on HAS_IOMEM && HAS_DMA
+ select BITREVERSE
+ select CRC32
+ ---help---
+ This driver supports the Synopsys DesignWare Cores Enterprise
+ Ethernet (dwc-xlgmac).
+
+if DWC_XLGMAC
+
+config DWC_XLGMAC_PCI
+ tristate "XLGMAC PCI bus support"
+ depends on DWC_XLGMAC && PCI
+ ---help---
+ This selects the PCI bus support for the dwc-xlgmac driver.
+ This driver was tested on Synopsys XLGMAC IP Prototyping Kit.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
+endif # DWC_XLGMAC
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 000000000000..0ad01916f11e
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_DWC_XLGMAC) += dwc-xlgmac.o
+dwc-xlgmac-objs := dwc-xlgmac-net.o dwc-xlgmac-desc.o \
+ dwc-xlgmac-hw.o dwc-xlgmac-common.o \
+ dwc-xlgmac-ethtool.o
+
+dwc-xlgmac-$(CONFIG_DWC_XLGMAC_PCI) += dwc-xlgmac-pci.o
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
new file mode 100644
index 000000000000..d655a4261e98
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -0,0 +1,737 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)");
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP);
+
+static unsigned char dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
+
+static void xlgmac_read_mac_addr(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+
+ /* Currently it uses a static MAC address for testing */
+ memcpy(pdata->mac_addr, dev_addr, netdev->addr_len);
+}
+
+static void xlgmac_default_config(struct xlgmac_pdata *pdata)
+{
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_pbl = DMA_PBL_32;
+ pdata->rx_pbl = DMA_PBL_32;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_128;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_128;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->phy_speed = SPEED_25000;
+ pdata->sysclk_rate = XLGMAC_SYSCLOCK;
+
+ strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
+ strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
+}
+
+static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata)
+{
+ xlgmac_init_desc_ops(&pdata->desc_ops);
+ xlgmac_init_hw_ops(&pdata->hw_ops);
+}
+
+static int xlgmac_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ /* Set default configuration data */
+ xlgmac_default_config(pdata);
+
+ /* Set irq, base_addr and MAC address */
+ netdev->irq = pdata->dev_irq;
+ netdev->base_addr = (unsigned long)pdata->mac_regs;
+ xlgmac_read_mac_addr(pdata);
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+ /* Set all the function pointers */
+ xlgmac_init_all_ops(pdata);
+
+ /* Issue software reset to device */
+ hw_ops->exit(pdata);
+
+ /* Populate the hardware features */
+ xlgmac_get_all_hw_features(pdata);
+ xlgmac_print_all_hw_features(pdata);
+
+ /* TODO: Set the PHY mode to XLGMII */
+
+ /* Set the DMA mask */
+ ret = dma_set_mask_and_coherent(pdata->dev,
+ DMA_BIT_MASK(pdata->hw_feat.dma_width));
+ if (ret) {
+ dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n");
+ return ret;
+ }
+
+ /* Channel and ring params initialization
+ * pdata->channel_count;
+ * pdata->tx_ring_count;
+ * pdata->rx_ring_count;
+ * pdata->tx_desc_count;
+ * pdata->rx_desc_count;
+ */
+ BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_TX_DESC_CNT);
+ pdata->tx_desc_count = XLGMAC_TX_DESC_CNT;
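+ /* count & (count - 1) is zero only when count is a power of two */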
+ if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+ dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n",
+ pdata->tx_desc_count);
+ ret = -EINVAL;
+ return ret;
+ }
+ BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_RX_DESC_CNT);
+ pdata->rx_desc_count = XLGMAC_RX_DESC_CNT;
+ if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+ dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n",
+ pdata->rx_desc_count);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
+ pdata->hw_feat.tx_q_cnt);
+ pdata->tx_q_count = pdata->tx_ring_count;
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count);
+ if (ret) {
+ dev_err(pdata->dev, "error setting real tx queue count\n");
+ return ret;
+ }
+
+ pdata->rx_ring_count = min_t(unsigned int,
+ netif_get_num_default_rss_queues(),
+ pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
+ pdata->hw_feat.rx_q_cnt);
+ pdata->rx_q_count = pdata->rx_ring_count;
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count);
+ if (ret) {
+ dev_err(pdata->dev, "error setting real rx queue count\n");
+ return ret;
+ }
+
+ pdata->channel_count =
+ max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ /* Initialize RSS hash key and lookup table */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
+ for (i = 0; i < XLGMAC_RSS_MAX_TABLE_SIZE; i++)
+ pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
+ pdata->rss_table[i],
+ MAC_RSSDR_DMCH_POS,
+ MAC_RSSDR_DMCH_LEN,
+ i % pdata->rx_ring_count);
+
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_IP2TE_POS,
+ MAC_RSSCR_IP2TE_LEN, 1);
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_TCP4TE_POS,
+ MAC_RSSCR_TCP4TE_LEN, 1);
+ pdata->rss_options = XLGMAC_SET_REG_BITS(
+ pdata->rss_options,
+ MAC_RSSCR_UDP4TE_POS,
+ MAC_RSSCR_UDP4TE_LEN, 1);
+
+ /* Set device operations */
+ netdev->netdev_ops = xlgmac_get_netdev_ops();
+ netdev->ethtool_ops = xlgmac_get_ethtool_ops();
+
+ /* Set device features */
+ if (pdata->hw_feat.tso) {
+ netdev->hw_features = NETIF_F_TSO;
+ netdev->hw_features |= NETIF_F_TSO6;
+ netdev->hw_features |= NETIF_F_SG;
+ netdev->hw_features |= NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ } else if (pdata->hw_feat.tx_coe) {
+ netdev->hw_features = NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ }
+
+ if (pdata->hw_feat.rx_coe) {
+ netdev->hw_features |= NETIF_F_RXCSUM;
+ netdev->hw_features |= NETIF_F_GRO;
+ }
+
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->vlan_features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (pdata->hw_feat.sa_vlan_ins)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (pdata->hw_feat.vlhash)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Use default watchdog timeout */
+ netdev->watchdog_timeo = 0;
+
+ /* Tx coalesce parameters initialization */
+ pdata->tx_usecs = XLGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XLGMAC_INIT_DMA_TX_FRAMES;
+
+ /* Rx coalesce parameters initialization */
+ pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, XLGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_usecs = XLGMAC_INIT_DMA_RX_USECS;
+ pdata->rx_frames = XLGMAC_INIT_DMA_RX_FRAMES;
+
+ return 0;
+}
+
+int xlgmac_drv_probe(struct device *dev, struct xlgmac_resources *res)
+{
+ struct xlgmac_pdata *pdata;
+ struct net_device *netdev;
+ int ret;
+
+ netdev = alloc_etherdev_mq(sizeof(struct xlgmac_pdata),
+ XLGMAC_MAX_DMA_CHANNELS);
+
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(netdev, dev);
+ dev_set_drvdata(dev, netdev);
+ pdata = netdev_priv(netdev);
+ pdata->dev = dev;
+ pdata->netdev = netdev;
+
+ pdata->dev_irq = res->irq;
+ pdata->mac_regs = res->addr;
+
+ mutex_init(&pdata->rss_mutex);
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ ret = xlgmac_init(pdata);
+ if (ret) {
+ dev_err(dev, "xlgmac init failed\n");
+ goto err_free_netdev;
+ }
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ goto err_free_netdev;
+ }
+
+ return 0;
+
+err_free_netdev:
+ free_netdev(netdev);
+
+ return ret;
+}
+
+int xlgmac_drv_remove(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+
+ return 0;
+}
+
+void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx,
+ unsigned int count,
+ unsigned int flag)
+{
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+
+ while (count--) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
+ dma_desc = desc_data->dma_desc;
+
+ netdev_dbg(pdata->netdev, "TX: dma_desc=%p, dma_desc_addr=%pad\n",
+ desc_data->dma_desc, &desc_data->dma_desc_addr);
+ netdev_dbg(pdata->netdev,
+ "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(dma_desc->desc0),
+ le32_to_cpu(dma_desc->desc1),
+ le32_to_cpu(dma_desc->desc2),
+ le32_to_cpu(dma_desc->desc3));
+
+ idx++;
+ }
+}
+
+void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx)
+{
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
+ dma_desc = desc_data->dma_desc;
+
+ netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n",
+ desc_data->dma_desc, &desc_data->dma_desc_addr);
+ netdev_dbg(pdata->netdev,
+ "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+ idx,
+ le32_to_cpu(dma_desc->desc0),
+ le32_to_cpu(dma_desc->desc1),
+ le32_to_cpu(dma_desc->desc2),
+ le32_to_cpu(dma_desc->desc3));
+}
+
+void xlgmac_print_pkt(struct net_device *netdev,
+ struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char *buf = skb->data;
+ unsigned char buffer[128];
+ unsigned int i, j;
+
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_dbg(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+
+ for (i = 0, j = 0; i < skb->len;) {
+ j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+ buf[i++]);
+
+ if ((i % 32) == 0) {
+ netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
+ j = 0;
+ } else if ((i % 16) == 0) {
+ buffer[j++] = ' ';
+ buffer[j++] = ' ';
+ } else if ((i % 4) == 0) {
+ buffer[j++] = ' ';
+ }
+ }
+ if (i % 32)
+ netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
+
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
+}
+
+void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_features *hw_feat = &pdata->hw_feat;
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+
+ mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R);
+ mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R);
+ mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = readl(pdata->mac_regs + MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->phyifsel = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_PHYIFSEL_POS,
+ MAC_HWF0R_PHYIFSEL_LEN);
+ hw_feat->vlhash = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_VLHASH_POS,
+ MAC_HWF0R_VLHASH_LEN);
+ hw_feat->sma = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_SMASEL_POS,
+ MAC_HWF0R_SMASEL_LEN);
+ hw_feat->rwk = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_RWKSEL_POS,
+ MAC_HWF0R_RWKSEL_LEN);
+ hw_feat->mgk = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_MGKSEL_POS,
+ MAC_HWF0R_MGKSEL_LEN);
+ hw_feat->mmc = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_MMCSEL_POS,
+ MAC_HWF0R_MMCSEL_LEN);
+ hw_feat->aoe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_ARPOFFSEL_POS,
+ MAC_HWF0R_ARPOFFSEL_LEN);
+ hw_feat->ts = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TSSEL_POS,
+ MAC_HWF0R_TSSEL_LEN);
+ hw_feat->eee = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_EEESEL_POS,
+ MAC_HWF0R_EEESEL_LEN);
+ hw_feat->tx_coe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TXCOESEL_POS,
+ MAC_HWF0R_TXCOESEL_LEN);
+ hw_feat->rx_coe = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_RXCOESEL_POS,
+ MAC_HWF0R_RXCOESEL_LEN);
+ hw_feat->addn_mac = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_ADDMACADRSEL_POS,
+ MAC_HWF0R_ADDMACADRSEL_LEN);
+ hw_feat->ts_src = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_TSSTSSEL_POS,
+ MAC_HWF0R_TSSTSSEL_LEN);
+ hw_feat->sa_vlan_ins = XLGMAC_GET_REG_BITS(mac_hfr0,
+ MAC_HWF0R_SAVLANINS_POS,
+ MAC_HWF0R_SAVLANINS_LEN);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_RXFIFOSIZE_POS,
+ MAC_HWF1R_RXFIFOSIZE_LEN);
+ hw_feat->tx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_TXFIFOSIZE_POS,
+ MAC_HWF1R_TXFIFOSIZE_LEN);
+ hw_feat->adv_ts_hi = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_ADVTHWORD_POS,
+ MAC_HWF1R_ADVTHWORD_LEN);
+ hw_feat->dma_width = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_ADDR64_POS,
+ MAC_HWF1R_ADDR64_LEN);
+ hw_feat->dcb = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_DCBEN_POS,
+ MAC_HWF1R_DCBEN_LEN);
+ hw_feat->sph = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_SPHEN_POS,
+ MAC_HWF1R_SPHEN_LEN);
+ hw_feat->tso = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_TSOEN_POS,
+ MAC_HWF1R_TSOEN_LEN);
+ hw_feat->dma_debug = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_DBGMEMA_POS,
+ MAC_HWF1R_DBGMEMA_LEN);
+ hw_feat->rss = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_RSSEN_POS,
+ MAC_HWF1R_RSSEN_LEN);
+ hw_feat->tc_cnt = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_NUMTC_POS,
+ MAC_HWF1R_NUMTC_LEN);
+ hw_feat->hash_table_size = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_HASHTBLSZ_POS,
+ MAC_HWF1R_HASHTBLSZ_LEN);
+ hw_feat->l3l4_filter_num = XLGMAC_GET_REG_BITS(mac_hfr1,
+ MAC_HWF1R_L3L4FNUM_POS,
+ MAC_HWF1R_L3L4FNUM_LEN);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_RXQCNT_POS,
+ MAC_HWF2R_RXQCNT_LEN);
+ hw_feat->tx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_TXQCNT_POS,
+ MAC_HWF2R_TXQCNT_LEN);
+ hw_feat->rx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_RXCHCNT_POS,
+ MAC_HWF2R_RXCHCNT_LEN);
+ hw_feat->tx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_TXCHCNT_POS,
+ MAC_HWF2R_TXCHCNT_LEN);
+ hw_feat->pps_out_num = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_PPSOUTNUM_POS,
+ MAC_HWF2R_PPSOUTNUM_LEN);
+ hw_feat->aux_snap_num = XLGMAC_GET_REG_BITS(mac_hfr2,
+ MAC_HWF2R_AUXSNAPNUM_POS,
+ MAC_HWF2R_AUXSNAPNUM_LEN);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+}
+
+void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
+{
+ char *str = NULL;
+
+ XLGMAC_PR("\n");
+ XLGMAC_PR("=====================================================\n");
+ XLGMAC_PR("\n");
+ XLGMAC_PR("HW support following features\n");
+ XLGMAC_PR("\n");
+ /* HW Feature Register0 */
+ XLGMAC_PR("VLAN Hash Filter Selected : %s\n",
+ pdata->hw_feat.vlhash ? "YES" : "NO");
+ XLGMAC_PR("SMA (MDIO) Interface : %s\n",
+ pdata->hw_feat.sma ? "YES" : "NO");
+ XLGMAC_PR("PMT Remote Wake-up Packet Enable : %s\n",
+ pdata->hw_feat.rwk ? "YES" : "NO");
+ XLGMAC_PR("PMT Magic Packet Enable : %s\n",
+ pdata->hw_feat.mgk ? "YES" : "NO");
+ XLGMAC_PR("RMON/MMC Module Enable : %s\n",
+ pdata->hw_feat.mmc ? "YES" : "NO");
+ XLGMAC_PR("ARP Offload Enabled : %s\n",
+ pdata->hw_feat.aoe ? "YES" : "NO");
+ XLGMAC_PR("IEEE 1588-2008 Timestamp Enabled : %s\n",
+ pdata->hw_feat.ts ? "YES" : "NO");
+ XLGMAC_PR("Energy Efficient Ethernet Enabled : %s\n",
+ pdata->hw_feat.eee ? "YES" : "NO");
+ XLGMAC_PR("Transmit Checksum Offload Enabled : %s\n",
+ pdata->hw_feat.tx_coe ? "YES" : "NO");
+ XLGMAC_PR("Receive Checksum Offload Enabled : %s\n",
+ pdata->hw_feat.rx_coe ? "YES" : "NO");
+ XLGMAC_PR("Additional MAC Addresses 1-31 Selected : %s\n",
+ pdata->hw_feat.addn_mac ? "YES" : "NO");
+
+ switch (pdata->hw_feat.ts_src) {
+ case 0:
+ str = "RESERVED";
+ break;
+ case 1:
+ str = "INTERNAL";
+ break;
+ case 2:
+ str = "EXTERNAL";
+ break;
+ case 3:
+ str = "BOTH";
+ break;
+ }
+ XLGMAC_PR("Timestamp System Time Source : %s\n", str);
+
+ XLGMAC_PR("Source Address or VLAN Insertion Enable : %s\n",
+ pdata->hw_feat.sa_vlan_ins ? "YES" : "NO");
+
+ /* HW Feature Register1 */
+ switch (pdata->hw_feat.rx_fifo_size) {
+ case 0:
+ str = "128 bytes";
+ break;
+ case 1:
+ str = "256 bytes";
+ break;
+ case 2:
+ str = "512 bytes";
+ break;
+ case 3:
+ str = "1 KBytes";
+ break;
+ case 4:
+ str = "2 KBytes";
+ break;
+ case 5:
+ str = "4 KBytes";
+ break;
+ case 6:
+ str = "8 KBytes";
+ break;
+ case 7:
+ str = "16 KBytes";
+ break;
+ case 8:
+ str = "32 kBytes";
+ break;
+ case 9:
+ str = "64 KBytes";
+ break;
+ case 10:
+ str = "128 KBytes";
+ break;
+ case 11:
+ str = "256 KBytes";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("MTL Receive FIFO Size : %s\n", str);
+
+ switch (pdata->hw_feat.tx_fifo_size) {
+ case 0:
+ str = "128 bytes";
+ break;
+ case 1:
+ str = "256 bytes";
+ break;
+ case 2:
+ str = "512 bytes";
+ break;
+ case 3:
+ str = "1 KBytes";
+ break;
+ case 4:
+ str = "2 KBytes";
+ break;
+ case 5:
+ str = "4 KBytes";
+ break;
+ case 6:
+ str = "8 KBytes";
+ break;
+ case 7:
+ str = "16 KBytes";
+ break;
+ case 8:
+ str = "32 kBytes";
+ break;
+ case 9:
+ str = "64 KBytes";
+ break;
+ case 10:
+ str = "128 KBytes";
+ break;
+ case 11:
+ str = "256 KBytes";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("MTL Transmit FIFO Size : %s\n", str);
+
+ XLGMAC_PR("IEEE 1588 High Word Register Enable : %s\n",
+ pdata->hw_feat.adv_ts_hi ? "YES" : "NO");
+ XLGMAC_PR("Address width : %u\n",
+ pdata->hw_feat.dma_width);
+ XLGMAC_PR("DCB Feature Enable : %s\n",
+ pdata->hw_feat.dcb ? "YES" : "NO");
+ XLGMAC_PR("Split Header Feature Enable : %s\n",
+ pdata->hw_feat.sph ? "YES" : "NO");
+ XLGMAC_PR("TCP Segmentation Offload Enable : %s\n",
+ pdata->hw_feat.tso ? "YES" : "NO");
+ XLGMAC_PR("DMA Debug Registers Enabled : %s\n",
+ pdata->hw_feat.dma_debug ? "YES" : "NO");
+ XLGMAC_PR("RSS Feature Enabled : %s\n",
+ pdata->hw_feat.rss ? "YES" : "NO");
+ XLGMAC_PR("Number of Traffic classes : %u\n",
+ (pdata->hw_feat.tc_cnt));
+ XLGMAC_PR("Hash Table Size : %u\n",
+ pdata->hw_feat.hash_table_size);
+ XLGMAC_PR("Total number of L3 or L4 Filters : %u\n",
+ pdata->hw_feat.l3l4_filter_num);
+
+ /* HW Feature Register2 */
+ XLGMAC_PR("Number of MTL Receive Queues : %u\n",
+ pdata->hw_feat.rx_q_cnt);
+ XLGMAC_PR("Number of MTL Transmit Queues : %u\n",
+ pdata->hw_feat.tx_q_cnt);
+ XLGMAC_PR("Number of DMA Receive Channels : %u\n",
+ pdata->hw_feat.rx_ch_cnt);
+ XLGMAC_PR("Number of DMA Transmit Channels : %u\n",
+ pdata->hw_feat.tx_ch_cnt);
+
+ switch (pdata->hw_feat.pps_out_num) {
+ case 0:
+ str = "No PPS output";
+ break;
+ case 1:
+ str = "1 PPS output";
+ break;
+ case 2:
+ str = "2 PPS outputs";
+ break;
+ case 3:
+ str = "3 PPS outputs";
+ break;
+ case 4:
+ str = "4 PPS outputs";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("Number of PPS Outputs : %s\n", str);
+
+ switch (pdata->hw_feat.aux_snap_num) {
+ case 0:
+ str = "No auxiliary input";
+ break;
+ case 1:
+ str = "1 auxiliary input";
+ break;
+ case 2:
+ str = "2 auxiliary inputs";
+ break;
+ case 3:
+ str = "3 auxiliary inputs";
+ break;
+ case 4:
+ str = "4 auxiliary inputs";
+ break;
+ default:
+ str = "RESERVED";
+ }
+ XLGMAC_PR("Number of Auxiliary Snapshot Inputs : %s", str);
+
+ XLGMAC_PR("\n");
+ XLGMAC_PR("=====================================================\n");
+ XLGMAC_PR("\n");
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
new file mode 100644
index 000000000000..e9672b1f9968
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -0,0 +1,644 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data)
+{
+ if (desc_data->skb_dma) {
+ if (desc_data->mapped_as_page) {
+ dma_unmap_page(pdata->dev, desc_data->skb_dma,
+ desc_data->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, desc_data->skb_dma,
+ desc_data->skb_dma_len, DMA_TO_DEVICE);
+ }
+ desc_data->skb_dma = 0;
+ desc_data->skb_dma_len = 0;
+ }
+
+ if (desc_data->skb) {
+ dev_kfree_skb_any(desc_data->skb);
+ desc_data->skb = NULL;
+ }
+
+ if (desc_data->rx.hdr.pa.pages)
+ put_page(desc_data->rx.hdr.pa.pages);
+
+ if (desc_data->rx.hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
+ desc_data->rx.hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(desc_data->rx.hdr.pa_unmap.pages);
+ }
+
+ if (desc_data->rx.buf.pa.pages)
+ put_page(desc_data->rx.buf.pa.pages);
+
+ if (desc_data->rx.buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
+ desc_data->rx.buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(desc_data->rx.buf.pa_unmap.pages);
+ }
+
+ memset(&desc_data->tx, 0, sizeof(desc_data->tx));
+ memset(&desc_data->rx, 0, sizeof(desc_data->rx));
+
+ desc_data->mapped_as_page = 0;
+
+ if (desc_data->state_saved) {
+ desc_data->state_saved = 0;
+ desc_data->state.skb = NULL;
+ desc_data->state.len = 0;
+ desc_data->state.error = 0;
+ }
+}
+
+static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring)
+{
+ struct xlgmac_desc_data *desc_data;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->desc_data_head) {
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+ xlgmac_unmap_desc_data(pdata, desc_data);
+ }
+
+ kfree(ring->desc_data_head);
+ ring->desc_data_head = NULL;
+ }
+
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
+ if (ring->dma_desc_head) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xlgmac_dma_desc) *
+ ring->dma_desc_count),
+ ring->dma_desc_head,
+ ring->dma_desc_head_addr);
+ ring->dma_desc_head = NULL;
+ }
+}
+
+static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int dma_desc_count)
+{
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ ring->dma_desc_count = dma_desc_count;
+ ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
+ (sizeof(struct xlgmac_dma_desc) *
+ dma_desc_count),
+ &ring->dma_desc_head_addr,
+ GFP_KERNEL);
+ if (!ring->dma_desc_head)
+ return -ENOMEM;
+
+ /* Array of descriptor data */
+ ring->desc_data_head = kcalloc(dma_desc_count,
+ sizeof(struct xlgmac_desc_data),
+ GFP_KERNEL);
+ if (!ring->desc_data_head)
+ return -ENOMEM;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
+ ring->dma_desc_head,
+ &ring->dma_desc_head_addr,
+ ring->desc_data_head);
+
+ return 0;
+}
+
+static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (!pdata->channel_head)
+ return;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ xlgmac_free_ring(pdata, channel->tx_ring);
+ xlgmac_free_ring(pdata, channel->rx_ring);
+ }
+}
+
+static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ int ret;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+ channel->name);
+
+ ret = xlgmac_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring");
+ goto err_init_ring;
+ }
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+ channel->name);
+
+ ret = xlgmac_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Rx ring\n");
+ goto err_init_ring;
+ }
+ }
+
+ return 0;
+
+err_init_ring:
+ xlgmac_free_rings(pdata);
+
+ return ret;
+}
+
+static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
+{
+ if (!pdata->channel_head)
+ return;
+
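+ /* The Tx and Rx rings were allocated as two contiguous arrays, with
+ * each channel pointing into them; freeing the head entries frees
+ * the rings for every channel
+ */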
+ kfree(pdata->channel_head->tx_ring);
+ pdata->channel_head->tx_ring = NULL;
+
+ kfree(pdata->channel_head->rx_ring);
+ pdata->channel_head->rx_ring = NULL;
+
+ kfree(pdata->channel_head);
+
+ pdata->channel_head = NULL;
+ pdata->channel_count = 0;
+}
+
+static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel_head, *channel;
+ struct xlgmac_ring *tx_ring, *rx_ring;
+ int ret = -ENOMEM;
+ unsigned int i;
+
+ channel_head = kcalloc(pdata->channel_count,
+ sizeof(struct xlgmac_channel), GFP_KERNEL);
+ if (!channel_head)
+ return ret;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "channel_head=%p\n", channel_head);
+
+ tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
+ GFP_KERNEL);
+ if (!tx_ring)
+ goto err_tx_ring;
+
+ rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
+ GFP_KERNEL);
+ if (!rx_ring)
+ goto err_rx_ring;
+
+ for (i = 0, channel = channel_head; i < pdata->channel_count;
+ i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (pdata->per_channel_irq) {
+ /* Get the per DMA interrupt */
+ ret = pdata->channel_irq[i];
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "get_irq %u failed\n",
+ i + 1);
+ goto err_irq;
+ }
+ channel->dma_irq = ret;
+ }
+
+ if (i < pdata->tx_ring_count)
+ channel->tx_ring = tx_ring++;
+
+ if (i < pdata->rx_ring_count)
+ channel->rx_ring = rx_ring++;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
+ channel->name, channel->dma_regs,
+ channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel_head = channel_head;
+
+ return 0;
+
+err_irq:
+ kfree(rx_ring);
+
+err_rx_ring:
+ kfree(tx_ring);
+
+err_tx_ring:
+ kfree(channel_head);
+
+ return ret;
+}
+
+static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
+{
+ xlgmac_free_rings(pdata);
+
+ xlgmac_free_channels(pdata);
+}
+
+static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
+{
+ int ret;
+
+ ret = xlgmac_alloc_channels(pdata);
+ if (ret)
+ goto err_alloc;
+
+ ret = xlgmac_alloc_rings(pdata);
+ if (ret)
+ goto err_alloc;
+
+ return 0;
+
+err_alloc:
+ xlgmac_free_channels_and_rings(pdata);
+
+ return ret;
+}
+
+static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
+ struct xlgmac_page_alloc *pa,
+ gfp_t gfp, int order)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, pages_dma)) {
+ put_page(pages);
+ return -ENOMEM;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
+static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
+ struct xlgmac_page_alloc *pa,
+ unsigned int len)
+{
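+ /* Each buffer slice takes its own reference on the backing page, so
+ * the page is released only after every slice has been unmapped
+ */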
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma_base = pa->pages_dma;
+ bd->dma_off = pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
+
+static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct xlgmac_desc_data *desc_data)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
+ GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
+ GFP_ATOMIC, order);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
+ XLGMAC_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
+static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ dma_addr_t dma_desc_addr;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ dma_desc = ring->dma_desc_head;
+ dma_desc_addr = ring->dma_desc_head_addr;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+
+ desc_data->dma_desc = dma_desc;
+ desc_data->dma_desc_addr = dma_desc_addr;
+
+ dma_desc++;
+ dma_desc_addr += sizeof(struct xlgmac_dma_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ memset(&ring->tx, 0, sizeof(ring->tx));
+
+ hw_ops->tx_desc_init(channel);
+ }
+}
+
+static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ dma_addr_t dma_desc_addr;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ dma_desc = ring->dma_desc_head;
+ dma_desc_addr = ring->dma_desc_head_addr;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+
+ desc_data->dma_desc = dma_desc;
+ desc_data->dma_desc_addr = dma_desc_addr;
+
+ if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
+ break;
+
+ dma_desc++;
+ dma_desc_addr += sizeof(struct xlgmac_dma_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+
+ hw_ops->rx_desc_init(channel);
+ }
+}
+
+static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
+ struct sk_buff *skb)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ unsigned int start_index, cur_index;
+ struct xlgmac_desc_data *desc_data;
+ unsigned int offset, datalen, len;
+ struct xlgmac_pkt_info *pkt_info;
+ struct skb_frag_struct *frag;
+ unsigned int tso, vlan;
+ dma_addr_t skb_dma;
+ unsigned int i;
+
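+ /* Map the TSO header (if any), then the linear data, then each page
+ * fragment, splitting every piece into chunks of at most
+ * XLGMAC_TX_MAX_BUF_SIZE bytes per descriptor
+ */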
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ pkt_info = &ring->pkt_info;
+ pkt_info->desc_count = 0;
+ pkt_info->length = 0;
+
+ tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+ vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+ /* Save space for a context descriptor if needed */
+ if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
+ (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+
+ if (tso) {
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pkt_info->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = pkt_info->header_len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb header: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, pkt_info->header_len);
+
+ offset = pkt_info->header_len;
+
+ pkt_info->length += pkt_info->header_len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb data: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ pkt_info->length += len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen,
+ XLGMAC_TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ desc_data->skb_dma = skb_dma;
+ desc_data->skb_dma_len = len;
+ desc_data->mapped_as_page = 1;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb frag: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ pkt_info->length += len;
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry. We always have some data
+ * that has been mapped so desc_data is always advanced past the last
+ * piece of mapped data - use the entry pointed to by cur_index - 1.
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
+ desc_data->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ pkt_info->desc_count = cur_index - start_index;
+
+ return pkt_info->desc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
+ xlgmac_unmap_desc_data(pdata, desc_data);
+ }
+
+ return 0;
+}
+
+void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
+{
+ desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
+ desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
+ desc_ops->map_tx_skb = xlgmac_map_tx_skb;
+ desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
+ desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
+ desc_ops->tx_desc_init = xlgmac_tx_desc_init;
+ desc_ops->rx_desc_init = xlgmac_rx_desc_init;
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
new file mode 100644
index 000000000000..fde722136869
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
@@ -0,0 +1,275 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+struct xlgmac_stats_desc {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
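+/* Resolve a stat name to the byte offset of its counter within
+ * struct xlgmac_pdata
+ */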
+#define XLGMAC_STAT(str, var) \
+ { \
+ str, \
+ offsetof(struct xlgmac_pdata, stats.var), \
+ }
+
+static const struct xlgmac_stats_desc xlgmac_gstring_stats[] = {
+ /* MMC TX counters */
+ XLGMAC_STAT("tx_bytes", txoctetcount_gb),
+ XLGMAC_STAT("tx_bytes_good", txoctetcount_g),
+ XLGMAC_STAT("tx_packets", txframecount_gb),
+ XLGMAC_STAT("tx_packets_good", txframecount_g),
+ XLGMAC_STAT("tx_unicast_packets", txunicastframes_gb),
+ XLGMAC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XLGMAC_STAT("tx_broadcast_packets_good", txbroadcastframes_g),
+ XLGMAC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XLGMAC_STAT("tx_multicast_packets_good", txmulticastframes_g),
+ XLGMAC_STAT("tx_vlan_packets_good", txvlanframes_g),
+ XLGMAC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XLGMAC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ XLGMAC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ XLGMAC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ XLGMAC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ XLGMAC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+ XLGMAC_STAT("tx_underflow_errors", txunderflowerror),
+ XLGMAC_STAT("tx_pause_frames", txpauseframes),
+
+ /* MMC RX counters */
+ XLGMAC_STAT("rx_bytes", rxoctetcount_gb),
+ XLGMAC_STAT("rx_bytes_good", rxoctetcount_g),
+ XLGMAC_STAT("rx_packets", rxframecount_gb),
+ XLGMAC_STAT("rx_unicast_packets_good", rxunicastframes_g),
+ XLGMAC_STAT("rx_broadcast_packets_good", rxbroadcastframes_g),
+ XLGMAC_STAT("rx_multicast_packets_good", rxmulticastframes_g),
+ XLGMAC_STAT("rx_vlan_packets", rxvlanframes_gb),
+ XLGMAC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XLGMAC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XLGMAC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ XLGMAC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ XLGMAC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ XLGMAC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+ XLGMAC_STAT("rx_undersize_packets_good", rxundersize_g),
+ XLGMAC_STAT("rx_oversize_packets_good", rxoversize_g),
+ XLGMAC_STAT("rx_crc_errors", rxcrcerror),
+ XLGMAC_STAT("rx_crc_errors_small_packets", rxrunterror),
+ XLGMAC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+ XLGMAC_STAT("rx_length_errors", rxlengtherror),
+ XLGMAC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XLGMAC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XLGMAC_STAT("rx_watchdog_errors", rxwatchdogerror),
+ XLGMAC_STAT("rx_pause_frames", rxpauseframes),
+
+ /* Extra counters */
+ XLGMAC_STAT("tx_tso_packets", tx_tso_packets),
+ XLGMAC_STAT("rx_split_header_packets", rx_split_header_packets),
+ XLGMAC_STAT("tx_process_stopped", tx_process_stopped),
+ XLGMAC_STAT("rx_process_stopped", rx_process_stopped),
+ XLGMAC_STAT("tx_buffer_unavailable", tx_buffer_unavailable),
+ XLGMAC_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
+ XLGMAC_STAT("fatal_bus_error", fatal_bus_error),
+ XLGMAC_STAT("tx_vlan_packets", tx_vlan_packets),
+ XLGMAC_STAT("rx_vlan_packets", rx_vlan_packets),
+ XLGMAC_STAT("napi_poll_isr", napi_poll_isr),
+ XLGMAC_STAT("napi_poll_txtimer", napi_poll_txtimer),
+};
+
+#define XLGMAC_STATS_COUNT ARRAY_SIZE(xlgmac_gstring_stats)
+
+static void xlgmac_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ u32 ver = pdata->hw_feat.version;
+ u32 snpsver, devid, userver;
+
+ strlcpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ /* S|SNPSVER: Synopsys-defined Version
+ * D|DEVID: Indicates the Device family
+ * U|USERVER: User-defined Version
+ */
+ snpsver = XLGMAC_GET_REG_BITS(ver, MAC_VR_SNPSVER_POS,
+ MAC_VR_SNPSVER_LEN);
+ devid = XLGMAC_GET_REG_BITS(ver, MAC_VR_DEVID_POS,
+ MAC_VR_DEVID_LEN);
+ userver = XLGMAC_GET_REG_BITS(ver, MAC_VR_USERVER_POS,
+ MAC_VR_USERVER_LEN);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "S.D.U: %x.%x.%x", snpsver, devid, userver);
+}
+
+static u32 xlgmac_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ return pdata->msg_enable;
+}
+
+static void xlgmac_ethtool_set_msglevel(struct net_device *netdev,
+ u32 msglevel)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ pdata->msg_enable = msglevel;
+}
+
+static void xlgmac_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channel)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ channel->max_rx = XLGMAC_MAX_DMA_CHANNELS;
+ channel->max_tx = XLGMAC_MAX_DMA_CHANNELS;
+ channel->rx_count = pdata->rx_q_count;
+ channel->tx_count = pdata->tx_q_count;
+}
+
+static int xlgmac_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+ ec->rx_coalesce_usecs = pdata->rx_usecs;
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ return 0;
+}
+
+static int xlgmac_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames;
+
+ /* Check for unsupported parameters */
+ if ((ec->rx_coalesce_usecs_irq) || (ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs) || (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+ (ec->stats_block_coalesce_usecs) || (ec->pkt_rate_low) ||
+ (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+ (ec->rx_max_coalesced_frames_low) || (ec->rx_coalesce_usecs_low) ||
+ (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) ||
+ (ec->pkt_rate_high) || (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_max_coalesced_frames_high) ||
+ (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ rx_usecs = ec->rx_coalesce_usecs;
+ rx_riwt = hw_ops->usec_to_riwt(pdata, rx_usecs);
+ rx_frames = ec->rx_max_coalesced_frames;
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ if ((rx_riwt > XLGMAC_MAX_DMA_RIWT) ||
+ (rx_riwt < XLGMAC_MIN_DMA_RIWT) ||
+ (rx_frames > pdata->rx_desc_count))
+ return -EINVAL;
+
+ if (tx_frames > pdata->tx_desc_count)
+ return -EINVAL;
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_usecs = rx_usecs;
+ pdata->rx_frames = rx_frames;
+ hw_ops->config_rx_coalesce(pdata);
+
+ pdata->tx_frames = tx_frames;
+ hw_ops->config_tx_coalesce(pdata);
+
+ return 0;
+}
+
+static void xlgmac_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XLGMAC_STATS_COUNT; i++) {
+ memcpy(data, xlgmac_gstring_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int xlgmac_ethtool_get_sset_count(struct net_device *netdev,
+ int stringset)
+{
+ int ret;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = XLGMAC_STATS_COUNT;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static void xlgmac_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+ pdata->hw_ops.read_mmc_stats(pdata);
+ for (i = 0; i < XLGMAC_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + xlgmac_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+}
+
+static const struct ethtool_ops xlgmac_ethtool_ops = {
+ .get_drvinfo = xlgmac_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = xlgmac_ethtool_get_msglevel,
+ .set_msglevel = xlgmac_ethtool_set_msglevel,
+ .get_channels = xlgmac_ethtool_get_channels,
+ .get_coalesce = xlgmac_ethtool_get_coalesce,
+ .set_coalesce = xlgmac_ethtool_set_coalesce,
+ .get_strings = xlgmac_ethtool_get_strings,
+ .get_sset_count = xlgmac_ethtool_get_sset_count,
+ .get_ethtool_stats = xlgmac_ethtool_get_ethtool_stats,
+};
+
+const struct ethtool_ops *xlgmac_get_ethtool_ops(void)
+{
+ return &xlgmac_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
new file mode 100644
index 000000000000..458a7844260a
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -0,0 +1,3147 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+#include <linux/dcbnl.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
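+/* A Tx descriptor is owned by software again once the hardware has
+ * cleared its OWN bit
+ */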
+static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc)
+{
+ return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN);
+}
+
+static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
+ MAC_RCR_IPC_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
+ MAC_RCR_IPC_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ return 0;
+}
+
+static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR);
+ writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR);
+
+ return 0;
+}
+
+static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
+ struct netdev_hw_addr *ha,
+ unsigned int *mac_reg)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+
+ if (ha) {
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "adding mac address %pM at %#x\n",
+ ha->addr, *mac_reg);
+
+ mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
+ MAC_MACA1HR_AE_POS,
+ MAC_MACA1HR_AE_LEN,
+ 1);
+ }
+
+ writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
+ *mac_reg += MAC_MACA_INC;
+ writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
+ *mac_reg += MAC_MACA_INC;
+}
+
+static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ /* Put the VLAN tag in the Rx descriptor */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
+ MAC_VLANTR_EVLRXS_LEN, 1);
+ /* Don't check the VLAN type */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
+ MAC_VLANTR_DOVLTC_LEN, 1);
+ /* Check only C-TAG (0x8100) packets */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
+ MAC_VLANTR_ERSVLM_LEN, 0);
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
+ MAC_VLANTR_ESVL_LEN, 0);
+ /* Enable VLAN tag stripping */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+ MAC_VLANTR_EVLS_LEN, 0x3);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
+ MAC_VLANTR_EVLS_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ /* Enable VLAN filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
+ MAC_PFR_VTFE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ regval = readl(pdata->mac_regs + MAC_VLANTR);
+ /* Enable VLAN Hash Table filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
+ MAC_VLANTR_VTHM_LEN, 1);
+ /* Disable VLAN tag inverse matching */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
+ MAC_VLANTR_VTIM_LEN, 0);
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
+ MAC_VLANTR_ETV_LEN, 1);
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
+ MAC_VLANTR_VL_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_VLANTR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ /* Disable VLAN filtering */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
+ MAC_PFR_VTFE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ return 0;
+}
+
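+/* Compute the CRC-32 (little-endian, polynomial 0xedb88320) over the
+ * valid bits of the VLAN ID, bit by bit, mirroring the hash
+ * computation performed by the hardware
+ */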
+static u32 xlgmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 poly = 0xedb88320;
+ u32 crc = ~0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= poly;
+ }
+
+ return crc;
+}
+
+static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
+{
+ u16 vlan_hash_table = 0;
+ __le16 vid_le;
+ u32 regval;
+ u32 crc;
+ u16 vid;
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;
+
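+ /* The top 4 bits of the bit-reversed CRC select one of the 16
+ * bits in the hash table
+ */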
+ vlan_hash_table |= (1 << crc);
+ }
+
+ regval = readl(pdata->mac_regs + MAC_VLANHTR);
+ /* Set the VLAN Hash Table filtering register */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
+ MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
+ writel(regval, pdata->mac_regs + MAC_VLANHTR);
+
+ return 0;
+}
+
+static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+ u32 regval;
+
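+ /* Nothing to do if the filter already matches the requested mode */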
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
+ MAC_PFR_PR_POS, MAC_PFR_PR_LEN);
+ if (regval == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+ enable ? "entering" : "leaving");
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS,
+ MAC_PFR_PR_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ /* Hardware will still perform VLAN filtering in promiscuous mode */
+ if (enable) {
+ xlgmac_disable_rx_vlan_filtering(pdata);
+ } else {
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xlgmac_enable_rx_vlan_filtering(pdata);
+ }
+
+ return 0;
+}
+
+static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
+ MAC_PFR_PM_POS, MAC_PFR_PM_LEN);
+ if (regval == val)
+ return 0;
+
+ netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+ enable ? "entering" : "leaving");
+
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS,
+ MAC_PFR_PM_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+
+ return 0;
+}
+
+static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int addn_macs;
+ unsigned int mac_reg;
+
+ mac_reg = MAC_MACA1HR;
+ addn_macs = pdata->hw_feat.addn_mac;
+
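+ /* Fall back to promiscuous mode if there are more unicast addresses
+ * than address filter slots, and to all-multicast mode if the
+ * multicast addresses do not fit in the remaining slots
+ */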
+ if (netdev_uc_count(netdev) > addn_macs) {
+ xlgmac_set_promiscuous_mode(pdata, 1);
+ } else {
+ netdev_for_each_uc_addr(ha, netdev) {
+ xlgmac_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+
+ if (netdev_mc_count(netdev) > addn_macs) {
+ xlgmac_set_all_multicast_mode(pdata, 1);
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ xlgmac_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ while (addn_macs--)
+ xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
+{
+ unsigned int hash_table_shift, hash_table_count;
+ u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int hash_reg;
+ unsigned int i;
+ u32 crc;
+
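+ /* The hash table holds 64, 128 or 256 bits, so use the top 6, 7 or
+ * 8 bits of the CRC respectively as the bucket index
+ */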
+ hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+ hash_table_count = pdata->hw_feat.hash_table_size / 32;
+ memset(hash_table, 0, sizeof(hash_table));
+
+ /* Build the MAC Hash Table register values */
+ netdev_for_each_uc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ /* Set the MAC Hash Table registers */
+ hash_reg = MAC_HTR0;
+ for (i = 0; i < hash_table_count; i++) {
+ writel(hash_table[i], pdata->mac_regs + hash_reg);
+ hash_reg += MAC_HTR_INC;
+ }
+}
+
+static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata)
+{
+ if (pdata->hw_feat.hash_table_size)
+ xlgmac_set_mac_hash_table(pdata);
+ else
+ xlgmac_set_mac_addn_addrs(pdata);
+
+ return 0;
+}
+
+static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ regval = readl(pdata->mac_regs + MAC_PFR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
+ MAC_PFR_HPF_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
+ MAC_PFR_HUC_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
+ MAC_PFR_HMC_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_PFR);
+ }
+}
+
+static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int val;
+ u32 regval;
+
+ val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS,
+ MAC_RCR_JE_LEN, val);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xlgmac_enable_rx_csum(pdata);
+ else
+ xlgmac_disable_rx_csum(pdata);
+}
+
+static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_VLANIR);
+ /* Indicate that VLAN Tx CTAGs come from context descriptors */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
+ MAC_VLANIR_CSVL_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
+ MAC_VLANIR_VLTI_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_VLANIR);
+
+ /* Set the current VLAN Hash Table register value */
+ xlgmac_update_vlan_hash_table(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xlgmac_enable_rx_vlan_filtering(pdata);
+ else
+ xlgmac_disable_rx_vlan_filtering(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xlgmac_enable_rx_vlan_stripping(pdata);
+ else
+ xlgmac_disable_rx_vlan_stripping(pdata);
+}
+
+static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ unsigned int pr_mode, am_mode;
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ xlgmac_set_promiscuous_mode(pdata, pr_mode);
+ xlgmac_set_all_multicast_mode(pdata, am_mode);
+
+ xlgmac_add_mac_addresses(pdata);
+
+ return 0;
+}
+
+static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
+ struct xlgmac_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned long tx_timeout;
+ unsigned int tx_status;
+
+ /* Calculate the status register to read and the position within */
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = readl(pdata->mac_regs + tx_dsr);
+ tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
+ DMA_DSR_TPS_LEN);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
+
+static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+ DMA_CH_TCR_ST_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+ MTL_Q_TQOMR_TXQEN_LEN,
+ MTL_Q_ENABLED);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ /* Enable MAC Tx */
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
+ MAC_TCR_TE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+}
+
+static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xlgmac_prepare_tx_stop(pdata, channel);
+ }
+
+ /* Disable MAC Tx */
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
+ MAC_TCR_TE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
+ MTL_Q_TQOMR_TXQEN_LEN, 0);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
+ DMA_CH_TCR_ST_LEN, 0);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+}
+
+static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status, prxq, rxqsts;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx FIFO. Don't
+ * wait forever though...
+ */
+ rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, rx_timeout)) {
+ rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
+ prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
+ MTL_Q_RQDR_PRXQ_LEN);
+ rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
+ MTL_Q_RQDR_RXQSTS_LEN);
+ if ((prxq == 0) && (rxqsts == 0))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, rx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
+static void xlgmac_enable_rx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int regval, i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
+ DMA_CH_RCR_SR_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+
+ /* Enable each Rx queue */
+ regval = 0;
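+ /* Each Rx queue has a 2-bit field in MAC_RQC0R; writing 0x02 enables
+ * the queue (enabled-for-generic/DCB traffic on Synopsys MACs)
+ */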
+ for (i = 0; i < pdata->rx_q_count; i++)
+ regval |= (0x02 << (i << 1));
+ writel(regval, pdata->mac_regs + MAC_RQC0R);
+
+ /* Enable MAC Rx */
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
+ MAC_RCR_DCRCC_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
+ MAC_RCR_CST_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
+ MAC_RCR_ACS_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
+ MAC_RCR_RE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ /* Disable MAC Rx */
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
+ MAC_RCR_DCRCC_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
+ MAC_RCR_CST_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
+ MAC_RCR_ACS_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
+ MAC_RCR_RE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ xlgmac_prepare_rx_stop(pdata, i);
+
+ /* Disable each Rx queue */
+ writel(0, pdata->mac_regs + MAC_RQC0R);
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
+ DMA_CH_RCR_SR_LEN, 0);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+}
+
+static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_desc_data *desc_data;
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+ /* Issue a poll command to Tx DMA by writing the address
+ * of the next immediate free descriptor
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));
+
+ /* Start the Tx timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
+
+ ring->tx.xmit_more = 0;
+}
+
+static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ unsigned int tso_context, vlan_context;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int csum, tso, vlan;
+ int start_index = ring->cur;
+ int cur_index = ring->cur;
+ unsigned int tx_set_ic;
+ int i;
+
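+ /* Write one optional context descriptor (new MSS or VLAN tag)
+ * followed by one normal descriptor per mapped buffer; the OWN bit
+ * of the first descriptor is set last so the DMA engine never sees
+ * a partially written chain
+ */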
+ pkt_info = &ring->pkt_info;
+ csum = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN);
+ tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
+ vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
+
+ if (tso && (pkt_info->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ ring->coalesce_count += pkt_info->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (pkt_info->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) <
+ pkt_info->tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+
+ /* Create a context descriptor if this is a TSO pkt_info */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "TSO context descriptor, mss=%u\n",
+ pkt_info->mss);
+
+ /* Set the MSS size */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_CONTEXT_DESC2_MSS_POS,
+ TX_CONTEXT_DESC2_MSS_LEN,
+ pkt_info->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_CTXT_POS,
+ TX_CONTEXT_DESC3_CTXT_LEN,
+ 1);
+
+ /* Indicate this descriptor contains the MSS */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_TCMSSV_POS,
+ TX_CONTEXT_DESC3_TCMSSV_LEN,
+ 1);
+
+ ring->tx.cur_mss = pkt_info->mss;
+ }
+
+ if (vlan_context) {
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "VLAN context descriptor, ctag=%u\n",
+ pkt_info->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_CTXT_POS,
+ TX_CONTEXT_DESC3_CTXT_LEN,
+ 1);
+
+ /* Set the VLAN tag */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_VT_POS,
+ TX_CONTEXT_DESC3_VT_LEN,
+ pkt_info->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_CONTEXT_DESC3_VLTV_POS,
+ TX_CONTEXT_DESC3_VLTV_LEN,
+ 1);
+
+ ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
+ }
+
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+ dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+ /* Update the buffer length */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_HL_B1L_POS,
+ TX_NORMAL_DESC2_HL_B1L_LEN,
+ desc_data->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan) {
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_VTIR_POS,
+ TX_NORMAL_DESC2_VTIR_LEN,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+ pdata->stats.tx_vlan_packets++;
+ }
+
+ /* Timestamp enablement check */
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_PTP_POS,
+ TX_PACKET_ATTRIBUTES_PTP_LEN))
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_TTSE_POS,
+ TX_NORMAL_DESC2_TTSE_LEN,
+ 1);
+
+ /* Mark it as First Descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_FD_POS,
+ TX_NORMAL_DESC3_FD_LEN,
+ 1);
+
+ /* Mark it as a NORMAL descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN,
+ 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (cur_index != start_index)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN,
+ 1);
+
+ if (tso) {
+ /* Enable TSO */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TSE_POS,
+ TX_NORMAL_DESC3_TSE_LEN, 1);
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TCPPL_POS,
+ TX_NORMAL_DESC3_TCPPL_LEN,
+ pkt_info->tcp_payload_len);
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_TCPHDRLEN_POS,
+ TX_NORMAL_DESC3_TCPHDRLEN_LEN,
+ pkt_info->tcp_header_len / 4);
+
+ pdata->stats.tx_tso_packets++;
+ } else {
+ /* Enable CRC and Pad Insertion */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CPC_POS,
+ TX_NORMAL_DESC3_CPC_LEN, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CIC_POS,
+ TX_NORMAL_DESC3_CIC_LEN,
+ 0x3);
+
+ /* Set the total length to be transmitted */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_FL_POS,
+ TX_NORMAL_DESC3_FL_LEN,
+ pkt_info->length);
+ }
+
+ for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) {
+ cur_index++;
+ desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
+ dma_desc = desc_data->dma_desc;
+
+ /* Update buffer address */
+ dma_desc->desc0 =
+ cpu_to_le32(lower_32_bits(desc_data->skb_dma));
+ dma_desc->desc1 =
+ cpu_to_le32(upper_32_bits(desc_data->skb_dma));
+
+ /* Update the buffer length */
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_HL_B1L_POS,
+ TX_NORMAL_DESC2_HL_B1L_LEN,
+ desc_data->skb_dma_len);
+
+ /* Set OWN bit */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_CIC_POS,
+ TX_NORMAL_DESC3_CIC_LEN,
+ 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_LD_POS,
+ TX_NORMAL_DESC3_LD_LEN, 1);
+
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc2,
+ TX_NORMAL_DESC2_IC_POS,
+ TX_NORMAL_DESC2_IC_LEN, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ desc_data->tx.packets = pkt_info->tx_packets;
+ desc_data->tx.bytes = pkt_info->tx_bytes;
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ dma_wmb();
+
+ /* Set OWN bit for the first descriptor */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ dma_desc = desc_data->dma_desc;
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ TX_NORMAL_DESC3_OWN_POS,
+ TX_NORMAL_DESC3_OWN_LEN, 1);
+
+ if (netif_msg_tx_queued(pdata))
+ xlgmac_dump_tx_desc(pdata, ring, start_index,
+ pkt_info->desc_count, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ smp_wmb();
+
+ ring->cur = cur_index + 1;
+ if (!pkt_info->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xlgmac_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
+
+ XLGMAC_PR("%s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->dma_desc_count - 1),
+ (ring->cur - 1) & (ring->dma_desc_count - 1));
+}
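+
+/* Illustrative descriptor layout (not driver code): for a two-buffer
+ * packet with no context descriptor and Tx coalescing due, the code
+ * above produces:
+ *
+ *   desc[start]     FD=1, CTXT=0, OWN written only after dma_wmb()
+ *   desc[start + 1] LD=1, IC=1, OWN=1 (written before the barrier)
+ *
+ * Deferring OWN on the first descriptor keeps the DMA engine from
+ * fetching a partially initialized chain.
+ */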
+
+static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
+ struct xlgmac_dma_desc *dma_desc)
+{
+ u32 tsa, tsd;
+ u64 nsec;
+
+ tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_CONTEXT_DESC3_TSA_POS,
+ RX_CONTEXT_DESC3_TSA_LEN);
+ tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_CONTEXT_DESC3_TSD_POS,
+ RX_CONTEXT_DESC3_TSD_LEN);
+ if (tsa && !tsd) {
+ nsec = le32_to_cpu(dma_desc->desc1);
+ nsec <<= 32;
+ nsec |= le32_to_cpu(dma_desc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ pkt_info->rx_tstamp = nsec;
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
+ RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
+ 1);
+ }
+ }
+}
+
+static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
+{
+ struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Reset desc2 control bits (IC, TTSE, B2L & B1L)
+ * Reset desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ dma_desc->desc0 = 0;
+ dma_desc->desc1 = 0;
+ dma_desc->desc2 = 0;
+ dma_desc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+}
+
+static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
+{
+ struct xlgmac_ring *ring = channel->tx_ring;
+ struct xlgmac_desc_data *desc_data;
+ int start_index = ring->cur;
+ int i;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+ /* Initialize Tx descriptor */
+ xlgmac_tx_desc_reset(desc_data);
+ }
+
+ /* Update the total number of Tx descriptors */
+ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
+
+ /* Update the starting address of descriptor ring */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ writel(upper_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
+}
+
+static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int index)
+{
+ struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+ unsigned int rx_frames = pdata->rx_frames;
+ unsigned int rx_usecs = pdata->rx_usecs;
+ dma_addr_t hdr_dma, buf_dma;
+ unsigned int inte;
+
+ if (!rx_usecs && !rx_frames) {
+ /* No coalescing, interrupt for every descriptor */
+ inte = 1;
+ } else {
+ /* Set interrupt based on Rx frame coalescing setting */
+ if (rx_frames && !((index + 1) % rx_frames))
+ inte = 1;
+ else
+ inte = 0;
+ }
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
+ */
+ hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
+ buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
+ dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+ dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+ dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+ dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
+
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ RX_NORMAL_DESC3_INTE_POS,
+ RX_NORMAL_DESC3_INTE_LEN,
+ inte);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ dma_wmb();
+
+ dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+ dma_desc->desc3,
+ RX_NORMAL_DESC3_OWN_POS,
+ RX_NORMAL_DESC3_OWN_LEN,
+ 1);
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+}
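+
+/* Worked example (assumed settings): with rx_frames = 32 and a nonzero
+ * rx_usecs, the logic above sets INTE only when !((index + 1) % 32),
+ * i.e. on descriptors 31, 63, 95, ..., so one Rx interrupt is raised
+ * per 32 frames and the RIWT watchdog programmed in
+ * xlgmac_config_rx_coalesce() covers the tail of a burst.
+ */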
+
+static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ unsigned int start_index = ring->cur;
+ struct xlgmac_desc_data *desc_data;
+ unsigned int i;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->dma_desc_count; i++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+ /* Initialize Rx descriptor */
+ xlgmac_rx_desc_reset(pdata, desc_data, i);
+ }
+
+ /* Update the total number of Rx descriptors */
+ writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));
+
+ /* Update the starting address of descriptor ring */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+ writel(upper_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
+ ring->dma_desc_count - 1);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_CTXT_POS,
+ TX_NORMAL_DESC3_CTXT_LEN);
+}
+
+static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ TX_NORMAL_DESC3_LD_POS,
+ TX_NORMAL_DESC3_LD_LEN);
+}
+
+static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, regval;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+ MTL_Q_RQOMR_EHFC_LEN, 0);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ /* Clear MAC flow control */
+ max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ regval = readl(pdata->mac_regs + reg);
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MAC_Q0TFCR_TFE_POS,
+ MAC_Q0TFCR_TFE_LEN,
+ 0);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, regval;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
+ MTL_Q_RQOMR_EHFC_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ /* Set MAC flow control */
+ max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ regval = readl(pdata->mac_regs + reg);
+
+ /* Enable transmit flow control */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
+ MAC_Q0TFCR_TFE_LEN, 1);
+ /* Set pause time */
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
+ MAC_Q0TFCR_PT_LEN, 0xffff);
+
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RFCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
+ MAC_RFCR_RFE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RFCR);
+
+ return 0;
+}
+
+static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MAC_RFCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
+ MAC_RFCR_RFE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RFCR);
+
+ return 0;
+}
+
+static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata)
+{
+ if (pdata->tx_pause)
+ xlgmac_enable_tx_flow_control(pdata);
+ else
+ xlgmac_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata)
+{
+ if (pdata->rx_pause)
+ xlgmac_enable_rx_flow_control(pdata);
+ else
+ xlgmac_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
+ DMA_CH_RIWT_RWT_LEN,
+ pdata->rx_riwt);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
+ }
+
+ return 0;
+}
+
+static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
+{
+ xlgmac_config_tx_flow_control(pdata);
+ xlgmac_config_rx_flow_control(pdata);
+}
+
+static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
+ MTL_Q_RQOMR_FEP_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+}
+
+static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
+ MTL_Q_RQOMR_FUP_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+}
+
+static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
+{
+ return 0;
+}
+
+static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
+ DMA_CH_RCR_RBSZ_LEN,
+ pdata->rx_buf_size);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+}
+
+static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ if (pdata->hw_feat.tso) {
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
+ DMA_CH_TCR_TSE_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+ }
+}
+
+static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
+ DMA_CH_CR_SPH_LEN, 1);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ }
+
+ regval = readl(pdata->mac_regs + MAC_RCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
+ MAC_RCR_HDSMS_LEN,
+ XLGMAC_SPH_HDSMS_SIZE);
+ writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ rate = pdata->sysclk_rate;
+
+ /* Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ * ( usec * ( system_clock_hz / 10^6 ) ) / 256
+ */
+ ret = (usec * (rate / 1000000)) / 256;
+
+ return ret;
+}
+
+static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ rate = pdata->sysclk_rate;
+
+ /* Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ * ( riwt * 256 ) / ( system_clock_hz / 10^6 )
+ */
+ ret = (riwt * 256) / (rate / 1000000);
+
+ return ret;
+}
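+
+/* Worked example (assuming rate = 500000000, i.e. a 500 MHz clock):
+ * xlgmac_usec_to_riwt(pdata, 100) = (100 * 500) / 256 = 195 and
+ * xlgmac_riwt_to_usec(pdata, 195) = (195 * 256) / 500 = 99; the round
+ * trip can lose a microsecond to integer truncation.
+ */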
+
+static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
+ MTL_Q_RQOMR_RTC_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ return 0;
+}
+
+static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ regval = readl(pdata->mac_regs + MTL_OMR);
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
+ MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
+ writel(regval, pdata->mac_regs + MTL_OMR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
+ MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
+
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
+ MTL_TC_QWR_QW_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
+ }
+
+ /* Set Rx to strict priority algorithm */
+ regval = readl(pdata->mac_regs + MTL_OMR);
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
+ MTL_OMR_RAA_LEN, MTL_RAA_SP);
+ writel(regval, pdata->mac_regs + MTL_OMR);
+}
+
+static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
+{
+ unsigned int ppq, ppq_extra, prio, prio_queues;
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int reg, regval;
+ unsigned int mask;
+ unsigned int i, j;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
+ regval = readl(XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MTL_Q_TQOMR_Q2TCMAP_POS,
+ MTL_Q_TQOMR_Q2TCMAP_LEN,
+ i);
+ writel(regval, XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ queue++;
+ }
+
+ if (i < qptc_extra) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
+ regval = readl(XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval,
+ MTL_Q_TQOMR_Q2TCMAP_POS,
+ MTL_Q_TQOMR_Q2TCMAP_LEN,
+ i);
+ writel(regval, XLGMAC_MTL_REG(pdata, queue,
+ MTL_Q_TQOMR));
+ queue++;
+ }
+ }
+
+ /* Map the 8 VLAN priority values to available MTL Rx queues */
+ prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+ pdata->rx_q_count);
+ ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+ ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+ reg = MAC_RQC2R;
+ regval = 0;
+ for (i = 0, prio = 0; i < prio_queues;) {
+ mask = 0;
+ for (j = 0; j < ppq; j++) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ prio++;
+ }
+
+ if (i < ppq_extra) {
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ prio++;
+ }
+
+ regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+ if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+ continue;
+
+ writel(regval, pdata->mac_regs + reg);
+ reg += MAC_RQC2_INC;
+ regval = 0;
+ }
+
+ /* Configure one to one, MTL Rx queue to DMA Rx channel mapping
+ * i.e. Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
+ */
+ reg = MTL_RQDCM0R;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
+ MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MTL_RQDCM_INC;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
+ MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+
+ reg += MTL_RQDCM_INC;
+ regval = readl(pdata->mac_regs + reg);
+ regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
+ MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
+ writel(regval, pdata->mac_regs + reg);
+}
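+
+/* Worked example (assuming rx_q_count = 3 and MAC_RQC2_Q_PER_REG = 4):
+ * prio_queues = 3, ppq = 2, ppq_extra = 2, so the loop above builds
+ * masks 0x07 (PRIO0-2 -> RXq0), 0x38 (PRIO3-5 -> RXq1) and 0xc0
+ * (PRIO6-7 -> RXq2), packed one byte per queue into a single write of
+ * MAC_RQC2R.
+ */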
+
+static unsigned int xlgmac_calculate_per_queue_fifo(
+ unsigned int fifo_size,
+ unsigned int queue_count)
+{
+ unsigned int q_fifo_size;
+ unsigned int p_fifo;
+
+ /* Calculate the configured fifo size */
+ q_fifo_size = 1 << (fifo_size + 7);
+
+ /* The configured value may not be the actual amount of fifo RAM */
+ q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size);
+
+ q_fifo_size = q_fifo_size / queue_count;
+
+ /* Each increment in the queue fifo size represents 256 bytes of
+ * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+ * between the queues.
+ */
+ p_fifo = q_fifo_size / 256;
+ if (p_fifo)
+ p_fifo--;
+
+ return p_fifo;
+}
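+
+/* Worked example (assuming the hardware reports tx_fifo_size = 9 with
+ * tx_q_count = 8, and 64 KB is below the XLGMAC_MAX_FIFO cap):
+ * q_fifo_size = 1 << (9 + 7) = 64 KB, or 8 KB per queue, giving
+ * p_fifo = 8192 / 256 - 1 = 31; the callers below write that value
+ * into TQS/RQS and report (31 + 1) * 256 = 8192 bytes per queue.
+ */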
+
+static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int i;
+ u32 regval;
+
+ fifo_size = xlgmac_calculate_per_queue_fifo(
+ pdata->hw_feat.tx_fifo_size,
+ pdata->tx_q_count);
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
+ MTL_Q_TQOMR_TQS_LEN, fifo_size);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int i;
+ u32 regval;
+
+ fifo_size = xlgmac_calculate_per_queue_fifo(
+ pdata->hw_feat.rx_fifo_size,
+ pdata->rx_q_count);
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
+ MTL_Q_RQOMR_RQS_LEN, fifo_size);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
+ /* Activate flow control when less than 4k left in fifo */
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
+ MTL_Q_RQFCR_RFA_LEN, 2);
+ /* De-activate flow control when more than 6k left in fifo */
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
+ MTL_Q_RQFCR_RFD_LEN, 4);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
+ }
+}
+
+static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
+ MTL_Q_TQOMR_TTC_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
+ MTL_Q_RQOMR_RSF_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
+ MTL_Q_TQOMR_TSF_LEN, val);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
+ DMA_CH_TCR_OSP_LEN,
+ pdata->tx_osp_mode);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
+ DMA_CH_CR_PBLX8_LEN,
+ pdata->pblx8);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR));
+ regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
+ DMA_CH_TCR_PBL_LEN);
+ return regval;
+}
+
+static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
+ DMA_CH_TCR_PBL_LEN,
+ pdata->tx_pbl);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+ }
+
+ return 0;
+}
+
+static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR));
+ regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
+ DMA_CH_RCR_PBL_LEN);
+ return regval;
+}
+
+static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ u32 regval;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
+ DMA_CH_RCR_PBL_LEN,
+ pdata->rx_pbl);
+ writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+ }
+
+ return 0;
+}
+
+static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ }
+
+ val = (u64)readl(pdata->mac_regs + reg_lo);
+
+ if (read_hi)
+ val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32);
+
+ return val;
+}
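+
+/* Example: a 64-bit counter such as MMC_TXOCTETCOUNT_GB_LO is read as
+ *
+ *   u64 octets = xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ *
+ * where the register at reg_lo + 4 supplies bits 63:32. With
+ * reset-on-read enabled in xlgmac_config_mmc(), each read returns the
+ * delta since the previous one, which is why all callers accumulate
+ * with "+=".
+ */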
+
+static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata)
+{
+ unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR);
+ struct xlgmac_stats *stats = &pdata->stats;
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXOCTETCOUNT_GB_POS,
+ MMC_TISR_TXOCTETCOUNT_GB_LEN))
+ stats->txoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXFRAMECOUNT_GB_POS,
+ MMC_TISR_TXFRAMECOUNT_GB_LEN))
+ stats->txframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXBROADCASTFRAMES_G_POS,
+ MMC_TISR_TXBROADCASTFRAMES_G_LEN))
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXMULTICASTFRAMES_G_POS,
+ MMC_TISR_TXMULTICASTFRAMES_G_LEN))
+ stats->txmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX64OCTETS_GB_POS,
+ MMC_TISR_TX64OCTETS_GB_LEN))
+ stats->tx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX65TO127OCTETS_GB_POS,
+ MMC_TISR_TX65TO127OCTETS_GB_LEN))
+ stats->tx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX128TO255OCTETS_GB_POS,
+ MMC_TISR_TX128TO255OCTETS_GB_LEN))
+ stats->tx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX256TO511OCTETS_GB_POS,
+ MMC_TISR_TX256TO511OCTETS_GB_LEN))
+ stats->tx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX512TO1023OCTETS_GB_POS,
+ MMC_TISR_TX512TO1023OCTETS_GB_LEN))
+ stats->tx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TX1024TOMAXOCTETS_GB_POS,
+ MMC_TISR_TX1024TOMAXOCTETS_GB_LEN))
+ stats->tx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXUNICASTFRAMES_GB_POS,
+ MMC_TISR_TXUNICASTFRAMES_GB_LEN))
+ stats->txunicastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXMULTICASTFRAMES_GB_POS,
+ MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
+ stats->txmulticastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXBROADCASTFRAMES_GB_POS,
+ MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXUNDERFLOWERROR_POS,
+ MMC_TISR_TXUNDERFLOWERROR_LEN))
+ stats->txunderflowerror +=
+ xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXOCTETCOUNT_G_POS,
+ MMC_TISR_TXOCTETCOUNT_G_LEN))
+ stats->txoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXFRAMECOUNT_G_POS,
+ MMC_TISR_TXFRAMECOUNT_G_LEN))
+ stats->txframecount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXPAUSEFRAMES_POS,
+ MMC_TISR_TXPAUSEFRAMES_LEN))
+ stats->txpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_TISR_TXVLANFRAMES_G_POS,
+ MMC_TISR_TXVLANFRAMES_G_LEN))
+ stats->txvlanframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata)
+{
+ unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR);
+ struct xlgmac_stats *stats = &pdata->stats;
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXFRAMECOUNT_GB_POS,
+ MMC_RISR_RXFRAMECOUNT_GB_LEN))
+ stats->rxframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOCTETCOUNT_GB_POS,
+ MMC_RISR_RXOCTETCOUNT_GB_LEN))
+ stats->rxoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOCTETCOUNT_G_POS,
+ MMC_RISR_RXOCTETCOUNT_G_LEN))
+ stats->rxoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXBROADCASTFRAMES_G_POS,
+ MMC_RISR_RXBROADCASTFRAMES_G_LEN))
+ stats->rxbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXMULTICASTFRAMES_G_POS,
+ MMC_RISR_RXMULTICASTFRAMES_G_LEN))
+ stats->rxmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXCRCERROR_POS,
+ MMC_RISR_RXCRCERROR_LEN))
+ stats->rxcrcerror +=
+ xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXRUNTERROR_POS,
+ MMC_RISR_RXRUNTERROR_LEN))
+ stats->rxrunterror +=
+ xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXJABBERERROR_POS,
+ MMC_RISR_RXJABBERERROR_LEN))
+ stats->rxjabbererror +=
+ xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXUNDERSIZE_G_POS,
+ MMC_RISR_RXUNDERSIZE_G_LEN))
+ stats->rxundersize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOVERSIZE_G_POS,
+ MMC_RISR_RXOVERSIZE_G_LEN))
+ stats->rxoversize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX64OCTETS_GB_POS,
+ MMC_RISR_RX64OCTETS_GB_LEN))
+ stats->rx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX65TO127OCTETS_GB_POS,
+ MMC_RISR_RX65TO127OCTETS_GB_LEN))
+ stats->rx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX128TO255OCTETS_GB_POS,
+ MMC_RISR_RX128TO255OCTETS_GB_LEN))
+ stats->rx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX256TO511OCTETS_GB_POS,
+ MMC_RISR_RX256TO511OCTETS_GB_LEN))
+ stats->rx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX512TO1023OCTETS_GB_POS,
+ MMC_RISR_RX512TO1023OCTETS_GB_LEN))
+ stats->rx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RX1024TOMAXOCTETS_GB_POS,
+ MMC_RISR_RX1024TOMAXOCTETS_GB_LEN))
+ stats->rx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXUNICASTFRAMES_G_POS,
+ MMC_RISR_RXUNICASTFRAMES_G_LEN))
+ stats->rxunicastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXLENGTHERROR_POS,
+ MMC_RISR_RXLENGTHERROR_LEN))
+ stats->rxlengtherror +=
+ xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXOUTOFRANGETYPE_POS,
+ MMC_RISR_RXOUTOFRANGETYPE_LEN))
+ stats->rxoutofrangetype +=
+ xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXPAUSEFRAMES_POS,
+ MMC_RISR_RXPAUSEFRAMES_LEN))
+ stats->rxpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXFIFOOVERFLOW_POS,
+ MMC_RISR_RXFIFOOVERFLOW_LEN))
+ stats->rxfifooverflow +=
+ xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXVLANFRAMES_GB_POS,
+ MMC_RISR_RXVLANFRAMES_GB_LEN))
+ stats->rxvlanframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XLGMAC_GET_REG_BITS(mmc_isr,
+ MMC_RISR_RXWATCHDOGERROR_POS,
+ MMC_RISR_RXWATCHDOGERROR_LEN))
+ stats->rxwatchdogerror +=
+ xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_stats *stats = &pdata->stats;
+ u32 regval;
+
+ /* Freeze counters */
+ regval = readl(pdata->mac_regs + MMC_CR);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
+ MMC_CR_MCF_LEN, 1);
+ writel(regval, pdata->mac_regs + MMC_CR);
+
+ stats->txoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ regval = readl(pdata->mac_regs + MMC_CR);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
+ MMC_CR_MCF_LEN, 0);
+ writel(regval, pdata->mac_regs + MMC_CR);
+}
+
+static void xlgmac_config_mmc(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + MMC_CR);
+ /* Set counters to reset on read */
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS,
+ MMC_CR_ROR_LEN, 1);
+ /* Reset the counters */
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS,
+ MMC_CR_CR_LEN, 1);
+ writel(regval, pdata->mac_regs + MMC_CR);
+}
+
+static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+ u32 regval;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
+ MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN);
+ if (regval) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ writel(val, pdata->mac_regs + MAC_RSSDR);
+
+ regval = readl(pdata->mac_regs + MAC_RSSAR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS,
+ MAC_RSSAR_RSSIA_LEN, index);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS,
+ MAC_RSSAR_ADDRT_LEN, type);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS,
+ MAC_RSSAR_CT_LEN, 0);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS,
+ MAC_RSSAR_OB_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RSSAR);
+
+ wait = 1000;
+ while (wait--) {
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
+ MAC_RSSAR_OB_POS,
+ MAC_RSSAR_OB_LEN);
+ if (!regval)
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xlgmac_write_rss_reg(pdata,
+ XLGMAC_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xlgmac_write_rss_hash_key(pdata);
+}
+
+static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+ u32 tval;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ tval = table[i];
+ pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
+ pdata->rss_table[i],
+ MAC_RSSDR_DMCH_POS,
+ MAC_RSSDR_DMCH_LEN,
+ tval);
+ }
+
+ return xlgmac_write_rss_lookup_table(pdata);
+}
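+
+/* Usage sketch (hypothetical values): to spread hash buckets across
+ * four DMA channels, a caller such as an ethtool -X handler might pass
+ * a table with table[i] = i % 4; each entry lands in the DMCH field of
+ * one lookup-table register via the indirect-access protocol in
+ * xlgmac_write_rss_reg().
+ */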
+
+static int xlgmac_enable_rss(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xlgmac_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xlgmac_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR);
+
+ /* Enable RSS */
+ regval = readl(pdata->mac_regs + MAC_RSSCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+ MAC_RSSCR_RSSE_LEN, 1);
+ writel(regval, pdata->mac_regs + MAC_RSSCR);
+
+ return 0;
+}
+
+static int xlgmac_disable_rss(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ regval = readl(pdata->mac_regs + MAC_RSSCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
+ MAC_RSSCR_RSSE_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_RSSCR);
+
+ return 0;
+}
+
+static void xlgmac_config_rss(struct xlgmac_pdata *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xlgmac_enable_rss(pdata);
+ else
+ ret = xlgmac_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
+static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int dma_ch_isr, dma_ch_ier;
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+ /* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_NIE_POS,
+ DMA_CH_IER_NIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_AIE_POS,
+ DMA_CH_IER_AIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
+ DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ if (!pdata->per_channel_irq)
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN,
+ 1);
+ }
+ if (channel->rx_ring) {
+ /* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN,
+ 1);
+ if (!pdata->per_channel_irq)
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier,
+ DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN,
+ 1);
+ }
+
+ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+ }
+}
+
+static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int q_count, i;
+ unsigned int mtl_q_isr;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+ writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+
+ /* No MTL interrupts to be enabled */
+ writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER));
+ }
+}
+
+static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata)
+{
+ unsigned int mac_ier = 0;
+ u32 regval;
+
+ /* Enable Timestamp interrupt */
+ mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS,
+ MAC_IER_TSIE_LEN, 1);
+
+ writel(mac_ier, pdata->mac_regs + MAC_IER);
+
+ /* Enable all counter interrupts */
+ regval = readl(pdata->mac_regs + MMC_RIER);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS,
+ MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+ writel(regval, pdata->mac_regs + MMC_RIER);
+ regval = readl(pdata->mac_regs + MMC_TIER);
+ regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS,
+ MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+ writel(regval, pdata->mac_regs + MMC_TIER);
+}
+
+static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x1)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x1);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x2)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x2);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
+
+static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+ MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+ if (regval == 0x3)
+ return 0;
+
+ regval = readl(pdata->mac_regs + MAC_TCR);
+ regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+ MAC_TCR_SS_LEN, 0x3);
+ writel(regval, pdata->mac_regs + MAC_TCR);
+
+ return 0;
+}
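+
+/* MAC_TCR.SS speed-select encoding used by the four helpers above:
+ *   0x0 - XLGMII 40 Gbps
+ *   0x1 - XLGMII 25 Gbps
+ *   0x2 - XLGMII 50 Gbps
+ *   0x3 - XLGMII 100 Gbps
+ */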
+
+static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata)
+{
+ switch (pdata->phy_speed) {
+ case SPEED_100000:
+ xlgmac_set_xlgmii_100000_speed(pdata);
+ break;
+
+ case SPEED_50000:
+ xlgmac_set_xlgmii_50000_speed(pdata);
+ break;
+
+ case SPEED_40000:
+ xlgmac_set_xlgmii_40000_speed(pdata);
+ break;
+
+ case SPEED_25000:
+ xlgmac_set_xlgmii_25000_speed(pdata);
+ break;
+ }
+}
+
+static int xlgmac_dev_read(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int err, etlt, l34t;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ dma_desc = desc_data->dma_desc;
+ pkt_info = &ring->pkt_info;
+
+ /* Check for data availability */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_OWN_POS,
+ RX_NORMAL_DESC3_OWN_LEN))
+ return 1;
+
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ dma_rmb();
+
+ if (netif_msg_rx_status(pdata))
+ xlgmac_dump_rx_desc(pdata, ring, ring->cur);
+
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_CTXT_POS,
+ RX_NORMAL_DESC3_CTXT_LEN)) {
+ /* Timestamp Context Descriptor */
+ xlgmac_get_rx_tstamp(pkt_info, dma_desc);
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
+ 1);
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
+ 0);
+ return 0;
+ }
+
+ /* Normal Descriptor, be sure Context Descriptor bit is off */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
+ 0);
+
+ /* Indicate if a Context Descriptor is next */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_CDA_POS,
+ RX_NORMAL_DESC3_CDA_LEN))
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
+ 1);
+
+ /* Get the header length */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_FD_POS,
+ RX_NORMAL_DESC3_FD_LEN)) {
+ desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
+ RX_NORMAL_DESC2_HL_POS,
+ RX_NORMAL_DESC2_HL_LEN);
+ if (desc_data->rx.hdr_len)
+ pdata->stats.rx_split_header_packets++;
+ }
+
+ /* Get the RSS hash */
+ if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_RSV_POS,
+ RX_NORMAL_DESC3_RSV_LEN)) {
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
+ 1);
+
+ pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);
+
+ l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_L34T_POS,
+ RX_NORMAL_DESC3_L34T_LEN);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
+ /* Get the packet length */
+ desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_PL_POS,
+ RX_NORMAL_DESC3_PL_LEN);
+
+ if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_LD_POS,
+ RX_NORMAL_DESC3_LD_LEN)) {
+ /* Not all the data has been transferred for this packet */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
+ 1);
+ return 0;
+ }
+
+ /* This is the last of the data for this packet */
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
+ 0);
+
+ /* Set checksum done indicator as appropriate */
+ if (netdev->features & NETIF_F_RXCSUM)
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
+ 1);
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_ES_POS,
+ RX_NORMAL_DESC3_ES_LEN);
+ etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+ RX_NORMAL_DESC3_ETLT_POS,
+ RX_NORMAL_DESC3_ETLT_LEN);
+ netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
+
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
+ if ((etlt == 0x09) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
+ 1);
+ pkt_info->vlan_ctag =
+ XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
+ RX_NORMAL_DESC0_OVT_POS,
+ RX_NORMAL_DESC0_OVT_LEN);
+ netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+ pkt_info->vlan_ctag);
+ }
+ } else {
+ if ((etlt == 0x05) || (etlt == 0x06))
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
+ 0);
+ else
+ pkt_info->errors = XLGMAC_SET_REG_BITS(
+ pkt_info->errors,
+ RX_PACKET_ERRORS_FRAME_POS,
+ RX_PACKET_ERRORS_FRAME_LEN,
+ 1);
+ }
+
+ XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->dma_desc_count - 1), ring->cur);
+
+ return 0;
+}
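+
+/* ETLT handling above, summarized: on a last descriptor, err=1 with
+ * etlt=0x05 or 0x06 clears CSUM_DONE so the stack re-verifies the
+ * checksum; err=0 with etlt=0x09 means a VLAN tag was stripped into
+ * desc0.OVT; any other error code sets RX_PACKET_ERRORS_FRAME.
+ */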
+
+static int xlgmac_enable_int(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ switch (int_id) {
+ case XLGMAC_INT_DMA_CH_SR_TI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TXSE_POS,
+ DMA_CH_IER_TXSE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TBUE_POS,
+ DMA_CH_IER_TBUE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RSE_POS,
+ DMA_CH_IER_RSE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TI_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 1);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_FBE:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 1);
+ break;
+ case XLGMAC_INT_DMA_ALL:
+ dma_ch_ier |= channel->saved_ier;
+ break;
+ default:
+ return -1;
+ }
+
+ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ return 0;
+}
+
+static int xlgmac_disable_int(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ switch (int_id) {
+ case XLGMAC_INT_DMA_CH_SR_TI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TXSE_POS,
+ DMA_CH_IER_TXSE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TBUE_POS,
+ DMA_CH_IER_TBUE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RBU:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RBUE_POS,
+ DMA_CH_IER_RBUE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_RPS:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RSE_POS,
+ DMA_CH_IER_RSE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_TI_RI:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_TIE_POS,
+ DMA_CH_IER_TIE_LEN, 0);
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_RIE_POS,
+ DMA_CH_IER_RIE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_CH_SR_FBE:
+ dma_ch_ier = XLGMAC_SET_REG_BITS(
+ dma_ch_ier, DMA_CH_IER_FBEE_POS,
+ DMA_CH_IER_FBEE_LEN, 0);
+ break;
+ case XLGMAC_INT_DMA_ALL:
+ channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
+ dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
+ break;
+ default:
+ return -1;
+ }
+
+ writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+ return 0;
+}
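+
+/* Usage sketch (assumed caller, for illustration only): the
+ * XLGMAC_INT_DMA_ALL cases pair up across the two helpers, e.g.
+ *
+ *   xlgmac_disable_int(channel, XLGMAC_INT_DMA_ALL); // saves IER mask
+ *   ... poll with channel interrupts masked ...
+ *   xlgmac_enable_int(channel, XLGMAC_INT_DMA_ALL);  // restores it
+ */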
+
+static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata)
+{
+ unsigned int i, count;
+ u32 regval;
+
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
+ MTL_Q_TQOMR_FTQ_LEN, 1);
+ writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ }
+
+ /* Poll Until Poll Condition */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+ regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
+ MTL_Q_TQOMR_FTQ_LEN);
+ while (--count && regval) {
+ usleep_range(500, 600);
+ regval = XLGMAC_GET_REG_BITS(
+ readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)),
+ MTL_Q_TQOMR_FTQ_POS, MTL_Q_TQOMR_FTQ_LEN);
+ }
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
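+
+/* Poll budget above: 2000 iterations of usleep_range(500, 600) bounds
+ * the wait for FTQ to self-clear at roughly one second per queue
+ * before giving up with -EBUSY.
+ */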
+
+static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata)
+{
+ u32 regval;
+
+ regval = readl(pdata->mac_regs + DMA_SBMR);
+ /* Set enhanced addressing mode */
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS,
+ DMA_SBMR_EAME_LEN, 1);
+ /* Set the System Bus mode */
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS,
+ DMA_SBMR_UNDEF_LEN, 1);
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS,
+ DMA_SBMR_BLEN_256_LEN, 1);
+ writel(regval, pdata->mac_regs + DMA_SBMR);
+}
+
+static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ int ret;
+
+ /* Flush Tx queues */
+ ret = xlgmac_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+
+ /* Initialize DMA related features */
+ xlgmac_config_dma_bus(pdata);
+ xlgmac_config_osp_mode(pdata);
+ xlgmac_config_pblx8(pdata);
+ xlgmac_config_tx_pbl_val(pdata);
+ xlgmac_config_rx_pbl_val(pdata);
+ xlgmac_config_rx_coalesce(pdata);
+ xlgmac_config_tx_coalesce(pdata);
+ xlgmac_config_rx_buffer_size(pdata);
+ xlgmac_config_tso_mode(pdata);
+ xlgmac_config_sph_mode(pdata);
+ xlgmac_config_rss(pdata);
+ desc_ops->tx_desc_init(pdata);
+ desc_ops->rx_desc_init(pdata);
+ xlgmac_enable_dma_interrupts(pdata);
+
+ /* Initialize MTL related features */
+ xlgmac_config_mtl_mode(pdata);
+ xlgmac_config_queue_mapping(pdata);
+ xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
+ xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
+ xlgmac_config_tx_fifo_size(pdata);
+ xlgmac_config_rx_fifo_size(pdata);
+ xlgmac_config_flow_control_threshold(pdata);
+ xlgmac_config_rx_fep_enable(pdata);
+ xlgmac_config_rx_fup_enable(pdata);
+ xlgmac_enable_mtl_interrupts(pdata);
+
+ /* Initialize MAC related features */
+ xlgmac_config_mac_address(pdata);
+ xlgmac_config_rx_mode(pdata);
+ xlgmac_config_jumbo_enable(pdata);
+ xlgmac_config_flow_control(pdata);
+ xlgmac_config_mac_speed(pdata);
+ xlgmac_config_checksum_offload(pdata);
+ xlgmac_config_vlan_support(pdata);
+ xlgmac_config_mmc(pdata);
+ xlgmac_enable_mac_interrupts(pdata);
+
+ return 0;
+}
+
+static int xlgmac_hw_exit(struct xlgmac_pdata *pdata)
+{
+ unsigned int count = 2000;
+ u32 regval;
+
+ /* Issue a software reset */
+ regval = readl(pdata->mac_regs + DMA_MR);
+ regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS,
+ DMA_MR_SWR_LEN, 1);
+ writel(regval, pdata->mac_regs + DMA_MR);
+ usleep_range(10, 15);
+
+ /* Poll until the software reset bit self-clears */
+ while (--count &&
+ XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR),
+ DMA_MR_SWR_POS, DMA_MR_SWR_LEN))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ return 0;
+}
+
+void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops)
+{
+ hw_ops->init = xlgmac_hw_init;
+ hw_ops->exit = xlgmac_hw_exit;
+
+ hw_ops->tx_complete = xlgmac_tx_complete;
+
+ hw_ops->enable_tx = xlgmac_enable_tx;
+ hw_ops->disable_tx = xlgmac_disable_tx;
+ hw_ops->enable_rx = xlgmac_enable_rx;
+ hw_ops->disable_rx = xlgmac_disable_rx;
+
+ hw_ops->dev_xmit = xlgmac_dev_xmit;
+ hw_ops->dev_read = xlgmac_dev_read;
+ hw_ops->enable_int = xlgmac_enable_int;
+ hw_ops->disable_int = xlgmac_disable_int;
+
+ hw_ops->set_mac_address = xlgmac_set_mac_address;
+ hw_ops->config_rx_mode = xlgmac_config_rx_mode;
+ hw_ops->enable_rx_csum = xlgmac_enable_rx_csum;
+ hw_ops->disable_rx_csum = xlgmac_disable_rx_csum;
+
+ /* For MII speed configuration */
+ hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed;
+ hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed;
+ hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed;
+ hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed;
+
+ /* For descriptor related operation */
+ hw_ops->tx_desc_init = xlgmac_tx_desc_init;
+ hw_ops->rx_desc_init = xlgmac_rx_desc_init;
+ hw_ops->tx_desc_reset = xlgmac_tx_desc_reset;
+ hw_ops->rx_desc_reset = xlgmac_rx_desc_reset;
+ hw_ops->is_last_desc = xlgmac_is_last_desc;
+ hw_ops->is_context_desc = xlgmac_is_context_desc;
+ hw_ops->tx_start_xmit = xlgmac_tx_start_xmit;
+
+ /* For Flow Control */
+ hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control;
+ hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control;
+
+ /* For Vlan related config */
+ hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping;
+ hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping;
+ hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering;
+ hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering;
+ hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table;
+
+ /* For RX coalescing */
+ hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce;
+ hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce;
+ hw_ops->usec_to_riwt = xlgmac_usec_to_riwt;
+ hw_ops->riwt_to_usec = xlgmac_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_ops->config_rx_threshold = xlgmac_config_rx_threshold;
+ hw_ops->config_tx_threshold = xlgmac_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_ops->config_rsf_mode = xlgmac_config_rsf_mode;
+ hw_ops->config_tsf_mode = xlgmac_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_ops->config_osp_mode = xlgmac_config_osp_mode;
+
+ /* For RX and TX PBL config */
+ hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val;
+ hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val;
+ hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val;
+ hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val;
+ hw_ops->config_pblx8 = xlgmac_config_pblx8;
+
+ /* For MMC statistics support */
+ hw_ops->tx_mmc_int = xlgmac_tx_mmc_int;
+ hw_ops->rx_mmc_int = xlgmac_rx_mmc_int;
+ hw_ops->read_mmc_stats = xlgmac_read_mmc_stats;
+
+ /* For Receive Side Scaling */
+ hw_ops->enable_rss = xlgmac_enable_rss;
+ hw_ops->disable_rss = xlgmac_disable_rss;
+ hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key;
+ hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table;
+}
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
new file mode 100644
index 000000000000..3b91257683bc
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -0,0 +1,1350 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_one_poll(struct napi_struct *, int);
+static int xlgmac_all_poll(struct napi_struct *, int);
+
+static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
+{
+ return (ring->dma_desc_count - (ring->cur - ring->dirty));
+}
+
+static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
+{
+ return (ring->cur - ring->dirty);
+}
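+
+/* Occupancy example for the two helpers above: cur and dirty are
+ * free-running unsigned indices, so with dma_desc_count = 512, cur = 700
+ * and dirty = 650 there are 50 descriptors in flight and 462 available;
+ * the unsigned subtraction stays correct even after the counters wrap.
+ */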
+
+static int xlgmac_maybe_stop_tx_queue(
+ struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring,
+ unsigned int count)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+
+ if (count > xlgmac_tx_avail_desc(ring)) {
+ netif_info(pdata, drv, pdata->netdev,
+ "Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(pdata->netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+
+ /* If we haven't notified the hardware because of xmit_more
+ * support, tell it now
+ */
+ if (ring->tx.xmit_more)
+ pdata->hw_ops.tx_start_xmit(channel, ring);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return 0;
+}
+
+static void xlgmac_prep_vlan(struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ if (skb_vlan_tag_present(skb))
+ pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
+}
+
+static int xlgmac_prep_tso(struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ int ret;
+
+ if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pkt_info->tcp_header_len = tcp_hdrlen(skb);
+ pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
+ pkt_info->mss = skb_shinfo(skb)->gso_size;
+
+ XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
+ XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
+ pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
+ XLGMAC_PR("mss=%u\n", pkt_info->mss);
+
+ /* Update the number of packets that will ultimately be transmitted
+ * along with the extra bytes for each extra packet
+ */
+ pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
+ pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
+
+ return 0;
+}
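+
+/* Worked example of the TSO accounting above (numbers are illustrative):
+ * a 9014-byte skb with a 66-byte Ethernet/IP/TCP header and an mss of
+ * 1448 carries 8948 payload bytes, so gso_segs = 7 and tx_bytes grows by
+ * (7 - 1) * 66 = 396 bytes of replicated headers on the wire.
+ */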
+
+static int xlgmac_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ return 1;
+}
+
+static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct sk_buff *skb,
+ struct xlgmac_pkt_info *pkt_info)
+{
+ struct skb_frag_struct *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ pkt_info->skb = skb;
+
+ context_desc = 0;
+ pkt_info->desc_count = 0;
+
+ pkt_info->tx_packets = 1;
+ pkt_info->tx_bytes = skb->len;
+
+ if (xlgmac_is_tso(skb)) {
+ /* TSO requires an extra descriptor if mss is different */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ pkt_info->desc_count++;
+ }
+
+ /* TSO requires an extra descriptor for TSO header */
+ pkt_info->desc_count++;
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
+ 1);
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+ 1);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+ TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+ 1);
+
+ if (skb_vlan_tag_present(skb)) {
+ /* VLAN requires an extra descriptor if tag is different */
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ /* We can share with the TSO context descriptor */
+ if (!context_desc) {
+ context_desc = 1;
+ pkt_info->desc_count++;
+ }
+
+ pkt_info->attributes = XLGMAC_SET_REG_BITS(
+ pkt_info->attributes,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
+ 1);
+ }
+
+ for (len = skb_headlen(skb); len;) {
+ pkt_info->desc_count++;
+ len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ for (len = skb_frag_size(frag); len; ) {
+ pkt_info->desc_count++;
+ len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
+ }
+ }
+}
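+
+/* Descriptor-count example for the logic above (sizes illustrative, each
+ * data descriptor bounded by XLGMAC_TX_MAX_BUF_SIZE): a TSO skb with a
+ * new mss costs one context descriptor plus one TSO header descriptor;
+ * a linear area and two page frags that each fit in a single buffer add
+ * three more, for a desc_count of 5.
+ */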
+
+static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
+ netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+ return -EINVAL;
+ }
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
+ rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
+ ~(XLGMAC_RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
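+
+/* Sizing example for the helper above: with an MTU of 1500 the buffer
+ * starts at 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4) = 1522,
+ * is clamped between XLGMAC_RX_MIN_BUF_SIZE and PAGE_SIZE, and is then
+ * rounded up to the next XLGMAC_RX_BUF_ALIGN boundary (1536 if the
+ * alignment is 64 bytes).
+ */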
+
+static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_channel *channel;
+ enum xlgmac_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_ops->enable_int(channel, int_id);
+ }
+}
+
+static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct xlgmac_channel *channel;
+ enum xlgmac_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XLGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_ops->disable_int(channel, int_id);
+ }
+}
+
+static irqreturn_t xlgmac_isr(int irq, void *data)
+{
+ unsigned int dma_isr, dma_ch_isr, mac_isr;
+ struct xlgmac_pdata *pdata = data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_hw_ops *hw_ops;
+ unsigned int i, ti, ri;
+
+ hw_ops = &pdata->hw_ops;
+
+ /* The DMA interrupt status register also reports MAC and MTL
+ * interrupts. So for polling mode, we just need to check for
+ * this register to be non-zero
+ */
+ dma_isr = readl(pdata->mac_regs + DMA_ISR);
+ if (!dma_isr)
+ return IRQ_HANDLED;
+
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel_head + i;
+
+ dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+ i, dma_ch_isr);
+
+ /* The TI or RI interrupt bits may still be set even if using
+ * per channel DMA interrupts. Check to be sure those are not
+ * enabled before using the private data napi structure.
+ */
+ ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
+ DMA_CH_SR_TI_LEN);
+ ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
+ DMA_CH_SR_RI_LEN);
+ if (!pdata->per_channel_irq && (ti || ri)) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xlgmac_disable_rx_tx_ints(pdata);
+
+ pdata->stats.napi_poll_isr++;
+ /* Turn on polling */
+ __napi_schedule_irqoff(&pdata->napi);
+ }
+ }
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
+ DMA_CH_SR_TPS_LEN))
+ pdata->stats.tx_process_stopped++;
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
+ DMA_CH_SR_RPS_LEN))
+ pdata->stats.rx_process_stopped++;
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
+ DMA_CH_SR_TBU_LEN))
+ pdata->stats.tx_buffer_unavailable++;
+
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
+ DMA_CH_SR_RBU_LEN))
+ pdata->stats.rx_buffer_unavailable++;
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
+ DMA_CH_SR_FBE_LEN)) {
+ pdata->stats.fatal_bus_error++;
+ schedule_work(&pdata->restart_work);
+ }
+
+ /* Clear all interrupt signals */
+ writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
+ }
+
+ if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
+ DMA_ISR_MACIS_LEN)) {
+ mac_isr = readl(pdata->mac_regs + MAC_ISR);
+
+ if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
+ MAC_ISR_MMCTXIS_LEN))
+ hw_ops->tx_mmc_int(pdata);
+
+ if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
+ MAC_ISR_MMCRXIS_LEN))
+ hw_ops->rx_mmc_int(pdata);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xlgmac_dma_isr(int irq, void *data)
+{
+ struct xlgmac_channel *channel = data;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule_irqoff(&channel->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void xlgmac_tx_timer(unsigned long data)
+{
+ struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct napi_struct *napi;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ if (napi_schedule_prep(napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->per_channel_irq)
+ disable_irq_nosync(channel->dma_irq);
+ else
+ xlgmac_disable_rx_tx_ints(pdata);
+
+ pdata->stats.napi_poll_txtimer++;
+ /* Turn on polling */
+ __napi_schedule(napi);
+ }
+
+ channel->tx_timer_active = 0;
+}
+
+static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ setup_timer(&channel->tx_timer, xlgmac_tx_timer,
+ (unsigned long)channel);
+ }
+}
+
+static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ del_timer_sync(&channel->tx_timer);
+ }
+}
+
+static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xlgmac_one_poll,
+ NAPI_POLL_WEIGHT);
+
+ napi_enable(&channel->napi);
+ }
+ } else {
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi,
+ xlgmac_all_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&pdata->napi);
+ }
+}
+
+static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ napi_disable(&channel->napi);
+
+ if (del)
+ netif_napi_del(&channel->napi);
+ }
+ } else {
+ napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
+ }
+}
+
+static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_channel *channel;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
+ IRQF_SHARED, netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xlgmac_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ /* 'i' is unsigned, so the loop unwinds to channel 0 and exits once
+ * 'i' wraps around to UINT_MAX
+ */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
+static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ }
+ }
+}
+
+static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_channel *channel;
+ struct xlgmac_ring *ring;
+ unsigned int i, j;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->dma_desc_count; j++) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, j);
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ }
+ }
+}
+
+static int xlgmac_start(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ hw_ops->init(pdata);
+ xlgmac_napi_enable(pdata, 1);
+
+ ret = xlgmac_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
+ hw_ops->enable_tx(pdata);
+ hw_ops->enable_rx(pdata);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+
+err_napi:
+ xlgmac_napi_disable(pdata, 1);
+ hw_ops->exit(pdata);
+
+ return ret;
+}
+
+static void xlgmac_stop(struct xlgmac_pdata *pdata)
+{
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct net_device *netdev = pdata->netdev;
+ struct xlgmac_channel *channel;
+ struct netdev_queue *txq;
+ unsigned int i;
+
+ netif_tx_stop_all_queues(netdev);
+ xlgmac_stop_timers(pdata);
+ hw_ops->disable_tx(pdata);
+ hw_ops->disable_rx(pdata);
+ xlgmac_free_irqs(pdata);
+ xlgmac_napi_disable(pdata, 1);
+ hw_ops->exit(pdata);
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ continue;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ netdev_tx_reset_queue(txq);
+ }
+}
+
+static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
+{
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xlgmac_stop(pdata);
+
+ xlgmac_free_tx_data(pdata);
+ xlgmac_free_rx_data(pdata);
+
+ xlgmac_start(pdata);
+}
+
+static void xlgmac_restart(struct work_struct *work)
+{
+ struct xlgmac_pdata *pdata = container_of(work,
+ struct xlgmac_pdata,
+ restart_work);
+
+ rtnl_lock();
+
+ xlgmac_restart_dev(pdata);
+
+ rtnl_unlock();
+}
+
+static int xlgmac_open(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_desc_ops *desc_ops;
+ int ret;
+
+ desc_ops = &pdata->desc_ops;
+
+ /* TODO: Initialize the phy */
+
+ /* Calculate the Rx buffer size before allocating rings */
+ ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
+ if (ret < 0)
+ return ret;
+ pdata->rx_buf_size = ret;
+
+ /* Allocate the channels and rings */
+ ret = desc_ops->alloc_channles_and_rings(pdata);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&pdata->restart_work, xlgmac_restart);
+ xlgmac_init_timers(pdata);
+
+ ret = xlgmac_start(pdata);
+ if (ret)
+ goto err_channels_and_rings;
+
+ return 0;
+
+err_channels_and_rings:
+ desc_ops->free_channels_and_rings(pdata);
+
+ return ret;
+}
+
+static int xlgmac_close(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_desc_ops *desc_ops;
+
+ desc_ops = &pdata->desc_ops;
+
+ /* Stop the device */
+ xlgmac_stop(pdata);
+
+ /* Free the channels and rings */
+ desc_ops->free_channels_and_rings(pdata);
+
+ return 0;
+}
+
+static void xlgmac_tx_timeout(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+
+ netdev_warn(netdev, "tx timeout, device restarting\n");
+ schedule_work(&pdata->restart_work);
+}
+
+static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_pkt_info *tx_pkt_info;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_channel *channel;
+ struct xlgmac_hw_ops *hw_ops;
+ struct netdev_queue *txq;
+ struct xlgmac_ring *ring;
+ int ret;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ XLGMAC_PR("skb->len = %d\n", skb->len);
+
+ channel = pdata->channel_head + skb->queue_mapping;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ ring = channel->tx_ring;
+ tx_pkt_info = &ring->pkt_info;
+
+ if (skb->len == 0) {
+ netif_err(pdata, tx_err, netdev,
+ "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Prepare preliminary packet info for TX */
+ memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
+ xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);
+
+ /* Check that there are enough descriptors available */
+ ret = xlgmac_maybe_stop_tx_queue(channel, ring,
+ tx_pkt_info->desc_count);
+ if (ret)
+ return ret;
+
+ ret = xlgmac_prep_tso(skb, tx_pkt_info);
+ if (ret) {
+ netif_err(pdata, tx_err, netdev,
+ "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ xlgmac_prep_vlan(skb, tx_pkt_info);
+
+ if (!desc_ops->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Report on the actual number of bytes (to be) sent */
+ netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);
+
+ /* Configure required descriptor fields for transmission */
+ hw_ops->dev_xmit(channel);
+
+ if (netif_msg_pktdata(pdata))
+ xlgmac_print_pkt(netdev, skb, true);
+
+ /* Stop the queue in advance if there may not be enough descriptors */
+ xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);
+
+ return NETDEV_TX_OK;
+}
+
+static void xlgmac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_stats *pstats = &pdata->stats;
+
+ pdata->hw_ops.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+}
+
+static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_ops->set_mac_address(pdata, netdev->dev_addr);
+
+ return 0;
+}
+
+static int xlgmac_ioctl(struct net_device *netdev,
+ struct ifreq *ifreq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ int ret;
+
+ ret = xlgmac_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xlgmac_restart_dev(pdata);
+
+ return 0;
+}
+
+static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto,
+ u16 vid)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ set_bit(vid, pdata->active_vlans);
+ hw_ops->update_vlan_hash_table(pdata);
+
+ return 0;
+}
+
+static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto,
+ u16 vid)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ clear_bit(vid, pdata->active_vlans);
+ hw_ops->update_vlan_hash_table(pdata);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xlgmac_poll_controller(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xlgmac_dma_isr(channel->dma_irq, channel);
+ } else {
+ disable_irq(pdata->dev_irq);
+ xlgmac_isr(pdata->dev_irq, pdata);
+ enable_irq(pdata->dev_irq);
+ }
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int xlgmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+ int ret = 0;
+
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
+ rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+ rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+ rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_ops->enable_rss(pdata);
+ else if (!(features & NETIF_F_RXHASH) && rxhash)
+ ret = hw_ops->disable_rss(pdata);
+ if (ret)
+ return ret;
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ hw_ops->enable_rx_csum(pdata);
+ else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ hw_ops->disable_rx_csum(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+ hw_ops->enable_rx_vlan_stripping(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
+ hw_ops->disable_rx_vlan_stripping(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+ hw_ops->enable_rx_vlan_filtering(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+ hw_ops->disable_rx_vlan_filtering(pdata);
+
+ pdata->netdev_features = features;
+
+ return 0;
+}
+
+static void xlgmac_set_rx_mode(struct net_device *netdev)
+{
+ struct xlgmac_pdata *pdata = netdev_priv(netdev);
+ struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+ hw_ops->config_rx_mode(pdata);
+}
+
+static const struct net_device_ops xlgmac_netdev_ops = {
+ .ndo_open = xlgmac_open,
+ .ndo_stop = xlgmac_close,
+ .ndo_start_xmit = xlgmac_xmit,
+ .ndo_tx_timeout = xlgmac_tx_timeout,
+ .ndo_get_stats64 = xlgmac_get_stats64,
+ .ndo_change_mtu = xlgmac_change_mtu,
+ .ndo_set_mac_address = xlgmac_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = xlgmac_ioctl,
+ .ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xlgmac_poll_controller,
+#endif
+ .ndo_set_features = xlgmac_set_features,
+ .ndo_set_rx_mode = xlgmac_set_rx_mode,
+};
+
+const struct net_device_ops *xlgmac_get_netdev_ops(void)
+{
+ return &xlgmac_netdev_ops;
+}
+
+static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_hw_ops *hw_ops;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ while (ring->dirty != ring->cur) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
+
+ /* Reset desc_data values */
+ desc_ops->unmap_desc_data(pdata, desc_data);
+
+ if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
+ break;
+
+ hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
+
+ ring->dirty++;
+ }
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+ /* Update the Rx Tail Pointer Register with address of
+ * the last cleaned entry
+ */
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
+ writel(lower_32_bits(desc_data->dma_desc_addr),
+ XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
+ struct napi_struct *napi,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int len)
+{
+ unsigned int copy_len;
+ struct sk_buff *skb;
+ u8 *packet;
+
+ skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+ /* Start with the header buffer which may contain just the header
+ * or the header plus data
+ */
+ dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
+ desc_data->rx.hdr.dma_off,
+ desc_data->rx.hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+ packet = page_address(desc_data->rx.hdr.pa.pages) +
+ desc_data->rx.hdr.pa.pages_offset;
+ copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
+ copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
+ skb_copy_to_linear_data(skb, packet, copy_len);
+ skb_put(skb, copy_len);
+
+ len -= copy_len;
+ if (len) {
+ /* Add the remaining data as a frag */
+ dma_sync_single_range_for_cpu(pdata->dev,
+ desc_data->rx.buf.dma_base,
+ desc_data->rx.buf.dma_off,
+ desc_data->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ desc_data->rx.buf.pa.pages,
+ desc_data->rx.buf.pa.pages_offset,
+ len, desc_data->rx.buf.dma_len);
+ desc_data->rx.buf.pa.pages = NULL;
+ }
+
+ return skb;
+}
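+
+/* The header/payload split above keeps small packets fully linear: up to
+ * hdr.dma_len bytes are copied into the freshly allocated skb, and any
+ * remainder is attached zero-copy as a page fragment; rx.buf.pa.pages is
+ * then cleared so the refresh path maps a new buffer for that slot.
+ */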
+
+static int xlgmac_tx_poll(struct xlgmac_channel *channel)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->tx_ring;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int tx_packets = 0, tx_bytes = 0;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_dma_desc *dma_desc;
+ struct xlgmac_desc_ops *desc_ops;
+ struct xlgmac_hw_ops *hw_ops;
+ struct netdev_queue *txq;
+ int processed = 0;
+ unsigned int cur;
+
+ desc_ops = &pdata->desc_ops;
+ hw_ops = &pdata->hw_ops;
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ cur = ring->cur;
+
+ /* Be sure we get ring->cur before accessing descriptor data */
+ smp_rmb();
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
+ while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
+ (ring->dirty != cur)) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
+ dma_desc = desc_data->dma_desc;
+
+ if (!hw_ops->tx_complete(dma_desc))
+ break;
+
+ /* Make sure descriptor fields are read after reading
+ * the OWN bit
+ */
+ dma_rmb();
+
+ if (netif_msg_tx_done(pdata))
+ xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
+
+ if (hw_ops->is_last_desc(dma_desc)) {
+ tx_packets += desc_data->tx.packets;
+ tx_bytes += desc_data->tx.bytes;
+ }
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_ops->unmap_desc_data(pdata, desc_data);
+ hw_ops->tx_desc_reset(desc_data);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if (!processed)
+ return 0;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_tx_wake_queue(txq);
+ }
+
+ XLGMAC_PR("processed=%d\n", processed);
+
+ return processed;
+}
+
+static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
+{
+ struct xlgmac_pdata *pdata = channel->pdata;
+ struct xlgmac_ring *ring = channel->rx_ring;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int len, dma_desc_len, max_len;
+ unsigned int context_next, context;
+ struct xlgmac_desc_data *desc_data;
+ struct xlgmac_pkt_info *pkt_info;
+ unsigned int incomplete, error;
+ struct xlgmac_hw_ops *hw_ops;
+ unsigned int received = 0;
+ struct napi_struct *napi;
+ struct sk_buff *skb;
+ int packet_count = 0;
+
+ hw_ops = &pdata->hw_ops;
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ incomplete = 0;
+ context_next = 0;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ pkt_info = &ring->pkt_info;
+ while (packet_count < budget) {
+ /* First time in loop see if we need to restore state */
+ if (!received && desc_data->state_saved) {
+ skb = desc_data->state.skb;
+ error = desc_data->state.error;
+ len = desc_data->state.len;
+ } else {
+ memset(pkt_info, 0, sizeof(*pkt_info));
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+read_again:
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+
+ if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
+ xlgmac_rx_refresh(channel);
+
+ if (hw_ops->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+
+ incomplete = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+ RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
+ context_next = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
+ context = XLGMAC_GET_REG_BITS(
+ pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+ RX_PACKET_ATTRIBUTES_CONTEXT_LEN);
+
+ /* Earlier error, just drain the remaining data */
+ if ((incomplete || context_next) && error)
+ goto read_again;
+
+ if (error || pkt_info->errors) {
+ if (pkt_info->errors)
+ netif_err(pdata, rx_err, netdev,
+ "error in received packet\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (!context) {
+ /* Length is cumulative, get this descriptor's length */
+ dma_desc_len = desc_data->rx.len - len;
+ len += dma_desc_len;
+
+ if (dma_desc_len && !skb) {
+ skb = xlgmac_create_skb(pdata, napi, desc_data,
+ dma_desc_len);
+ if (!skb)
+ error = 1;
+ } else if (dma_desc_len) {
+ dma_sync_single_range_for_cpu(
+ pdata->dev,
+ desc_data->rx.buf.dma_base,
+ desc_data->rx.buf.dma_off,
+ desc_data->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(
+ skb, skb_shinfo(skb)->nr_frags,
+ desc_data->rx.buf.pa.pages,
+ desc_data->rx.buf.pa.pages_offset,
+ dma_desc_len,
+ desc_data->rx.buf.dma_len);
+ desc_data->rx.buf.pa.pages = NULL;
+ }
+ }
+
+ if (incomplete || context_next)
+ goto read_again;
+
+ if (!skb)
+ goto next_packet;
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ netif_err(pdata, rx_err, netdev,
+ "packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (netif_msg_pktdata(pdata))
+ xlgmac_print_pkt(netdev, skb, false);
+
+ skb_checksum_none_assert(skb);
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+ RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+ RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ pkt_info->vlan_ctag);
+ pdata->stats.rx_vlan_packets++;
+ }
+
+ if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+ RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
+ skb_set_hash(skb, pkt_info->rss_hash,
+ pkt_info->rss_hash_type);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+
+ napi_gro_receive(napi, skb);
+
+next_packet:
+ packet_count++;
+ }
+
+ /* Check if we need to save state before leaving */
+ if (received && (incomplete || context_next)) {
+ desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
+ desc_data->state_saved = 1;
+ desc_data->state.skb = skb;
+ desc_data->state.len = len;
+ desc_data->state.error = error;
+ }
+
+ XLGMAC_PR("packet_count = %d\n", packet_count);
+
+ return packet_count;
+}
+
+static int xlgmac_one_poll(struct napi_struct *napi, int budget)
+{
+ struct xlgmac_channel *channel = container_of(napi,
+ struct xlgmac_channel,
+ napi);
+ int processed = 0;
+
+ XLGMAC_PR("budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ xlgmac_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = xlgmac_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete_done(napi, processed);
+
+ /* Enable Tx and Rx interrupts */
+ enable_irq(channel->dma_irq);
+ }
+
+ XLGMAC_PR("received = %d\n", processed);
+
+ return processed;
+}
+
+static int xlgmac_all_poll(struct napi_struct *napi, int budget)
+{
+ struct xlgmac_pdata *pdata = container_of(napi,
+ struct xlgmac_pdata,
+ napi);
+ struct xlgmac_channel *channel;
+ int processed, last_processed;
+ int ring_budget;
+ unsigned int i;
+
+ XLGMAC_PR("budget=%d\n", budget);
+
+ processed = 0;
+ ring_budget = budget / pdata->rx_ring_count;
+ do {
+ last_processed = processed;
+
+ channel = pdata->channel_head;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Cleanup Tx ring first */
+ xlgmac_tx_poll(channel);
+
+ /* Process Rx ring next */
+ if (ring_budget > (budget - processed))
+ ring_budget = budget - processed;
+ processed += xlgmac_rx_poll(channel, ring_budget);
+ }
+ } while ((processed < budget) && (processed != last_processed));
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete_done(napi, processed);
+
+ /* Enable Tx and Rx interrupts */
+ xlgmac_enable_rx_tx_ints(pdata);
+ }
+
+ XLGMAC_PR("received = %d\n", processed);
+
+ return processed;
+}
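+
+/* Budget-split example for the poll above: with a NAPI budget of 64 and
+ * rx_ring_count = 4, each channel is first offered ring_budget = 16
+ * descriptors; the outer do/while keeps cycling through the channels
+ * until the budget is spent or a full pass makes no progress.
+ */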
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
new file mode 100644
index 000000000000..386bafe74c3f
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -0,0 +1,78 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+ struct device *dev = &pcidev->dev;
+ struct xlgmac_resources res;
+ int i, ret;
+
+ ret = pcim_enable_device(pcidev);
+ if (ret) {
+ dev_err(dev, "ERROR: failed to enable device\n");
+ return ret;
+ }
+
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ if (pci_resource_len(pcidev, i) == 0)
+ continue;
+ ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ pci_set_master(pcidev);
+
+ memset(&res, 0, sizeof(res));
+ res.irq = pcidev->irq;
+ res.addr = pcim_iomap_table(pcidev)[i];
+
+ return xlgmac_drv_probe(&pcidev->dev, &res);
+}
+
+static void xlgmac_remove(struct pci_dev *pcidev)
+{
+ xlgmac_drv_remove(&pcidev->dev);
+}
+
+static const struct pci_device_id xlgmac_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0x7302) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, xlgmac_pci_tbl);
+
+static struct pci_driver xlgmac_pci_driver = {
+ .name = XLGMAC_DRV_NAME,
+ .id_table = xlgmac_pci_tbl,
+ .probe = xlgmac_probe,
+ .remove = xlgmac_remove,
+};
+
+module_pci_driver(xlgmac_pci_driver);
+
+MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
+MODULE_VERSION(XLGMAC_DRV_VERSION);
+MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
new file mode 100644
index 000000000000..3754f220567d
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
@@ -0,0 +1,744 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#ifndef __DWC_XLGMAC_REG_H__
+#define __DWC_XLGMAC_REG_H__
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_VR 0x0110
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_POS 18
+#define MAC_HWF0R_ADDMACADRSEL_LEN 5
+#define MAC_HWF0R_ARPOFFSEL_POS 9
+#define MAC_HWF0R_ARPOFFSEL_LEN 1
+#define MAC_HWF0R_EEESEL_POS 13
+#define MAC_HWF0R_EEESEL_LEN 1
+#define MAC_HWF0R_PHYIFSEL_POS 1
+#define MAC_HWF0R_PHYIFSEL_LEN 2
+#define MAC_HWF0R_MGKSEL_POS 7
+#define MAC_HWF0R_MGKSEL_LEN 1
+#define MAC_HWF0R_MMCSEL_POS 8
+#define MAC_HWF0R_MMCSEL_LEN 1
+#define MAC_HWF0R_RWKSEL_POS 6
+#define MAC_HWF0R_RWKSEL_LEN 1
+#define MAC_HWF0R_RXCOESEL_POS 16
+#define MAC_HWF0R_RXCOESEL_LEN 1
+#define MAC_HWF0R_SAVLANINS_POS 27
+#define MAC_HWF0R_SAVLANINS_LEN 1
+#define MAC_HWF0R_SMASEL_POS 5
+#define MAC_HWF0R_SMASEL_LEN 1
+#define MAC_HWF0R_TSSEL_POS 12
+#define MAC_HWF0R_TSSEL_LEN 1
+#define MAC_HWF0R_TSSTSSEL_POS 25
+#define MAC_HWF0R_TSSTSSEL_LEN 2
+#define MAC_HWF0R_TXCOESEL_POS 14
+#define MAC_HWF0R_TXCOESEL_LEN 1
+#define MAC_HWF0R_VLHASH_POS 4
+#define MAC_HWF0R_VLHASH_LEN 1
+#define MAC_HWF1R_ADDR64_POS 14
+#define MAC_HWF1R_ADDR64_LEN 2
+#define MAC_HWF1R_ADVTHWORD_POS 13
+#define MAC_HWF1R_ADVTHWORD_LEN 1
+#define MAC_HWF1R_DBGMEMA_POS 19
+#define MAC_HWF1R_DBGMEMA_LEN 1
+#define MAC_HWF1R_DCBEN_POS 16
+#define MAC_HWF1R_DCBEN_LEN 1
+#define MAC_HWF1R_HASHTBLSZ_POS 24
+#define MAC_HWF1R_HASHTBLSZ_LEN 3
+#define MAC_HWF1R_L3L4FNUM_POS 27
+#define MAC_HWF1R_L3L4FNUM_LEN 4
+#define MAC_HWF1R_NUMTC_POS 21
+#define MAC_HWF1R_NUMTC_LEN 3
+#define MAC_HWF1R_RSSEN_POS 20
+#define MAC_HWF1R_RSSEN_LEN 1
+#define MAC_HWF1R_RXFIFOSIZE_POS 0
+#define MAC_HWF1R_RXFIFOSIZE_LEN 5
+#define MAC_HWF1R_SPHEN_POS 17
+#define MAC_HWF1R_SPHEN_LEN 1
+#define MAC_HWF1R_TSOEN_POS 18
+#define MAC_HWF1R_TSOEN_LEN 1
+#define MAC_HWF1R_TXFIFOSIZE_POS 6
+#define MAC_HWF1R_TXFIFOSIZE_LEN 5
+#define MAC_HWF2R_AUXSNAPNUM_POS 28
+#define MAC_HWF2R_AUXSNAPNUM_LEN 3
+#define MAC_HWF2R_PPSOUTNUM_POS 24
+#define MAC_HWF2R_PPSOUTNUM_LEN 3
+#define MAC_HWF2R_RXCHCNT_POS 12
+#define MAC_HWF2R_RXCHCNT_LEN 4
+#define MAC_HWF2R_RXQCNT_POS 0
+#define MAC_HWF2R_RXQCNT_LEN 4
+#define MAC_HWF2R_TXCHCNT_POS 18
+#define MAC_HWF2R_TXCHCNT_LEN 4
+#define MAC_HWF2R_TXQCNT_POS 6
+#define MAC_HWF2R_TXQCNT_LEN 4
+#define MAC_IER_TSIE_POS 12
+#define MAC_IER_TSIE_LEN 1
+#define MAC_ISR_MMCRXIS_POS 9
+#define MAC_ISR_MMCRXIS_LEN 1
+#define MAC_ISR_MMCTXIS_POS 10
+#define MAC_ISR_MMCTXIS_LEN 1
+#define MAC_ISR_PMTIS_POS 4
+#define MAC_ISR_PMTIS_LEN 1
+#define MAC_ISR_TSIS_POS 12
+#define MAC_ISR_TSIS_LEN 1
+#define MAC_MACA1HR_AE_POS 31
+#define MAC_MACA1HR_AE_LEN 1
+#define MAC_PFR_HMC_POS 2
+#define MAC_PFR_HMC_LEN 1
+#define MAC_PFR_HPF_POS 10
+#define MAC_PFR_HPF_LEN 1
+#define MAC_PFR_HUC_POS 1
+#define MAC_PFR_HUC_LEN 1
+#define MAC_PFR_PM_POS 4
+#define MAC_PFR_PM_LEN 1
+#define MAC_PFR_PR_POS 0
+#define MAC_PFR_PR_LEN 1
+#define MAC_PFR_VTFE_POS 16
+#define MAC_PFR_VTFE_LEN 1
+#define MAC_Q0TFCR_PT_POS 16
+#define MAC_Q0TFCR_PT_LEN 16
+#define MAC_Q0TFCR_TFE_POS 1
+#define MAC_Q0TFCR_TFE_LEN 1
+#define MAC_RCR_ACS_POS 1
+#define MAC_RCR_ACS_LEN 1
+#define MAC_RCR_CST_POS 2
+#define MAC_RCR_CST_LEN 1
+#define MAC_RCR_DCRCC_POS 3
+#define MAC_RCR_DCRCC_LEN 1
+#define MAC_RCR_HDSMS_POS 12
+#define MAC_RCR_HDSMS_LEN 3
+#define MAC_RCR_IPC_POS 9
+#define MAC_RCR_IPC_LEN 1
+#define MAC_RCR_JE_POS 8
+#define MAC_RCR_JE_LEN 1
+#define MAC_RCR_LM_POS 10
+#define MAC_RCR_LM_LEN 1
+#define MAC_RCR_RE_POS 0
+#define MAC_RCR_RE_LEN 1
+#define MAC_RFCR_PFCE_POS 8
+#define MAC_RFCR_PFCE_LEN 1
+#define MAC_RFCR_RFE_POS 0
+#define MAC_RFCR_RFE_LEN 1
+#define MAC_RFCR_UP_POS 1
+#define MAC_RFCR_UP_LEN 1
+#define MAC_RQC0R_RXQ0EN_POS 0
+#define MAC_RQC0R_RXQ0EN_LEN 2
+#define MAC_RSSAR_ADDRT_POS 2
+#define MAC_RSSAR_ADDRT_LEN 1
+#define MAC_RSSAR_CT_POS 1
+#define MAC_RSSAR_CT_LEN 1
+#define MAC_RSSAR_OB_POS 0
+#define MAC_RSSAR_OB_LEN 1
+#define MAC_RSSAR_RSSIA_POS 8
+#define MAC_RSSAR_RSSIA_LEN 8
+#define MAC_RSSCR_IP2TE_POS 1
+#define MAC_RSSCR_IP2TE_LEN 1
+#define MAC_RSSCR_RSSE_POS 0
+#define MAC_RSSCR_RSSE_LEN 1
+#define MAC_RSSCR_TCP4TE_POS 2
+#define MAC_RSSCR_TCP4TE_LEN 1
+#define MAC_RSSCR_UDP4TE_POS 3
+#define MAC_RSSCR_UDP4TE_LEN 1
+#define MAC_RSSDR_DMCH_POS 0
+#define MAC_RSSDR_DMCH_LEN 4
+#define MAC_TCR_SS_POS 28
+#define MAC_TCR_SS_LEN 3
+#define MAC_TCR_TE_POS 0
+#define MAC_TCR_TE_LEN 1
+#define MAC_VLANHTR_VLHT_POS 0
+#define MAC_VLANHTR_VLHT_LEN 16
+#define MAC_VLANIR_VLTI_POS 20
+#define MAC_VLANIR_VLTI_LEN 1
+#define MAC_VLANIR_CSVL_POS 19
+#define MAC_VLANIR_CSVL_LEN 1
+#define MAC_VLANTR_DOVLTC_POS 20
+#define MAC_VLANTR_DOVLTC_LEN 1
+#define MAC_VLANTR_ERSVLM_POS 19
+#define MAC_VLANTR_ERSVLM_LEN 1
+#define MAC_VLANTR_ESVL_POS 18
+#define MAC_VLANTR_ESVL_LEN 1
+#define MAC_VLANTR_ETV_POS 16
+#define MAC_VLANTR_ETV_LEN 1
+#define MAC_VLANTR_EVLS_POS 21
+#define MAC_VLANTR_EVLS_LEN 2
+#define MAC_VLANTR_EVLRXS_POS 24
+#define MAC_VLANTR_EVLRXS_LEN 1
+#define MAC_VLANTR_VL_POS 0
+#define MAC_VLANTR_VL_LEN 16
+#define MAC_VLANTR_VTHM_POS 25
+#define MAC_VLANTR_VTHM_LEN 1
+#define MAC_VLANTR_VTIM_POS 17
+#define MAC_VLANTR_VTIM_LEN 1
+#define MAC_VR_DEVID_POS 8
+#define MAC_VR_DEVID_LEN 8
+#define MAC_VR_SNPSVER_POS 0
+#define MAC_VR_SNPSVER_LEN 8
+#define MAC_VR_USERVER_POS 16
+#define MAC_VR_USERVER_LEN 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_POS 0
+#define MMC_CR_CR_LEN 1
+#define MMC_CR_CSR_POS 1
+#define MMC_CR_CSR_LEN 1
+#define MMC_CR_ROR_POS 2
+#define MMC_CR_ROR_LEN 1
+#define MMC_CR_MCF_POS 3
+#define MMC_CR_MCF_LEN 1
+#define MMC_CR_MCT_POS 4
+#define MMC_CR_MCT_LEN 2
+#define MMC_RIER_ALL_INTERRUPTS_POS 0
+#define MMC_RIER_ALL_INTERRUPTS_LEN 23
+#define MMC_RISR_RXFRAMECOUNT_GB_POS 0
+#define MMC_RISR_RXFRAMECOUNT_GB_LEN 1
+#define MMC_RISR_RXOCTETCOUNT_GB_POS 1
+#define MMC_RISR_RXOCTETCOUNT_GB_LEN 1
+#define MMC_RISR_RXOCTETCOUNT_G_POS 2
+#define MMC_RISR_RXOCTETCOUNT_G_LEN 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_POS 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_LEN 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_POS 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_LEN 1
+#define MMC_RISR_RXCRCERROR_POS 5
+#define MMC_RISR_RXCRCERROR_LEN 1
+#define MMC_RISR_RXRUNTERROR_POS 6
+#define MMC_RISR_RXRUNTERROR_LEN 1
+#define MMC_RISR_RXJABBERERROR_POS 7
+#define MMC_RISR_RXJABBERERROR_LEN 1
+#define MMC_RISR_RXUNDERSIZE_G_POS 8
+#define MMC_RISR_RXUNDERSIZE_G_LEN 1
+#define MMC_RISR_RXOVERSIZE_G_POS 9
+#define MMC_RISR_RXOVERSIZE_G_LEN 1
+#define MMC_RISR_RX64OCTETS_GB_POS 10
+#define MMC_RISR_RX64OCTETS_GB_LEN 1
+#define MMC_RISR_RX65TO127OCTETS_GB_POS 11
+#define MMC_RISR_RX65TO127OCTETS_GB_LEN 1
+#define MMC_RISR_RX128TO255OCTETS_GB_POS 12
+#define MMC_RISR_RX128TO255OCTETS_GB_LEN 1
+#define MMC_RISR_RX256TO511OCTETS_GB_POS 13
+#define MMC_RISR_RX256TO511OCTETS_GB_LEN 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_POS 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_LEN 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_POS 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_LEN 1
+#define MMC_RISR_RXUNICASTFRAMES_G_POS 16
+#define MMC_RISR_RXUNICASTFRAMES_G_LEN 1
+#define MMC_RISR_RXLENGTHERROR_POS 17
+#define MMC_RISR_RXLENGTHERROR_LEN 1
+#define MMC_RISR_RXOUTOFRANGETYPE_POS 18
+#define MMC_RISR_RXOUTOFRANGETYPE_LEN 1
+#define MMC_RISR_RXPAUSEFRAMES_POS 19
+#define MMC_RISR_RXPAUSEFRAMES_LEN 1
+#define MMC_RISR_RXFIFOOVERFLOW_POS 20
+#define MMC_RISR_RXFIFOOVERFLOW_LEN 1
+#define MMC_RISR_RXVLANFRAMES_GB_POS 21
+#define MMC_RISR_RXVLANFRAMES_GB_LEN 1
+#define MMC_RISR_RXWATCHDOGERROR_POS 22
+#define MMC_RISR_RXWATCHDOGERROR_LEN 1
+#define MMC_TIER_ALL_INTERRUPTS_POS 0
+#define MMC_TIER_ALL_INTERRUPTS_LEN 18
+#define MMC_TISR_TXOCTETCOUNT_GB_POS 0
+#define MMC_TISR_TXOCTETCOUNT_GB_LEN 1
+#define MMC_TISR_TXFRAMECOUNT_GB_POS 1
+#define MMC_TISR_TXFRAMECOUNT_GB_LEN 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_POS 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_LEN 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_POS 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_LEN 1
+#define MMC_TISR_TX64OCTETS_GB_POS 4
+#define MMC_TISR_TX64OCTETS_GB_LEN 1
+#define MMC_TISR_TX65TO127OCTETS_GB_POS 5
+#define MMC_TISR_TX65TO127OCTETS_GB_LEN 1
+#define MMC_TISR_TX128TO255OCTETS_GB_POS 6
+#define MMC_TISR_TX128TO255OCTETS_GB_LEN 1
+#define MMC_TISR_TX256TO511OCTETS_GB_POS 7
+#define MMC_TISR_TX256TO511OCTETS_GB_LEN 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_POS 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_LEN 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_POS 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_LEN 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_POS 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_POS 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_POS 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_LEN 1
+#define MMC_TISR_TXUNDERFLOWERROR_POS 13
+#define MMC_TISR_TXUNDERFLOWERROR_LEN 1
+#define MMC_TISR_TXOCTETCOUNT_G_POS 14
+#define MMC_TISR_TXOCTETCOUNT_G_LEN 1
+#define MMC_TISR_TXFRAMECOUNT_G_POS 15
+#define MMC_TISR_TXFRAMECOUNT_G_LEN 1
+#define MMC_TISR_TXPAUSEFRAMES_POS 16
+#define MMC_TISR_TXPAUSEFRAMES_LEN 1
+#define MMC_TISR_TXVLANFRAMES_G_POS 17
+#define MMC_TISR_TXVLANFRAMES_G_LEN 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDDR 0x1010
+#define MTL_RQDCM0R 0x1030
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_POS 5
+#define MTL_OMR_ETSALG_LEN 2
+#define MTL_OMR_RAA_POS 2
+#define MTL_OMR_RAA_LEN 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
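The base/increment pair above is the entire per-queue addressing scheme: a queue register lives at MTL_Q_BASE + queue * MTL_Q_INC + offset, which is exactly what the XLGMAC_MTL_REG() helper near the end of this header encodes (the DMA channel window at 0x3100/0x80 further down follows the identical pattern). A minimal sketch of the resulting access, with a hypothetical helper name and assuming mac_regs is the ioremapped MAC register base:

/* Sketch only, not part of the patch: per-queue MTL register read.
 * Queue 0 -> 0x1100 + reg, queue 1 -> 0x1180 + reg, and so on.
 */
static inline u32 xlgmac_read_mtl_q_reg(void __iomem *mac_regs,
					unsigned int queue,
					unsigned int reg)
{
	return readl(mac_regs + MTL_Q_BASE + (queue * MTL_Q_INC) + reg);
}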
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_POS 16
+#define MTL_Q_RQDR_PRXQ_LEN 14
+#define MTL_Q_RQDR_RXQSTS_POS 4
+#define MTL_Q_RQDR_RXQSTS_LEN 2
+#define MTL_Q_RQFCR_RFA_POS 1
+#define MTL_Q_RQFCR_RFA_LEN 6
+#define MTL_Q_RQFCR_RFD_POS 17
+#define MTL_Q_RQFCR_RFD_LEN 6
+#define MTL_Q_RQOMR_EHFC_POS 7
+#define MTL_Q_RQOMR_EHFC_LEN 1
+#define MTL_Q_RQOMR_RQS_POS 16
+#define MTL_Q_RQOMR_RQS_LEN 9
+#define MTL_Q_RQOMR_RSF_POS 5
+#define MTL_Q_RQOMR_RSF_LEN 1
+#define MTL_Q_RQOMR_FEP_POS 4
+#define MTL_Q_RQOMR_FEP_LEN 1
+#define MTL_Q_RQOMR_FUP_POS 3
+#define MTL_Q_RQOMR_FUP_LEN 1
+#define MTL_Q_RQOMR_RTC_POS 0
+#define MTL_Q_RQOMR_RTC_LEN 2
+#define MTL_Q_TQOMR_FTQ_POS 0
+#define MTL_Q_TQOMR_FTQ_LEN 1
+#define MTL_Q_TQOMR_Q2TCMAP_POS 8
+#define MTL_Q_TQOMR_Q2TCMAP_LEN 3
+#define MTL_Q_TQOMR_TQS_POS 16
+#define MTL_Q_TQOMR_TQS_LEN 10
+#define MTL_Q_TQOMR_TSF_POS 1
+#define MTL_Q_TQOMR_TSF_LEN 1
+#define MTL_Q_TQOMR_TTC_POS 4
+#define MTL_Q_TQOMR_TTC_LEN 3
+#define MTL_Q_TQOMR_TXQEN_POS 2
+#define MTL_Q_TQOMR_TXQEN_LEN 2
+
+/* MTL queue register values */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+#define MTL_RQDCM0R_Q0MDMACH 0x0
+#define MTL_RQDCM0R_Q1MDMACH 0x00000100
+#define MTL_RQDCM0R_Q2MDMACH 0x00020000
+#define MTL_RQDCM0R_Q3MDMACH 0x03000000
+#define MTL_RQDCM1R_Q4MDMACH 0x00000004
+#define MTL_RQDCM1R_Q5MDMACH 0x00000500
+#define MTL_RQDCM1R_Q6MDMACH 0x00060000
+#define MTL_RQDCM1R_Q7MDMACH 0x07000000
+#define MTL_RQDCM2R_Q8MDMACH 0x00000008
+#define MTL_RQDCM2R_Q9MDMACH 0x00000900
+#define MTL_RQDCM2R_Q10MDMACH 0x000A0000
+#define MTL_RQDCM2R_Q11MDMACH 0x0B000000
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent class has registers that
+ * are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_POS 0
+#define MTL_TC_ETSCR_TSA_LEN 2
+#define MTL_TC_QWR_QW_POS 0
+#define MTL_TC_QWR_QW_LEN 21
+
+/* MTL traffic class register values */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+
+/* DMA register entry bit positions and sizes */
+#define DMA_ISR_MACIS_POS 17
+#define DMA_ISR_MACIS_LEN 1
+#define DMA_ISR_MTLIS_POS 16
+#define DMA_ISR_MTLIS_LEN 1
+#define DMA_MR_SWR_POS 0
+#define DMA_MR_SWR_LEN 1
+#define DMA_SBMR_EAME_POS 11
+#define DMA_SBMR_EAME_LEN 1
+#define DMA_SBMR_BLEN_64_POS 5
+#define DMA_SBMR_BLEN_64_LEN 1
+#define DMA_SBMR_BLEN_128_POS 6
+#define DMA_SBMR_BLEN_128_LEN 1
+#define DMA_SBMR_BLEN_256_POS 7
+#define DMA_SBMR_BLEN_256_LEN 1
+#define DMA_SBMR_UNDEF_POS 0
+#define DMA_SBMR_UNDEF_LEN 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_LEN 4
+#define DMA_DSR_TPS_LEN 4
+#define DMA_DSR_Q_LEN (DMA_DSR_RPS_LEN + DMA_DSR_TPS_LEN)
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
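The DSR defines above describe a packed status layout: DMA_DSR0 carries the first three channels starting at bit 12, and each subsequent DSR register carries four more channels of DMA_DSR_Q_LEN bits each starting at bit 4. A hedged decoder sketch (the helper name is invented; this reading of the layout mirrors the sibling amd/xgbe driver, which uses the same defines):

/* Sketch only, not part of the patch: extract channel "ch"'s TX
 * process state (TPS); compare the result against DMA_TPS_STOPPED or
 * DMA_TPS_SUSPENDED when waiting for a channel to quiesce.
 */
static u32 xlgmac_tx_process_state(void __iomem *regs, unsigned int ch)
{
	unsigned int dsr, pos;

	if (ch < DMA_DSRX_FIRST_QUEUE) {
		dsr = DMA_DSR0;
		pos = DMA_DSR0_TPS_START + (ch * DMA_DSR_Q_LEN);
	} else {
		unsigned int q = ch - DMA_DSRX_FIRST_QUEUE;

		dsr = DMA_DSR1 + (q / DMA_DSRX_QPR) * DMA_DSRX_INC;
		pos = DMA_DSRX_TPS_START +
		      (q % DMA_DSRX_QPR) * DMA_DSR_Q_LEN;
	}

	return (readl(regs + dsr) >> pos) & (BIT(DMA_DSR_TPS_LEN) - 1);
}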
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_SR 0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_POS 16
+#define DMA_CH_CR_PBLX8_LEN 1
+#define DMA_CH_CR_SPH_POS 24
+#define DMA_CH_CR_SPH_LEN 1
+#define DMA_CH_IER_AIE_POS 15
+#define DMA_CH_IER_AIE_LEN 1
+#define DMA_CH_IER_FBEE_POS 12
+#define DMA_CH_IER_FBEE_LEN 1
+#define DMA_CH_IER_NIE_POS 16
+#define DMA_CH_IER_NIE_LEN 1
+#define DMA_CH_IER_RBUE_POS 7
+#define DMA_CH_IER_RBUE_LEN 1
+#define DMA_CH_IER_RIE_POS 6
+#define DMA_CH_IER_RIE_LEN 1
+#define DMA_CH_IER_RSE_POS 8
+#define DMA_CH_IER_RSE_LEN 1
+#define DMA_CH_IER_TBUE_POS 2
+#define DMA_CH_IER_TBUE_LEN 1
+#define DMA_CH_IER_TIE_POS 0
+#define DMA_CH_IER_TIE_LEN 1
+#define DMA_CH_IER_TXSE_POS 1
+#define DMA_CH_IER_TXSE_LEN 1
+#define DMA_CH_RCR_PBL_POS 16
+#define DMA_CH_RCR_PBL_LEN 6
+#define DMA_CH_RCR_RBSZ_POS 1
+#define DMA_CH_RCR_RBSZ_LEN 14
+#define DMA_CH_RCR_SR_POS 0
+#define DMA_CH_RCR_SR_LEN 1
+#define DMA_CH_RIWT_RWT_POS 0
+#define DMA_CH_RIWT_RWT_LEN 8
+#define DMA_CH_SR_FBE_POS 12
+#define DMA_CH_SR_FBE_LEN 1
+#define DMA_CH_SR_RBU_POS 7
+#define DMA_CH_SR_RBU_LEN 1
+#define DMA_CH_SR_RI_POS 6
+#define DMA_CH_SR_RI_LEN 1
+#define DMA_CH_SR_RPS_POS 8
+#define DMA_CH_SR_RPS_LEN 1
+#define DMA_CH_SR_TBU_POS 2
+#define DMA_CH_SR_TBU_LEN 1
+#define DMA_CH_SR_TI_POS 0
+#define DMA_CH_SR_TI_LEN 1
+#define DMA_CH_SR_TPS_POS 1
+#define DMA_CH_SR_TPS_LEN 1
+#define DMA_CH_TCR_OSP_POS 4
+#define DMA_CH_TCR_OSP_LEN 1
+#define DMA_CH_TCR_PBL_POS 16
+#define DMA_CH_TCR_PBL_LEN 6
+#define DMA_CH_TCR_ST_POS 0
+#define DMA_CH_TCR_ST_LEN 1
+#define DMA_CH_TCR_TSE_POS 12
+#define DMA_CH_TCR_TSE_LEN 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64
+#define DMA_PBL_128 128
+#define DMA_PBL_256 256
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_POS 2
+#define RX_PACKET_ERRORS_CRC_LEN 1
+#define RX_PACKET_ERRORS_FRAME_POS 3
+#define RX_PACKET_ERRORS_FRAME_LEN 1
+#define RX_PACKET_ERRORS_LENGTH_POS 0
+#define RX_PACKET_ERRORS_LENGTH_LEN 1
+#define RX_PACKET_ERRORS_OVERRUN_POS 1
+#define RX_PACKET_ERRORS_OVERRUN_LEN 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_POS 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_POS 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_POS 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_LEN 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_POS 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_LEN 1
+
+#define RX_NORMAL_DESC0_OVT_POS 0
+#define RX_NORMAL_DESC0_OVT_LEN 16
+#define RX_NORMAL_DESC2_HL_POS 0
+#define RX_NORMAL_DESC2_HL_LEN 10
+#define RX_NORMAL_DESC3_CDA_POS 27
+#define RX_NORMAL_DESC3_CDA_LEN 1
+#define RX_NORMAL_DESC3_CTXT_POS 30
+#define RX_NORMAL_DESC3_CTXT_LEN 1
+#define RX_NORMAL_DESC3_ES_POS 15
+#define RX_NORMAL_DESC3_ES_LEN 1
+#define RX_NORMAL_DESC3_ETLT_POS 16
+#define RX_NORMAL_DESC3_ETLT_LEN 4
+#define RX_NORMAL_DESC3_FD_POS 29
+#define RX_NORMAL_DESC3_FD_LEN 1
+#define RX_NORMAL_DESC3_INTE_POS 30
+#define RX_NORMAL_DESC3_INTE_LEN 1
+#define RX_NORMAL_DESC3_L34T_POS 20
+#define RX_NORMAL_DESC3_L34T_LEN 4
+#define RX_NORMAL_DESC3_LD_POS 28
+#define RX_NORMAL_DESC3_LD_LEN 1
+#define RX_NORMAL_DESC3_OWN_POS 31
+#define RX_NORMAL_DESC3_OWN_LEN 1
+#define RX_NORMAL_DESC3_PL_POS 0
+#define RX_NORMAL_DESC3_PL_LEN 14
+#define RX_NORMAL_DESC3_RSV_POS 26
+#define RX_NORMAL_DESC3_RSV_LEN 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
+#define RX_CONTEXT_DESC3_TSA_POS 4
+#define RX_CONTEXT_DESC3_TSA_LEN 1
+#define RX_CONTEXT_DESC3_TSD_POS 6
+#define RX_CONTEXT_DESC3_TSD_LEN 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1
+#define TX_PACKET_ATTRIBUTES_PTP_POS 3
+#define TX_PACKET_ATTRIBUTES_PTP_LEN 1
+
+#define TX_CONTEXT_DESC2_MSS_POS 0
+#define TX_CONTEXT_DESC2_MSS_LEN 15
+#define TX_CONTEXT_DESC3_CTXT_POS 30
+#define TX_CONTEXT_DESC3_CTXT_LEN 1
+#define TX_CONTEXT_DESC3_TCMSSV_POS 26
+#define TX_CONTEXT_DESC3_TCMSSV_LEN 1
+#define TX_CONTEXT_DESC3_VLTV_POS 16
+#define TX_CONTEXT_DESC3_VLTV_LEN 1
+#define TX_CONTEXT_DESC3_VT_POS 0
+#define TX_CONTEXT_DESC3_VT_LEN 16
+
+#define TX_NORMAL_DESC2_HL_B1L_POS 0
+#define TX_NORMAL_DESC2_HL_B1L_LEN 14
+#define TX_NORMAL_DESC2_IC_POS 31
+#define TX_NORMAL_DESC2_IC_LEN 1
+#define TX_NORMAL_DESC2_TTSE_POS 30
+#define TX_NORMAL_DESC2_TTSE_LEN 1
+#define TX_NORMAL_DESC2_VTIR_POS 14
+#define TX_NORMAL_DESC2_VTIR_LEN 2
+#define TX_NORMAL_DESC3_CIC_POS 16
+#define TX_NORMAL_DESC3_CIC_LEN 2
+#define TX_NORMAL_DESC3_CPC_POS 26
+#define TX_NORMAL_DESC3_CPC_LEN 2
+#define TX_NORMAL_DESC3_CTXT_POS 30
+#define TX_NORMAL_DESC3_CTXT_LEN 1
+#define TX_NORMAL_DESC3_FD_POS 29
+#define TX_NORMAL_DESC3_FD_LEN 1
+#define TX_NORMAL_DESC3_FL_POS 0
+#define TX_NORMAL_DESC3_FL_LEN 15
+#define TX_NORMAL_DESC3_LD_POS 28
+#define TX_NORMAL_DESC3_LD_LEN 1
+#define TX_NORMAL_DESC3_OWN_POS 31
+#define TX_NORMAL_DESC3_OWN_LEN 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_POS 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_LEN 4
+#define TX_NORMAL_DESC3_TCPPL_POS 0
+#define TX_NORMAL_DESC3_TCPPL_LEN 18
+#define TX_NORMAL_DESC3_TSE_POS 18
+#define TX_NORMAL_DESC3_TSE_LEN 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+#define XLGMAC_MTL_REG(pdata, n, reg) \
+ ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg))
+
+#define XLGMAC_DMA_REG(channel, reg) ((channel)->dma_regs + (reg))
+
+#endif /* __DWC_XLGMAC_REG_H__ */
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
new file mode 100644
index 000000000000..cab3e40a86b9
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -0,0 +1,660 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */
+
+#ifndef __DWC_XLGMAC_H__
+#define __DWC_XLGMAC_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/timecounter.h>
+
+#define XLGMAC_DRV_NAME "dwc-xlgmac"
+#define XLGMAC_DRV_VERSION "1.0.0"
+#define XLGMAC_DRV_DESC "Synopsys DWC XLGMAC Driver"
+
+/* Descriptor related parameters */
+#define XLGMAC_TX_DESC_CNT 1024
+#define XLGMAC_TX_DESC_MIN_FREE (XLGMAC_TX_DESC_CNT >> 3)
+#define XLGMAC_TX_DESC_MAX_PROC (XLGMAC_TX_DESC_CNT >> 1)
+#define XLGMAC_RX_DESC_CNT 1024
+#define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3)
+
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for a SKB */
+#define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
+
+#define XLGMAC_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define XLGMAC_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XLGMAC_RX_BUF_ALIGN 64
+
+/* Maximum Size for Splitting the Header Data
+ * Keep XLGMAC_SPH_HDSMS_SIZE in sync with XLGMAC_SKB_ALLOC_SIZE:
+ * the encoding 3 (3'b011) below selects 512 bytes.
+ * 3'b000: 64 bytes, 3'b001: 128 bytes
+ * 3'b010: 256 bytes, 3'b011: 512 bytes
+ * 3'b100: 1023 bytes, 3'b101-3'b111: Reserved
+ */
+#define XLGMAC_SPH_HDSMS_SIZE 3
+#define XLGMAC_SKB_ALLOC_SIZE 512
+
+#define XLGMAC_MAX_FIFO 81920
+
+#define XLGMAC_MAX_DMA_CHANNELS 16
+#define XLGMAC_DMA_STOP_TIMEOUT 5
+#define XLGMAC_DMA_INTERRUPT_MASK 0x31c7
+
+/* Default coalescing parameters */
+#define XLGMAC_INIT_DMA_TX_USECS 1000
+#define XLGMAC_INIT_DMA_TX_FRAMES 25
+#define XLGMAC_INIT_DMA_RX_USECS 30
+#define XLGMAC_INIT_DMA_RX_FRAMES 25
+#define XLGMAC_MAX_DMA_RIWT 0xff
+#define XLGMAC_MIN_DMA_RIWT 0x01
+
+/* Flow control queue count */
+#define XLGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* System clock is 125 MHz */
+#define XLGMAC_SYSCLOCK 125000000
+
+/* Maximum MAC address hash table size (256 bits = 8 x 32-bit registers) */
+#define XLGMAC_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define XLGMAC_RSS_HASH_KEY_SIZE 40
+#define XLGMAC_RSS_MAX_TABLE_SIZE 256
+#define XLGMAC_RSS_LOOKUP_TABLE_TYPE 0
+#define XLGMAC_RSS_HASH_KEY_TYPE 1
+
+#define XLGMAC_STD_PACKET_MTU 1500
+#define XLGMAC_JUMBO_PACKET_MTU 9000
+
+/* Helper macro for descriptor handling
+ * Always use XLGMAC_GET_DESC_DATA to access the descriptor data
+ */
+#define XLGMAC_GET_DESC_DATA(ring, idx) ({ \
+ typeof(ring) _ring = (ring); \
+ ((_ring)->desc_data_head + \
+ ((idx) & ((_ring)->dma_desc_count - 1))); \
+})
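The "always use" rule above exists because the macro masks the free-running index down to the ring size, which assumes dma_desc_count is a power of two. A hypothetical caller (struct xlgmac_ring is defined later in this header):

/* Sketch only, not part of the patch: descriptor-data lookup for the
 * ring's current slot; callers never reduce cur/dirty themselves.
 */
static inline struct xlgmac_desc_data *
xlgmac_cur_desc_data(struct xlgmac_ring *ring)
{
	return XLGMAC_GET_DESC_DATA(ring, ring->cur);
}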
+
+#define XLGMAC_GET_REG_BITS(var, pos, len) ({ \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ ((var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
+})
+
+#define XLGMAC_GET_REG_BITS_LE(var, pos, len) ({ \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(var) _var = le32_to_cpu((var)); \
+ ((_var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \
+})
+
+#define XLGMAC_SET_REG_BITS(var, pos, len, val) ({ \
+ typeof(var) _var = (var); \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(val) _val = (val); \
+ _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
+ _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
+})
+
+#define XLGMAC_SET_REG_BITS_LE(var, pos, len, val) ({ \
+ typeof(var) _var = (var); \
+ typeof(pos) _pos = (pos); \
+ typeof(len) _len = (len); \
+ typeof(val) _val = (val); \
+ _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \
+ _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \
+ cpu_to_le32(_var); \
+})
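These four accessors pair with the *_POS/*_LEN defines from dwc-xlgmac-reg.h for every register and descriptor field. A read-modify-write sketch under that assumption (the function name is illustrative only, and dma_regs is assumed to be a channel's mapped register base):

/* Sketch only, not part of the patch: program the RX interrupt
 * watchdog timer field of one DMA channel using the generic
 * accessors plus a _POS/_LEN pair.
 */
static inline void xlgmac_set_rx_watchdog(void __iomem *dma_regs,
					  unsigned int riwt)
{
	u32 val = readl(dma_regs + DMA_CH_RIWT);

	val = XLGMAC_SET_REG_BITS(val, DMA_CH_RIWT_RWT_POS,
				  DMA_CH_RIWT_RWT_LEN, riwt);
	writel(val, dma_regs + DMA_CH_RIWT);
}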
+
+struct xlgmac_pdata;
+
+enum xlgmac_int {
+ XLGMAC_INT_DMA_CH_SR_TI,
+ XLGMAC_INT_DMA_CH_SR_TPS,
+ XLGMAC_INT_DMA_CH_SR_TBU,
+ XLGMAC_INT_DMA_CH_SR_RI,
+ XLGMAC_INT_DMA_CH_SR_RBU,
+ XLGMAC_INT_DMA_CH_SR_RPS,
+ XLGMAC_INT_DMA_CH_SR_TI_RI,
+ XLGMAC_INT_DMA_CH_SR_FBE,
+ XLGMAC_INT_DMA_ALL,
+};
+
+struct xlgmac_stats {
+ /* MMC TX counters */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* MMC RX counters */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+
+ /* Extra counters */
+ u64 tx_tso_packets;
+ u64 rx_split_header_packets;
+ u64 tx_process_stopped;
+ u64 rx_process_stopped;
+ u64 tx_buffer_unavailable;
+ u64 rx_buffer_unavailable;
+ u64 fatal_bus_error;
+ u64 tx_vlan_packets;
+ u64 rx_vlan_packets;
+ u64 napi_poll_isr;
+ u64 napi_poll_txtimer;
+};
+
+struct xlgmac_ring_buf {
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ unsigned int skb_len;
+};
+
+/* Common Tx and Rx DMA hardware descriptor */
+struct xlgmac_dma_desc {
+ __le32 desc0;
+ __le32 desc1;
+ __le32 desc2;
+ __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xlgmac_page_alloc {
+ struct page *pages;
+ unsigned int pages_len;
+ unsigned int pages_offset;
+
+ dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xlgmac_buffer_data {
+ struct xlgmac_page_alloc pa;
+ struct xlgmac_page_alloc pa_unmap;
+
+ dma_addr_t dma_base;
+ unsigned long dma_off;
+ unsigned int dma_len;
+};
+
+/* Tx-related desc data */
+struct xlgmac_tx_desc_data {
+ unsigned int packets; /* BQL packet count */
+ unsigned int bytes; /* BQL byte count */
+};
+
+/* Rx-related desc data */
+struct xlgmac_rx_desc_data {
+ struct xlgmac_buffer_data hdr; /* Header locations */
+ struct xlgmac_buffer_data buf; /* Payload locations */
+
+ unsigned short hdr_len; /* Length of received header */
+ unsigned short len; /* Length of received packet */
+};
+
+struct xlgmac_pkt_info {
+ struct sk_buff *skb;
+
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ /* descriptors needed for this packet */
+ unsigned int desc_count;
+ unsigned int length;
+
+ unsigned int tx_packets;
+ unsigned int tx_bytes;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+
+ u64 rx_tstamp;
+
+ u32 rss_hash;
+ enum pkt_hash_types rss_hash_type;
+};
+
+struct xlgmac_desc_data {
+ /* dma_desc: Virtual address of descriptor
+ * dma_desc_addr: DMA address of descriptor
+ */
+ struct xlgmac_dma_desc *dma_desc;
+ dma_addr_t dma_desc_addr;
+
+ /* skb: Virtual address of SKB
+ * skb_dma: DMA address of SKB data
+ * skb_dma_len: Length of SKB DMA area
+ */
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ unsigned int skb_dma_len;
+
+ /* Tx/Rx -related data */
+ struct xlgmac_tx_desc_data tx;
+ struct xlgmac_rx_desc_data rx;
+
+ unsigned int mapped_as_page;
+
+ /* Incomplete receive save location. If the budget is exhausted
+ * or the last descriptor (last normal descriptor or a following
+	 * context descriptor) has not been DMA'd yet, the current state
+ * of the receive processing needs to be saved.
+ */
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
+};
+
+struct xlgmac_ring {
+ /* Per packet related information */
+ struct xlgmac_pkt_info pkt_info;
+
+ /* Virtual/DMA addresses of DMA descriptor list and the total count */
+ struct xlgmac_dma_desc *dma_desc_head;
+ dma_addr_t dma_desc_head_addr;
+ unsigned int dma_desc_count;
+
+	/* Array of descriptor data corresponding to the DMA descriptors
+ * (always use the XLGMAC_GET_DESC_DATA macro to access this data)
+ */
+ struct xlgmac_desc_data *desc_data_head;
+
+ /* Page allocation for RX buffers */
+ struct xlgmac_page_alloc rx_hdr_pa;
+ struct xlgmac_page_alloc rx_buf_pa;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: index of descriptor to check for buffer reallocation
+ */
+ unsigned int cur;
+ unsigned int dirty;
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int xmit_more;
+ unsigned int queue_stopped;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+ };
+} ____cacheline_aligned;
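Because cur and dirty run free (only XLGMAC_GET_DESC_DATA() reduces them modulo the ring size), the occupancy arithmetic needs no explicit wrap handling. A minimal sketch with an invented helper name:

/* Sketch only, not part of the patch: TX descriptors still free for
 * new mappings; unsigned subtraction absorbs index wrap-around.
 */
static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return ring->dma_desc_count - (ring->cur - ring->dirty);
}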
+
+struct xlgmac_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xlgmac_pdata *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+	/* Per-channel interrupt (IRQ) number */
+ int dma_irq;
+ char dma_irq_name[IFNAMSIZ + 32];
+
+ /* Netdev related settings */
+ struct napi_struct napi;
+
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct timer_list tx_timer;
+
+ struct xlgmac_ring *tx_ring;
+ struct xlgmac_ring *rx_ring;
+} ____cacheline_aligned;
+
+struct xlgmac_desc_ops {
+ int (*alloc_channles_and_rings)(struct xlgmac_pdata *pdata);
+ void (*free_channels_and_rings)(struct xlgmac_pdata *pdata);
+ int (*map_tx_skb)(struct xlgmac_channel *channel,
+ struct sk_buff *skb);
+ int (*map_rx_buffer)(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ struct xlgmac_desc_data *desc_data);
+ void (*unmap_desc_data)(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data);
+ void (*tx_desc_init)(struct xlgmac_pdata *pdata);
+ void (*rx_desc_init)(struct xlgmac_pdata *pdata);
+};
+
+struct xlgmac_hw_ops {
+ int (*init)(struct xlgmac_pdata *pdata);
+ int (*exit)(struct xlgmac_pdata *pdata);
+
+ int (*tx_complete)(struct xlgmac_dma_desc *dma_desc);
+
+ void (*enable_tx)(struct xlgmac_pdata *pdata);
+ void (*disable_tx)(struct xlgmac_pdata *pdata);
+ void (*enable_rx)(struct xlgmac_pdata *pdata);
+ void (*disable_rx)(struct xlgmac_pdata *pdata);
+
+ int (*enable_int)(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id);
+ int (*disable_int)(struct xlgmac_channel *channel,
+ enum xlgmac_int int_id);
+ void (*dev_xmit)(struct xlgmac_channel *channel);
+ int (*dev_read)(struct xlgmac_channel *channel);
+
+ int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr);
+ int (*config_rx_mode)(struct xlgmac_pdata *pdata);
+ int (*enable_rx_csum)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_csum)(struct xlgmac_pdata *pdata);
+
+	/* For XLGMII speed configuration */
+ int (*set_xlgmii_25000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_40000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_50000_speed)(struct xlgmac_pdata *pdata);
+ int (*set_xlgmii_100000_speed)(struct xlgmac_pdata *pdata);
+
+ /* For descriptor related operation */
+ void (*tx_desc_init)(struct xlgmac_channel *channel);
+ void (*rx_desc_init)(struct xlgmac_channel *channel);
+ void (*tx_desc_reset)(struct xlgmac_desc_data *desc_data);
+ void (*rx_desc_reset)(struct xlgmac_pdata *pdata,
+ struct xlgmac_desc_data *desc_data,
+ unsigned int index);
+ int (*is_last_desc)(struct xlgmac_dma_desc *dma_desc);
+ int (*is_context_desc)(struct xlgmac_dma_desc *dma_desc);
+ void (*tx_start_xmit)(struct xlgmac_channel *channel,
+ struct xlgmac_ring *ring);
+
+ /* For Flow Control */
+ int (*config_tx_flow_control)(struct xlgmac_pdata *pdata);
+ int (*config_rx_flow_control)(struct xlgmac_pdata *pdata);
+
+	/* For VLAN related config */
+ int (*enable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_vlan_stripping)(struct xlgmac_pdata *pdata);
+ int (*enable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
+ int (*disable_rx_vlan_filtering)(struct xlgmac_pdata *pdata);
+ int (*update_vlan_hash_table)(struct xlgmac_pdata *pdata);
+
+ /* For RX coalescing */
+ int (*config_rx_coalesce)(struct xlgmac_pdata *pdata);
+ int (*config_tx_coalesce)(struct xlgmac_pdata *pdata);
+ unsigned int (*usec_to_riwt)(struct xlgmac_pdata *pdata,
+ unsigned int usec);
+ unsigned int (*riwt_to_usec)(struct xlgmac_pdata *pdata,
+ unsigned int riwt);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+ int (*config_tx_threshold)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+ int (*config_tsf_mode)(struct xlgmac_pdata *pdata,
+ unsigned int val);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xlgmac_pdata *pdata);
+
+ /* For RX and TX PBL config */
+ int (*config_rx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*get_rx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*config_tx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*get_tx_pbl_val)(struct xlgmac_pdata *pdata);
+ int (*config_pblx8)(struct xlgmac_pdata *pdata);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xlgmac_pdata *pdata);
+ void (*tx_mmc_int)(struct xlgmac_pdata *pdata);
+ void (*read_mmc_stats)(struct xlgmac_pdata *pdata);
+
+ /* For Receive Side Scaling */
+ int (*enable_rss)(struct xlgmac_pdata *pdata);
+ int (*disable_rss)(struct xlgmac_pdata *pdata);
+ int (*set_rss_hash_key)(struct xlgmac_pdata *pdata,
+ const u8 *key);
+ int (*set_rss_lookup_table)(struct xlgmac_pdata *pdata,
+ const u32 *table);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xlgmac_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int phyifsel; /* PHY interface support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advance Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xlgmac_resources {
+ void __iomem *addr;
+ int irq;
+};
+
+struct xlgmac_pdata {
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct xlgmac_hw_ops hw_ops;
+ struct xlgmac_desc_ops desc_ops;
+
+ /* Device statistics */
+ struct xlgmac_stats stats;
+
+ u32 msg_enable;
+
+ /* MAC registers base */
+ void __iomem *mac_regs;
+
+ /* Hardware features of the device */
+ struct xlgmac_hw_features hw_feat;
+
+ struct work_struct restart_work;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xlgmac_channel *channel_head;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_desc_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_usecs;
+ unsigned int rx_frames;
+
+ /* Current Rx buffer size */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+
+ /* Device interrupt number */
+ int dev_irq;
+ unsigned int per_channel_irq;
+ int channel_irq[XLGMAC_MAX_DMA_CHANNELS];
+
+ /* Netdev related settings */
+ unsigned char mac_addr[ETH_ALEN];
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+
+ /* Filtering support */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* Device clocks */
+ unsigned long sysclk_rate;
+
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[XLGMAC_RSS_HASH_KEY_SIZE];
+ u32 rss_table[XLGMAC_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
+ int phy_speed;
+
+ char drv_name[32];
+ char drv_ver[32];
+};
+
+void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops);
+void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops);
+const struct net_device_ops *xlgmac_get_netdev_ops(void);
+const struct ethtool_ops *xlgmac_get_ethtool_ops(void);
+void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx,
+ unsigned int count,
+ unsigned int flag);
+void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata,
+ struct xlgmac_ring *ring,
+ unsigned int idx);
+void xlgmac_print_pkt(struct net_device *netdev,
+ struct sk_buff *skb, bool tx_rx);
+void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata);
+void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata);
+int xlgmac_drv_probe(struct device *dev,
+ struct xlgmac_resources *res);
+int xlgmac_drv_remove(struct device *dev);
+
+/* For debug prints */
+#ifdef XLGMAC_DEBUG
+#define XLGMAC_PR(fmt, args...) \
+ pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args)
+#else
+#define XLGMAC_PR(x...) do { } while (0)
+#endif
+
+#endif /* __DWC_XLGMAC_H__ */
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index f864fd0663db..711fbbbc4b1f 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2124,33 +2124,26 @@ static const char
};
/*
- * bdx_get_settings - get device-specific settings
+ * bdx_get_link_ksettings - get device-specific settings
* @netdev
* @ecmd
*/
-static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
- u32 rdintcm;
- u32 tdintcm;
- struct bdx_priv *priv = netdev_priv(netdev);
-
- rdintcm = priv->rdintcm;
- tdintcm = priv->tdintcm;
-
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- ecmd->duplex = DUPLEX_FULL;
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */
- ecmd->autoneg = AUTONEG_DISABLE;
-
- /* PCK_TH measures in multiples of FIFO bytes
- We translate to packets */
- ecmd->maxtxpkt =
- ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
- ecmd->maxrxpkt =
- ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
+static int bdx_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
+
+ ecmd->base.speed = SPEED_10000;
+ ecmd->base.duplex = DUPLEX_FULL;
+ ecmd->base.port = PORT_FIBRE;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -2384,7 +2377,6 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
static void bdx_set_ethtool_ops(struct net_device *netdev)
{
static const struct ethtool_ops bdx_ethtool_ops = {
- .get_settings = bdx_get_settings,
.get_drvinfo = bdx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_coalesce = bdx_get_coalesce,
@@ -2394,6 +2386,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
.get_strings = bdx_get_strings,
.get_sset_count = bdx_get_sset_count,
.get_ethtool_stats = bdx_get_ethtool_stats,
+ .get_link_ksettings = bdx_get_link_ksettings,
};
netdev->ethtool_ops = &bdx_ethtool_ops;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 7c7ae0890e90..729a7da90b5b 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1134,7 +1134,6 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
u32 buf_len = skb_frag_size(frag);
dma_addr_t desc_dma;
u32 desc_dma_32;
- u32 pkt_info;
dma_addr = dma_map_page(dev, page, page_offset, buf_len,
DMA_TO_DEVICE);
@@ -1151,9 +1150,6 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
}
desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
- pkt_info =
- (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
- KNAV_DMA_DESC_RETQ_SHIFT;
set_pkt_info(dma_addr, buf_len, 0, ndesc);
desc_dma_32 = (u32)desc_dma;
set_words(&desc_dma_32, 1, &pdesc->next_desc);
@@ -1882,6 +1878,7 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
{
+ u8 num_tc;
int i;
/* setup tc must be called under rtnl lock */
@@ -1890,15 +1887,18 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = tc->mqprio->num_tc;
+
/* Sanity-check the number of traffic classes requested */
if ((dev->real_num_tx_queues <= 1) ||
- (dev->real_num_tx_queues < tc->tc))
+ (dev->real_num_tx_queues < num_tc))
return -EINVAL;
/* Configure traffic class to queue mappings */
- if (tc->tc) {
- netdev_set_num_tc(dev, tc->tc);
- for (i = 0; i < tc->tc; i++)
+ if (num_tc) {
+ netdev_set_num_tc(dev, num_tc);
+ for (i = 0; i < num_tc; i++)
netdev_set_tc_queue(dev, i, 1, i);
} else {
netdev_reset_tc(dev);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index eece3e2eec14..897176fc5043 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3048,8 +3048,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
for_each_child_of_node(node, port) {
slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
if (!slave) {
- dev_err(dev,
- "memomry alloc failed for secondary port(%s), skipping...\n",
+ dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n",
port->name);
continue;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 72013314bba8..fa6a06571187 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1206,61 +1206,68 @@ void gelic_net_get_drvinfo(struct net_device *netdev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int gelic_ether_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int gelic_ether_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct gelic_card *card = netdev_card(netdev);
+ u32 supported, advertising;
gelic_card_get_ether_port_status(card, 0);
if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX)
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
case GELIC_LV1_ETHER_SPEED_10:
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
case GELIC_LV1_ETHER_SPEED_100:
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
case GELIC_LV1_ETHER_SPEED_1000:
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
default:
pr_info("%s: speed unknown\n", __func__);
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
}
- cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
+ supported = SUPPORTED_TP | SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full;
- cmd->advertising = cmd->supported;
+ advertising = supported;
if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) {
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->advertising &= ~ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ advertising &= ~ADVERTISED_Autoneg;
}
- cmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int gelic_ether_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int
+gelic_ether_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct gelic_card *card = netdev_card(netdev);
u64 mode;
int ret;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
mode = GELIC_LV1_ETHER_AUTO_NEG;
} else {
- switch (cmd->speed) {
+ switch (cmd->base.speed) {
case SPEED_10:
mode = GELIC_LV1_ETHER_SPEED_10;
break;
@@ -1273,9 +1280,9 @@ static int gelic_ether_set_settings(struct net_device *netdev,
default:
return -EINVAL;
}
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL) {
mode |= GELIC_LV1_ETHER_FULL_DUPLEX;
- else if (cmd->speed == SPEED_1000) {
+ } else if (cmd->base.speed == SPEED_1000) {
pr_info("1000 half duplex is not supported.\n");
return -EINVAL;
}
@@ -1370,11 +1377,11 @@ done:
static const struct ethtool_ops gelic_ether_ethtool_ops = {
.get_drvinfo = gelic_net_get_drvinfo,
- .get_settings = gelic_ether_get_settings,
- .set_settings = gelic_ether_set_settings,
.get_link = ethtool_op_get_link,
.get_wol = gelic_net_get_wol,
.set_wol = gelic_net_set_wol,
+ .get_link_ksettings = gelic_ether_get_link_ksettings,
+ .set_link_ksettings = gelic_ether_set_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index ffe519382e11..16bd036d0682 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -47,19 +47,23 @@ static struct {
};
static int
-spider_net_ethtool_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct spider_net_card *card;
card = netdev_priv(netdev);
- cmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- cmd->port = PORT_FIBRE;
- ethtool_cmd_speed_set(cmd, card->phy.speed);
- cmd->duplex = DUPLEX_FULL;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.speed = card->phy.speed;
+ cmd->base.duplex = DUPLEX_FULL;
return 0;
}
@@ -166,7 +170,6 @@ static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
}
const struct ethtool_ops spider_net_ethtool_ops = {
- .get_settings = spider_net_ethtool_get_settings,
.get_drvinfo = spider_net_ethtool_get_drvinfo,
.get_wol = spider_net_ethtool_get_wol,
.get_msglevel = spider_net_ethtool_get_msglevel,
@@ -177,5 +180,6 @@ const struct ethtool_ops spider_net_ethtool_ops = {
.get_strings = spider_net_get_strings,
.get_sset_count = spider_net_get_sset_count,
.get_ethtool_stats = spider_net_get_ethtool_stats,
+ .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c5583991da4a..5ac6eaa9e785 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1499,27 +1499,29 @@ static void tsi108_init_mac(struct net_device *dev)
TSI_WRITE(TSI108_EC_INTMASK, ~0);
}
-static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tsi108_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
- rc = mii_ethtool_gset(&data->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
}
-static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tsi108_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct tsi108_prv_data *data = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave(&data->txlock, flags);
- rc = mii_ethtool_sset(&data->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&data->mii_if, cmd);
spin_unlock_irqrestore(&data->txlock, flags);
return rc;
@@ -1535,8 +1537,8 @@ static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static const struct ethtool_ops tsi108_ethtool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = tsi108_get_settings,
- .set_settings = tsi108_set_settings,
+ .get_link_ksettings = tsi108_get_link_ksettings,
+ .set_link_ksettings = tsi108_set_link_ksettings,
};
static const struct net_device_ops tsi108_netdev_ops = {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c068c58428f7..4cf41f779d0e 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2303,25 +2303,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct rhine_private *rp = netdev_priv(dev);
int rc;
mutex_lock(&rp->task_lock);
- rc = mii_ethtool_gset(&rp->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
mutex_unlock(&rp->task_lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct rhine_private *rp = netdev_priv(dev);
int rc;
mutex_lock(&rp->task_lock);
- rc = mii_ethtool_sset(&rp->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
rhine_set_carrier(&rp->mii_if);
mutex_unlock(&rp->task_lock);
@@ -2391,14 +2393,14 @@ static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
.get_link = netdev_get_link,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
.get_wol = rhine_get_wol,
.set_wol = rhine_set_wol,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d088788b27a7..ef9538ee53d0 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3291,15 +3291,17 @@ static void velocity_ethtool_down(struct net_device *dev)
velocity_set_power_state(vptr, PCI_D3hot);
}
-static int velocity_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int velocity_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem *regs = vptr->mac_regs;
u32 status;
+ u32 supported, advertising;
+
status = check_connection_type(vptr->mac_regs);
- cmd->supported = SUPPORTED_TP |
+ supported = SUPPORTED_TP |
SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
@@ -3308,9 +3310,9 @@ static int velocity_get_settings(struct net_device *dev,
SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full;
- cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+ advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
- cmd->advertising |=
+ advertising |=
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
@@ -3320,19 +3322,19 @@ static int velocity_get_settings(struct net_device *dev,
} else {
switch (vptr->options.spd_dpx) {
case SPD_DPX_1000_FULL:
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
break;
case SPD_DPX_100_HALF:
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
break;
case SPD_DPX_100_FULL:
- cmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
break;
case SPD_DPX_10_HALF:
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
break;
case SPD_DPX_10_FULL:
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
break;
default:
break;
@@ -3340,30 +3342,35 @@ static int velocity_get_settings(struct net_device *dev,
}
if (status & VELOCITY_SPEED_1000)
- ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
else if (status & VELOCITY_SPEED_100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
else
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
- cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
- cmd->port = PORT_TP;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
+ cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
if (status & VELOCITY_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int velocity_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int velocity_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
u32 curr_status;
u32 new_status = 0;
int ret = 0;
@@ -3371,11 +3378,12 @@ static int velocity_set_settings(struct net_device *dev,
curr_status = check_connection_type(vptr->mac_regs);
curr_status &= (~VELOCITY_LINK_FAIL);
- new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
+ new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
- new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
+ new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
+ VELOCITY_DUPLEX_FULL : 0);
if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
(new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
@@ -3644,8 +3652,6 @@ static void velocity_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops velocity_ethtool_ops = {
- .get_settings = velocity_get_settings,
- .set_settings = velocity_set_settings,
.get_drvinfo = velocity_get_drvinfo,
.get_wol = velocity_ethtool_get_wol,
.set_wol = velocity_ethtool_set_wol,
@@ -3658,7 +3664,9 @@ static const struct ethtool_ops velocity_ethtool_ops = {
.get_coalesce = velocity_get_coalesce,
.set_coalesce = velocity_set_coalesce,
.begin = velocity_ethtool_up,
- .complete = velocity_ethtool_down
+ .complete = velocity_ethtool_down,
+ .get_link_ksettings = velocity_get_link_ksettings,
+ .set_link_ksettings = velocity_set_link_ksettings,
};
#if defined(CONFIG_PM) && defined(CONFIG_INET)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index f90267f0519f..2bdfb39215e9 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1152,7 +1152,8 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
if (err < 0)
goto err_register;
- priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
+ priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ netdev_name(ndev));
if (!priv->xfer_wq) {
err = -ENOMEM;
goto err_wq;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b96e96919e31..33c595f4691d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -301,7 +301,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
if (address)
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 6575f880f1be..7d101714c2ef 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -175,16 +175,15 @@ static void fjes_get_drvinfo(struct net_device *netdev,
"platform:%s", plat_dev->name);
}
-static int fjes_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int fjes_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
- ecmd->supported = 0;
- ecmd->advertising = 0;
- ecmd->duplex = DUPLEX_FULL;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = PORT_NONE;
- ethtool_cmd_speed_set(ecmd, 20000); /* 20Gb/s */
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ecmd->base.duplex = DUPLEX_FULL;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ ecmd->base.port = PORT_NONE;
+ ecmd->base.speed = 20000; /* 20Gb/s */
return 0;
}
@@ -296,7 +295,6 @@ static int fjes_get_dump_data(struct net_device *netdev,
}
static const struct ethtool_ops fjes_ethtool_ops = {
- .get_settings = fjes_get_settings,
.get_drvinfo = fjes_get_drvinfo,
.get_ethtool_stats = fjes_get_ethtool_stats,
.get_strings = fjes_get_strings,
@@ -306,6 +304,7 @@ static const struct ethtool_ops fjes_ethtool_ops = {
.set_dump = fjes_set_dump,
.get_dump_flag = fjes_get_dump_flag,
.get_dump_data = fjes_get_dump_data,
+ .get_link_ksettings = fjes_get_link_ksettings,
};
void fjes_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7074b40ebd7f..dec5d563ab19 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1244,7 +1244,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
metadata = true;
if (data[IFLA_GENEVE_UDP_CSUM] &&
- !nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
+ nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
info.key.tun_flags |= TUNNEL_CSUM;
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] &&
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 89698741682f..4fea1b3dfbb4 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -56,7 +56,10 @@ struct pdp_ctx {
u16 af;
struct in_addr ms_addr_ip4;
- struct in_addr sgsn_addr_ip4;
+ struct in_addr peer_addr_ip4;
+
+ struct sock *sk;
+ struct net_device *dev;
atomic_t tx_seq;
struct rcu_head rcu_head;
@@ -66,11 +69,12 @@ struct pdp_ctx {
struct gtp_dev {
struct list_head list;
- struct socket *sock0;
- struct socket *sock1u;
+ struct sock *sk0;
+ struct sock *sk1u;
struct net_device *dev;
+ unsigned int role;
unsigned int hash_size;
struct hlist_head *tid_hash;
struct hlist_head *addr_hash;
@@ -84,6 +88,8 @@ struct gtp_net {
static u32 gtp_h_initval;
+static void pdp_context_delete(struct pdp_ctx *pctx);
+
static inline u32 gtp0_hashfn(u64 tid)
{
u32 *tid32 = (u32 *) &tid;
@@ -149,8 +155,8 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
return NULL;
}
-static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
- unsigned int hdrlen)
+static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
+ unsigned int hdrlen, unsigned int role)
{
struct iphdr *iph;
@@ -159,25 +165,62 @@ static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
iph = (struct iphdr *)(skb->data + hdrlen);
- return iph->saddr == pctx->ms_addr_ip4.s_addr;
+ if (role == GTP_ROLE_SGSN)
+ return iph->daddr == pctx->ms_addr_ip4.s_addr;
+ else
+ return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
-/* Check if the inner IP source address in this packet is assigned to any
+/* Check if the inner IP address in this packet is assigned to any
* existing mobile subscriber.
*/
-static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
- unsigned int hdrlen)
+static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
+ unsigned int hdrlen, unsigned int role)
{
switch (ntohs(skb->protocol)) {
case ETH_P_IP:
- return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
+ return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
}
return false;
}
+static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
+ unsigned int hdrlen, unsigned int role)
+{
+ struct pcpu_sw_netstats *stats;
+
+ if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
+ netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
+ return 1;
+ }
+
+ /* Get rid of the GTP + UDP headers. */
+ if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
+ !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
+ return -1;
+
+ netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
+
+ /* Now that the UDP and the GTP header have been removed, set up the
+ * new network header. This is required by the upper layer to
+ * calculate the transport header.
+ */
+ skb_reset_network_header(skb);
+
+ skb->dev = pctx->dev;
+
+ stats = this_cpu_ptr(pctx->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netif_rx(skb);
+ return 0;
+}
+
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
-static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
- bool xnet)
+static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp0_header);
@@ -201,17 +244,10 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
return 1;
}
- if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
- netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
- return 1;
- }
-
- /* Get rid of the GTP + UDP headers. */
- return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+ return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
-static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
- bool xnet)
+static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp1_header);
@@ -250,37 +286,33 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
return 1;
}
- if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
- netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
- return 1;
- }
-
- /* Get rid of the GTP + UDP headers. */
- return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+ return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
-static void gtp_encap_disable(struct gtp_dev *gtp)
+static void gtp_encap_destroy(struct sock *sk)
{
- if (gtp->sock0 && gtp->sock0->sk) {
- udp_sk(gtp->sock0->sk)->encap_type = 0;
- rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
- }
- if (gtp->sock1u && gtp->sock1u->sk) {
- udp_sk(gtp->sock1u->sk)->encap_type = 0;
- rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
- }
+ struct gtp_dev *gtp;
- gtp->sock0 = NULL;
- gtp->sock1u = NULL;
+ gtp = rcu_dereference_sk_user_data(sk);
+ if (gtp) {
+ udp_sk(sk)->encap_type = 0;
+ rcu_assign_sk_user_data(sk, NULL);
+ sock_put(sk);
+ }
}
-static void gtp_encap_destroy(struct sock *sk)
+static void gtp_encap_disable_sock(struct sock *sk)
{
- struct gtp_dev *gtp;
+ if (!sk)
+ return;
- gtp = rcu_dereference_sk_user_data(sk);
- if (gtp)
- gtp_encap_disable(gtp);
+ gtp_encap_destroy(sk);
+}
+
+static void gtp_encap_disable(struct gtp_dev *gtp)
+{
+ gtp_encap_disable_sock(gtp->sk0);
+ gtp_encap_disable_sock(gtp->sk1u);
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -288,10 +320,8 @@ static void gtp_encap_destroy(struct sock *sk)
*/
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
- struct pcpu_sw_netstats *stats;
struct gtp_dev *gtp;
- bool xnet;
- int ret;
+ int ret = 0;
gtp = rcu_dereference_sk_user_data(sk);
if (!gtp)
@@ -299,16 +329,14 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
- xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
-
switch (udp_sk(sk)->encap_type) {
case UDP_ENCAP_GTP0:
netdev_dbg(gtp->dev, "received GTP0 packet\n");
- ret = gtp0_udp_encap_recv(gtp, skb, xnet);
+ ret = gtp0_udp_encap_recv(gtp, skb);
break;
case UDP_ENCAP_GTP1U:
netdev_dbg(gtp->dev, "received GTP1U packet\n");
- ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
+ ret = gtp1u_udp_encap_recv(gtp, skb);
break;
default:
ret = -1; /* Shouldn't happen. */
@@ -317,33 +345,17 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
switch (ret) {
case 1:
netdev_dbg(gtp->dev, "pass up to the process\n");
- return 1;
+ break;
case 0:
- netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
break;
case -1:
netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
kfree_skb(skb);
- return 0;
+ ret = 0;
+ break;
}
- /* Now that the UDP and the GTP header have been removed, set up the
- * new network header. This is required by the upper layer to
- * calculate the transport header.
- */
- skb_reset_network_header(skb);
-
- skb->dev = gtp->dev;
-
- stats = this_cpu_ptr(gtp->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
- netif_rx(skb);
-
- return 0;
+ return ret;
}
static int gtp_dev_init(struct net_device *dev)
@@ -367,8 +379,9 @@ static void gtp_dev_uninit(struct net_device *dev)
free_percpu(dev->tstats);
}
-static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
- const struct sock *sk, __be32 daddr)
+static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
+ const struct sock *sk,
+ __be32 daddr)
{
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_oif = sk->sk_bound_dev_if;
@@ -377,7 +390,7 @@ static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
fl4->flowi4_tos = RT_CONN_FLAGS(sk);
fl4->flowi4_proto = sk->sk_protocol;
- return ip_route_output_key(net, fl4);
+ return ip_route_output_key(sock_net(sk), fl4);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
@@ -466,7 +479,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt;
struct flowi4 fl4;
struct iphdr *iph;
- struct sock *sk;
__be16 df;
int mtu;
@@ -474,7 +486,11 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
* Prepend PDP header with TEI/TID from PDP ctx.
*/
iph = ip_hdr(skb);
- pctx = ipv4_pdp_find(gtp, iph->daddr);
+ if (gtp->role == GTP_ROLE_SGSN)
+ pctx = ipv4_pdp_find(gtp, iph->saddr);
+ else
+ pctx = ipv4_pdp_find(gtp, iph->daddr);
+
if (!pctx) {
netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
&iph->daddr);
@@ -482,40 +498,17 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
}
netdev_dbg(dev, "found PDP context %p\n", pctx);
- switch (pctx->gtp_version) {
- case GTP_V0:
- if (gtp->sock0)
- sk = gtp->sock0->sk;
- else
- sk = NULL;
- break;
- case GTP_V1:
- if (gtp->sock1u)
- sk = gtp->sock1u->sk;
- else
- sk = NULL;
- break;
- default:
- return -ENOENT;
- }
-
- if (!sk) {
- netdev_dbg(dev, "no userspace socket is available, skip\n");
- return -ENOENT;
- }
-
- rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
- pctx->sgsn_addr_ip4.s_addr);
+ rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to SSGN %pI4\n",
- &pctx->sgsn_addr_ip4.s_addr);
+ &pctx->peer_addr_ip4.s_addr);
dev->stats.tx_carrier_errors++;
goto err;
}
if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to SSGN %pI4\n",
- &pctx->sgsn_addr_ip4.s_addr);
+ &pctx->peer_addr_ip4.s_addr);
dev->stats.collisions++;
goto err_rt;
}
@@ -550,7 +543,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
goto err_rt;
}
- gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
+ gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
gtp_push_header(skb, pktinfo);
return 0;
@@ -640,27 +633,23 @@ static void gtp_link_setup(struct net_device *dev)
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
-static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1);
+static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- int hashsize, err, fd0, fd1;
struct gtp_dev *gtp;
struct gtp_net *gn;
+ int hashsize, err;
- if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
+ if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
return -EINVAL;
gtp = netdev_priv(dev);
- fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
- fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
-
- err = gtp_encap_enable(dev, gtp, fd0, fd1);
+ err = gtp_encap_enable(gtp, data);
if (err < 0)
- goto out_err;
+ return err;
if (!data[IFLA_GTP_PDP_HASHSIZE])
hashsize = 1024;
@@ -688,7 +677,6 @@ out_hashtable:
gtp_hashtable_free(gtp);
out_encap:
gtp_encap_disable(gtp);
-out_err:
return err;
}
@@ -706,6 +694,7 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_FD0] = { .type = NLA_U32 },
[IFLA_GTP_FD1] = { .type = NLA_U32 },
[IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
+ [IFLA_GTP_ROLE] = { .type = NLA_U32 },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -747,21 +736,6 @@ static struct rtnl_link_ops gtp_link_ops __read_mostly = {
.fill_info = gtp_fill_info,
};
-static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
-{
- struct net *net;
-
- /* Examine the link attributes and figure out which network namespace
- * we are talking about.
- */
- if (tb[GTPA_NET_NS_FD])
- net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
- else
- net = get_net(src_net);
-
- return net;
-}
-
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
int i;
@@ -791,93 +765,127 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
struct pdp_ctx *pctx;
int i;
- for (i = 0; i < gtp->hash_size; i++) {
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
- hlist_del_rcu(&pctx->hlist_tid);
- hlist_del_rcu(&pctx->hlist_addr);
- kfree_rcu(pctx, rcu_head);
- }
- }
+ for (i = 0; i < gtp->hash_size; i++)
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ pdp_context_delete(pctx);
+
synchronize_rcu();
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
}
-static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1)
+static struct sock *gtp_encap_enable_socket(int fd, int type,
+ struct gtp_dev *gtp)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
- struct socket *sock0, *sock1u;
+ struct socket *sock;
+ struct sock *sk;
int err;
- netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);
-
- sock0 = sockfd_lookup(fd_gtp0, &err);
- if (sock0 == NULL) {
- netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
- return -ENOENT;
- }
+ pr_debug("enable gtp on %d, %d\n", fd, type);
- if (sock0->sk->sk_protocol != IPPROTO_UDP) {
- netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
- err = -EINVAL;
- goto err1;
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ pr_debug("gtp socket fd=%d not found\n", fd);
+ return NULL;
}
- sock1u = sockfd_lookup(fd_gtp1, &err);
- if (sock1u == NULL) {
- netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
- err = -ENOENT;
- goto err1;
+ if (sock->sk->sk_protocol != IPPROTO_UDP) {
+ pr_debug("socket fd=%d not UDP\n", fd);
+ sk = ERR_PTR(-EINVAL);
+ goto out_sock;
}
- if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
- netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
- err = -EINVAL;
- goto err2;
+ if (rcu_dereference_sk_user_data(sock->sk)) {
+ sk = ERR_PTR(-EBUSY);
+ goto out_sock;
}
- netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);
-
- gtp->sock0 = sock0;
- gtp->sock1u = sock1u;
+ sk = sock->sk;
+ sock_hold(sk);
tuncfg.sk_user_data = gtp;
+ tuncfg.encap_type = type;
tuncfg.encap_rcv = gtp_encap_recv;
tuncfg.encap_destroy = gtp_encap_destroy;
- tuncfg.encap_type = UDP_ENCAP_GTP0;
- setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
-
- tuncfg.encap_type = UDP_ENCAP_GTP1U;
- setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
+ setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
- err = 0;
-err2:
- sockfd_put(sock1u);
-err1:
- sockfd_put(sock0);
- return err;
+out_sock:
+ sockfd_put(sock);
+ return sk;
}
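
The helper above follows the usual UDP encapsulation hookup: validate the socket, take a reference, then hand callbacks to setup_udp_tunnel_sock(). A condensed sketch of that contract (names with my_ are illustrative):

struct udp_tunnel_sock_cfg cfg = { };

cfg.sk_user_data  = priv;              /* read back via rcu_dereference_sk_user_data() */
cfg.encap_type    = MY_ENCAP_TYPE;     /* illustrative constant */
cfg.encap_rcv     = my_encap_recv;     /* return >0: fall through to normal UDP,
                                        *         0: skb was consumed here */
cfg.encap_destroy = my_encap_destroy;  /* called when the socket goes away */
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg);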
-static struct net_device *gtp_find_dev(struct net *net, int ifindex)
+static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
+ struct sock *sk1u = NULL;
+ struct sock *sk0 = NULL;
+ unsigned int role = GTP_ROLE_GGSN;
- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
- if (ifindex == gtp->dev->ifindex)
- return gtp->dev;
+ if (data[IFLA_GTP_FD0]) {
+ u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+
+ sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
+ if (IS_ERR(sk0))
+ return PTR_ERR(sk0);
}
- return NULL;
+
+ if (data[IFLA_GTP_FD1]) {
+ u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+
+ sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
+ if (IS_ERR(sk1u)) {
+ if (sk0)
+ gtp_encap_disable_sock(sk0);
+ return PTR_ERR(sk1u);
+ }
+ }
+
+ if (data[IFLA_GTP_ROLE]) {
+ role = nla_get_u32(data[IFLA_GTP_ROLE]);
+ if (role > GTP_ROLE_SGSN) {
+ /* don't leak the socket references taken above */
+ gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk1u);
+ return -EINVAL;
+ }
+ }
+
+ gtp->sk0 = sk0;
+ gtp->sk1u = sk1u;
+ gtp->role = role;
+
+ return 0;
+}
+
+static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
+{
+ struct gtp_dev *gtp = NULL;
+ struct net_device *dev;
+ struct net *net;
+
+ /* Examine the link attributes and figure out which network namespace
+ * we are talking about.
+ */
+ if (nla[GTPA_NET_NS_FD])
+ net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
+ else
+ net = get_net(src_net);
+
+ if (IS_ERR(net))
+ return NULL;
+
+ /* Check if there's an existing gtpX device to configure */
+ dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
+ if (dev && dev->netdev_ops == &gtp_netdev_ops)
+ gtp = netdev_priv(dev);
+
+ put_net(net);
+ return gtp;
}
static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
pctx->af = AF_INET;
- pctx->sgsn_addr_ip4.s_addr =
- nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
+ pctx->peer_addr_ip4.s_addr =
+ nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
pctx->ms_addr_ip4.s_addr =
nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
@@ -899,9 +907,10 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
-static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
+static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+ struct genl_info *info)
{
- struct gtp_dev *gtp = netdev_priv(dev);
+ struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
struct pdp_ctx *pctx;
bool found = false;
@@ -940,6 +949,9 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
if (pctx == NULL)
return -ENOMEM;
+ sock_hold(sk);
+ pctx->sk = sk;
+ pctx->dev = gtp->dev;
ipv4_pdp_fill(pctx, info);
atomic_set(&pctx->tx_seq, 0);
@@ -963,31 +975,50 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
switch (pctx->gtp_version) {
case GTP_V0:
netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
- pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
+ pctx->u.v0.tid, &pctx->peer_addr_ip4,
&pctx->ms_addr_ip4, pctx);
break;
case GTP_V1:
netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei,
- &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
+ &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
break;
}
return 0;
}
+static void pdp_context_free(struct rcu_head *head)
+{
+ struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
+
+ sock_put(pctx->sk);
+ kfree(pctx);
+}
+
+static void pdp_context_delete(struct pdp_ctx *pctx)
+{
+ hlist_del_rcu(&pctx->hlist_tid);
+ hlist_del_rcu(&pctx->hlist_addr);
+ call_rcu(&pctx->rcu_head, pdp_context_free);
+}
+
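
pdp_context_delete()/pdp_context_free() above are the canonical RCU deletion pair: unlink first so new lookups miss the object, free only after pre-existing readers finish. A generic sketch of the same shape (struct obj is illustrative):

struct obj {
        struct hlist_node node;
        struct rcu_head rcu;
};

static void obj_free(struct rcu_head *head)
{
        struct obj *o = container_of(head, struct obj, rcu);

        kfree(o);                       /* runs after a grace period */
}

static void obj_delete(struct obj *o)
{
        hlist_del_rcu(&o->node);        /* unlink: new readers can't find it */
        call_rcu(&o->rcu, obj_free);    /* defer the free past current readers */
}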
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
- struct net_device *dev;
- struct net *net;
+ unsigned int version;
+ struct gtp_dev *gtp;
+ struct sock *sk;
+ int err;
if (!info->attrs[GTPA_VERSION] ||
!info->attrs[GTPA_LINK] ||
- !info->attrs[GTPA_SGSN_ADDRESS] ||
+ !info->attrs[GTPA_PEER_ADDRESS] ||
!info->attrs[GTPA_MS_ADDRESS])
return -EINVAL;
- switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);
+
+ switch (version) {
case GTP_V0:
if (!info->attrs[GTPA_TID] ||
!info->attrs[GTPA_FLOW])
@@ -1003,77 +1034,101 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
- net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
- if (IS_ERR(net))
- return PTR_ERR(net);
+ rcu_read_lock();
- /* Check if there's an existing gtpX device to configure */
- dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
- if (dev == NULL) {
- put_net(net);
- return -ENODEV;
+ gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
+ if (!gtp) {
+ err = -ENODEV;
+ goto out_unlock;
}
- put_net(net);
- return ipv4_pdp_add(dev, info);
+ if (version == GTP_V0)
+ sk = gtp->sk0;
+ else if (version == GTP_V1)
+ sk = gtp->sk1u;
+ else
+ sk = NULL;
+
+ if (!sk) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+
+ err = ipv4_pdp_add(gtp, sk, info);
+
+out_unlock:
+ rcu_read_unlock();
+ return err;
}
-static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
+ struct nlattr *nla[])
{
- struct net_device *dev;
- struct pdp_ctx *pctx;
struct gtp_dev *gtp;
- struct net *net;
- if (!info->attrs[GTPA_VERSION] ||
- !info->attrs[GTPA_LINK])
- return -EINVAL;
+ gtp = gtp_find_dev(net, nla);
+ if (!gtp)
+ return ERR_PTR(-ENODEV);
- net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
- if (IS_ERR(net))
- return PTR_ERR(net);
+ if (nla[GTPA_MS_ADDRESS]) {
+ __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
- /* Check if there's an existing gtpX device to configure */
- dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
- if (dev == NULL) {
- put_net(net);
- return -ENODEV;
+ return ipv4_pdp_find(gtp, ip);
+ } else if (nla[GTPA_VERSION]) {
+ u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
+
+ if (gtp_version == GTP_V0 && nla[GTPA_TID])
+ return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
+ else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
+ return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
}
- put_net(net);
- gtp = netdev_priv(dev);
+ return ERR_PTR(-EINVAL);
+}
- switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
- case GTP_V0:
- if (!info->attrs[GTPA_TID])
- return -EINVAL;
- pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
- break;
- case GTP_V1:
- if (!info->attrs[GTPA_I_TEI])
- return -EINVAL;
- pctx = gtp1_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_I_TEI]));
- break;
+static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
+{
+ struct pdp_ctx *pctx;
- default:
+ if (nla[GTPA_LINK])
+ pctx = gtp_find_pdp_by_link(net, nla);
+ else
+ pctx = ERR_PTR(-EINVAL);
+
+ if (!pctx)
+ pctx = ERR_PTR(-ENOENT);
+
+ return pctx;
+}
+
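
gtp_find_pdp() folds three outcomes into one pointer using the kernel's ERR_PTR()/IS_ERR() encoding: a valid pointer, an errno packed into the top page of the address space, or NULL (mapped to -ENOENT above). A simplified, runnable userspace model of that encoding (the real helpers live in <linux/err.h>):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* errno values occupy the last page of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *find(int exists)
{
        static char ctx[] = "pdp";

        return exists ? ctx : ERR_PTR(-ENOENT);
}

int main(void)
{
        void *p = find(0);

        if (IS_ERR(p))
                printf("lookup failed: %ld\n", PTR_ERR(p)); /* prints -2 */
        return 0;
}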
+static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct pdp_ctx *pctx;
+ int err = 0;
+
+ if (!info->attrs[GTPA_VERSION])
return -EINVAL;
- }
- if (pctx == NULL)
- return -ENOENT;
+ rcu_read_lock();
+
+ pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(pctx)) {
+ err = PTR_ERR(pctx);
+ goto out_unlock;
+ }
if (pctx->gtp_version == GTP_V0)
- netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
+ netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
pctx->u.v0.tid, pctx);
else if (pctx->gtp_version == GTP_V1)
- netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
+ netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
- hlist_del_rcu(&pctx->hlist_tid);
- hlist_del_rcu(&pctx->hlist_addr);
- kfree_rcu(pctx, rcu_head);
+ pdp_context_delete(pctx);
- return 0;
+out_unlock:
+ rcu_read_unlock();
+ return err;
}
static struct genl_family gtp_genl_family;
@@ -1089,7 +1144,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
goto nlmsg_failure;
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
- nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
+ nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
goto nla_put_failure;
@@ -1117,59 +1172,17 @@ nla_put_failure:
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
struct pdp_ctx *pctx = NULL;
- struct net_device *dev;
struct sk_buff *skb2;
- struct gtp_dev *gtp;
- u32 gtp_version;
- struct net *net;
int err;
- if (!info->attrs[GTPA_VERSION] ||
- !info->attrs[GTPA_LINK])
- return -EINVAL;
-
- gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
- switch (gtp_version) {
- case GTP_V0:
- case GTP_V1:
- break;
- default:
+ if (!info->attrs[GTPA_VERSION])
return -EINVAL;
- }
-
- net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
- if (IS_ERR(net))
- return PTR_ERR(net);
-
- /* Check if there's an existing gtpX device to configure */
- dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
- if (dev == NULL) {
- put_net(net);
- return -ENODEV;
- }
- put_net(net);
-
- gtp = netdev_priv(dev);
rcu_read_lock();
- if (gtp_version == GTP_V0 &&
- info->attrs[GTPA_TID]) {
- u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
-
- pctx = gtp0_pdp_find(gtp, tid);
- } else if (gtp_version == GTP_V1 &&
- info->attrs[GTPA_I_TEI]) {
- u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
-
- pctx = gtp1_pdp_find(gtp, tid);
- } else if (info->attrs[GTPA_MS_ADDRESS]) {
- __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
-
- pctx = ipv4_pdp_find(gtp, ip);
- }
- if (pctx == NULL) {
- err = -ENOENT;
+ pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(pctx)) {
+ err = PTR_ERR(pctx);
goto err_unlock;
}
@@ -1242,7 +1255,7 @@ static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_LINK] = { .type = NLA_U32, },
[GTPA_VERSION] = { .type = NLA_U32, },
[GTPA_TID] = { .type = NLA_U64, },
- [GTPA_SGSN_ADDRESS] = { .type = NLA_U32, },
+ [GTPA_PEER_ADDRESS] = { .type = NLA_U32, },
[GTPA_MS_ADDRESS] = { .type = NLA_U32, },
[GTPA_FLOW] = { .type = NLA_U16, },
[GTPA_NET_NS_FD] = { .type = NLA_U32, },
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index db23cb36ae5c..262b2ea576a3 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -196,6 +196,7 @@ int netvsc_recv_callback(struct net_device *net,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan);
void netvsc_channel_cb(void *context);
+int netvsc_poll(struct napi_struct *napi, int budget);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -632,7 +633,7 @@ struct nvsp_message {
#define NETVSC_PACKET_SIZE 4096
-#define VRSS_SEND_TAB_SIZE 16
+#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
#define VRSS_CHANNEL_DEFAULT 8
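
The "must be power of 2" note matters because netvsc_get_tx_queue() later indexes the table with hash & (VRSS_SEND_TAB_SIZE - 1); that mask only equals hash % size when size is a power of two. A quick runnable self-check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint32_t size = 16;       /* VRSS_SEND_TAB_SIZE */

        for (uint32_t h = 0; h < 4096; h++)
                assert((h & (size - 1)) == (h % size));
        return 0;
}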
@@ -685,7 +686,7 @@ struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
/* netvsc_device */
- struct netvsc_device *nvdev;
+ struct netvsc_device __rcu *nvdev;
/* reconfigure work */
struct delayed_work dwork;
/* last reconfig time */
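
Marking nvdev as __rcu changes how every access must be written; a hedged summary of the discipline the rest of this patch applies (ctx and do_work are illustrative):

/* writer, under RTNL: publish / retire the device */
rcu_assign_pointer(ctx->nvdev, new_dev);
RCU_INIT_POINTER(ctx->nvdev, NULL);     /* on teardown */

/* reader in softirq or other RCU section */
rcu_read_lock();
nvdev = rcu_dereference(ctx->nvdev);
if (nvdev)
        do_work(nvdev);
rcu_read_unlock();

/* reader that already holds RTNL */
nvdev = rtnl_dereference(ctx->nvdev);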
@@ -707,9 +708,6 @@ struct net_device_context {
u32 speed;
struct netvsc_ethtool_stats eth_stats;
- /* the device is going away */
- bool start_remove;
-
/* State to manage the associated VF interface. */
struct net_device __rcu *vf_netdev;
@@ -722,6 +720,8 @@ struct net_device_context {
/* Per channel data */
struct netvsc_channel {
struct vmbus_channel *channel;
+ const struct vmpacket_descriptor *desc;
+ struct napi_struct napi;
struct multi_send_data msd;
struct multi_recv_comp mrc;
atomic_t queue_sends;
@@ -760,8 +760,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;
- spinlock_t sc_lock; /* Protects num_sc_offered variable */
- u32 num_sc_offered;
+
+ refcount_t sc_offered;
/* Holds rndis device info */
void *extension;
@@ -776,6 +776,8 @@ struct netvsc_device {
atomic_t open_cnt;
struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
+
+ struct rcu_head rcu;
};
static inline struct netvsc_device *
@@ -1424,9 +1426,6 @@ struct rndis_message {
((void *) rndis_msg)
-#define __struct_bcount(x)
-
-
#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container))
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 15ef713d96c0..15749d359e60 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -80,8 +80,10 @@ static struct netvsc_device *alloc_net_device(void)
return net_device;
}
-static void free_netvsc_device(struct netvsc_device *nvdev)
+static void free_netvsc_device(struct rcu_head *head)
{
+ struct netvsc_device *nvdev
+ = container_of(head, struct netvsc_device, rcu);
int i;
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
@@ -90,14 +92,9 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
kfree(nvdev);
}
-
-static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
- u16 q_idx)
+static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
- const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
-
- return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
- atomic_read(&nvchan->queue_sends) == 0;
+ call_rcu(&nvdev->rcu, free_netvsc_device);
}
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
@@ -138,6 +135,13 @@ static void netvsc_destroy_buf(struct hv_device *device)
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
VM_PKT_DATA_INBAND, 0);
+ /* If the failure is because the channel has been rescinded,
+ * ignore it: we cannot send on a rescinded channel, and
+ * overriding the error lets cleanup proceed anyway.
+ */
+ if (device->channel->rescind)
+ ret = 0;
/*
* If we failed here, we might as well return and
* have a leak rather than continue and risk a bugcheck
@@ -198,6 +202,15 @@ static void netvsc_destroy_buf(struct hv_device *device)
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
VM_PKT_DATA_INBAND, 0);
+
+ /* If the failure is because the channel has been rescinded,
+ * ignore it: we cannot send on a rescinded channel, and
+ * overriding the error lets cleanup proceed anyway.
+ */
+ if (device->channel->rescind)
+ ret = 0;
+
/* If we failed here, we might as well return and
* have a leak rather than continue and risk a bugcheck
*/
@@ -555,10 +568,11 @@ void netvsc_device_remove(struct hv_device *device)
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *net_device = net_device_ctx->nvdev;
+ int i;
netvsc_disconnect_vsp(device);
- net_device_ctx->nvdev = NULL;
+ RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
/*
* At this point, no one should be accessing net_device
@@ -569,8 +583,12 @@ void netvsc_device_remove(struct hv_device *device)
/* Now, we can close the channel safely */
vmbus_close(device->channel);
+ /* And disassociate NAPI context from device */
+ for (i = 0; i < net_device->num_chn; i++)
+ netif_napi_del(&net_device->chan_table[i].napi);
+
/* Release all resources */
- free_netvsc_device(net_device);
+ free_netvsc_device_rcu(net_device);
}
#define RING_AVAIL_PERCENT_HIWATER 20
@@ -599,11 +617,11 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
struct hv_device *device,
- struct vmpacket_descriptor *packet)
+ const struct vmpacket_descriptor *desc,
+ int budget)
{
- struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id;
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = device->channel;
u16 q_idx = 0;
int queue_sends;
@@ -627,7 +645,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
}
queue_sends =
@@ -637,7 +655,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
wake_up(&net_device->wait_drain);
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
- !net_device_ctx->start_remove &&
(hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
queue_sends < 1))
netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
@@ -646,14 +663,12 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
static void netvsc_send_completion(struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
struct hv_device *device,
- struct vmpacket_descriptor *packet)
+ const struct vmpacket_descriptor *desc,
+ int budget)
{
- struct nvsp_message *nvsp_packet;
+ struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
struct net_device *ndev = hv_get_drvdata(device);
- nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
- (packet->offset8 << 3));
-
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
@@ -667,7 +682,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
netvsc_send_tx_complete(net_device, incoming_channel,
- device, packet);
+ device, desc, budget);
break;
default:
@@ -709,8 +724,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
packet->page_buf_cnt;
/* Add padding */
- if (skb && skb->xmit_more && remain &&
- !packet->cp_partial) {
+ if (skb->xmit_more && remain && !packet->cp_partial) {
padding = net_device->pkt_align - remain;
rndis_msg->msg_len += padding;
packet->total_data_buflen += padding;
@@ -868,9 +882,7 @@ int netvsc_send(struct hv_device *device,
if (msdp->pkt)
msd_len = msdp->pkt->total_data_buflen;
- try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
- net_device->max_pkt;
-
+ try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
if (try_batch && msd_len + pktlen + net_device->pkt_align <
net_device->send_section_size) {
section_index = msdp->pkt->send_buf_index;
@@ -880,7 +892,7 @@ int netvsc_send(struct hv_device *device,
section_index = msdp->pkt->send_buf_index;
packet->cp_partial = true;
- } else if ((skb != NULL) && pktlen + net_device->pkt_align <
+ } else if (pktlen + net_device->pkt_align <
net_device->send_section_size) {
section_index = netvsc_get_next_send_section(net_device);
if (section_index != NETVSC_INVALID_INDEX) {
@@ -1065,28 +1077,29 @@ static inline struct recv_comp_data *get_recv_comp_slot(
return rcd;
}
-static void netvsc_receive(struct net_device *ndev,
+static int netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
struct net_device_context *net_device_ctx,
struct hv_device *device,
struct vmbus_channel *channel,
- struct vmtransfer_page_packet_header *vmxferpage_packet,
+ const struct vmpacket_descriptor *desc,
struct nvsp_message *nvsp)
{
+ const struct vmtransfer_page_packet_header *vmxferpage_packet
+ = container_of(desc, const struct vmtransfer_page_packet_header, d);
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
char *recv_buf = net_device->recv_buf;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
int ret;
- struct recv_comp_data *rcd;
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
/* Make sure this is a valid nvsp packet */
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
netif_err(net_device_ctx, rx_err, ndev,
"Unknown nvsp packet type received %u\n",
nvsp->hdr.msg_type);
- return;
+ return 0;
}
if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
@@ -1094,7 +1107,7 @@ static void netvsc_receive(struct net_device *ndev,
"Invalid xfer page set id - expecting %x got %x\n",
NETVSC_RECEIVE_BUFFER_ID,
vmxferpage_packet->xfer_pageset_id);
- return;
+ return 0;
}
count = vmxferpage_packet->range_cnt;
@@ -1110,26 +1123,26 @@ static void netvsc_receive(struct net_device *ndev,
channel, data, buflen);
}
- if (!net_device->chan_table[q_idx].mrc.buf) {
+ if (net_device->chan_table[q_idx].mrc.buf) {
+ struct recv_comp_data *rcd;
+
+ rcd = get_recv_comp_slot(net_device, channel, q_idx);
+ if (rcd) {
+ rcd->tid = vmxferpage_packet->d.trans_id;
+ rcd->status = status;
+ } else {
+ netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
+ q_idx, vmxferpage_packet->d.trans_id);
+ }
+ } else {
ret = netvsc_send_recv_completion(channel,
vmxferpage_packet->d.trans_id,
status);
if (ret)
netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
q_idx, vmxferpage_packet->d.trans_id, ret);
- return;
}
-
- rcd = get_recv_comp_slot(net_device, channel, q_idx);
-
- if (!rcd) {
- netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
- q_idx, vmxferpage_packet->d.trans_id);
- return;
- }
-
- rcd->tid = vmxferpage_packet->d.trans_id;
- rcd->status = status;
+ return count;
}
static void netvsc_send_table(struct hv_device *hdev,
@@ -1175,28 +1188,25 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
}
}
-static void netvsc_process_raw_pkt(struct hv_device *device,
- struct vmbus_channel *channel,
- struct netvsc_device *net_device,
- struct net_device *ndev,
- u64 request_id,
- struct vmpacket_descriptor *desc)
+static int netvsc_process_raw_pkt(struct hv_device *device,
+ struct vmbus_channel *channel,
+ struct netvsc_device *net_device,
+ struct net_device *ndev,
+ const struct vmpacket_descriptor *desc,
+ int budget)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct nvsp_message *nvmsg
- = (struct nvsp_message *)((unsigned long)desc
- + (desc->offset8 << 3));
+ struct nvsp_message *nvmsg = hv_pkt_data(desc);
switch (desc->type) {
case VM_PKT_COMP:
- netvsc_send_completion(net_device, channel, device, desc);
+ netvsc_send_completion(net_device, channel, device,
+ desc, budget);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(ndev, net_device, net_device_ctx,
- device, channel,
- (struct vmtransfer_page_packet_header *)desc,
- nvmsg);
+ return netvsc_receive(ndev, net_device, net_device_ctx,
+ device, channel, desc, nvmsg);
break;
case VM_PKT_DATA_INBAND:
@@ -1205,53 +1215,74 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
default:
netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
- desc->type, request_id);
+ desc->type, desc->trans_id);
break;
}
+
+ return 0;
}
-void netvsc_channel_cb(void *context)
+static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
- struct vmbus_channel *channel = context;
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
- struct hv_device *device;
- struct netvsc_device *net_device;
- struct vmpacket_descriptor *desc;
- struct net_device *ndev;
- bool need_to_commit = false;
+ struct vmbus_channel *primary = channel->primary_channel;
- if (channel->primary_channel != NULL)
- device = channel->primary_channel->device_obj;
- else
- device = channel->device_obj;
+ return primary ? primary->device_obj : channel->device_obj;
+}
- ndev = hv_get_drvdata(device);
- if (unlikely(!ndev))
- return;
+/* Network processing softirq.
+ * Processes data in the incoming ring buffer from the host,
+ * stopping when the ring is empty or the budget is met or exceeded.
+ */
+int netvsc_poll(struct napi_struct *napi, int budget)
+{
+ struct netvsc_channel *nvchan
+ = container_of(napi, struct netvsc_channel, napi);
+ struct vmbus_channel *channel = nvchan->channel;
+ struct hv_device *device = netvsc_channel_to_device(channel);
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
+ int work_done = 0;
- net_device = net_device_to_netvsc_device(ndev);
- if (unlikely(!net_device))
- return;
+ /* If starting a new interval */
+ if (!nvchan->desc)
+ nvchan->desc = hv_pkt_iter_first(channel);
- if (unlikely(net_device->destroy &&
- netvsc_channel_idle(net_device, q_idx)))
- return;
+ while (nvchan->desc && work_done < budget) {
+ work_done += netvsc_process_raw_pkt(device, channel, net_device,
+ ndev, nvchan->desc, budget);
+ nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
+ }
- /* commit_rd_index() -> hv_signal_on_read() needs this. */
- init_cached_read_index(channel);
+ /* If the receive ring was exhausted and we are not busy polling,
+ * re-enable host interrupts and reschedule if the ring is not
+ * empty after all.
+ */
+ if (work_done < budget &&
+ napi_complete_done(napi, work_done) &&
+ hv_end_read(&channel->inbound) != 0)
+ napi_reschedule(napi);
- while ((desc = get_next_pkt_raw(channel)) != NULL) {
- netvsc_process_raw_pkt(device, channel, net_device,
- ndev, desc->trans_id, desc);
+ netvsc_chk_recv_comp(net_device, channel, q_idx);
- put_pkt_raw(channel, desc);
- need_to_commit = true;
- }
+ /* Driver may overshoot since a descriptor may carry multiple packets */
+ return min(work_done, budget);
+}
- if (need_to_commit)
- commit_rd_index(channel);
+/* Callback run when data becomes available in the host ring buffer.
+ * Processing is deferred to the network softirq (NAPI).
+ */
+void netvsc_channel_cb(void *context)
+{
+ struct netvsc_channel *nvchan = context;
- netvsc_chk_recv_comp(net_device, channel, q_idx);
+ if (napi_schedule_prep(&nvchan->napi)) {
+ /* disable interrupts from host */
+ hv_begin_read(&nvchan->channel->inbound);
+
+ __napi_schedule(&nvchan->napi);
+ }
}
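
The ISR/poll split above is the standard NAPI shape. A condensed, generic sketch of the contract (the my_* helpers are assumptions, not netvsc code):

static irqreturn_t my_isr(int irq, void *data)
{
        struct my_chan *c = data;

        if (napi_schedule_prep(&c->napi)) {
                my_mask_irq(c);                 /* assumed helper */
                __napi_schedule(&c->napi);      /* defer to softirq */
        }
        return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_chan *c = container_of(napi, struct my_chan, napi);
        int done = my_process_rx(c, budget);    /* assumed helper */

        /* Only re-arm interrupts once fully drained under budget. */
        if (done < budget && napi_complete_done(napi, done))
                my_unmask_irq(c);               /* assumed helper */
        return done;
}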
/*
@@ -1273,10 +1304,29 @@ int netvsc_device_add(struct hv_device *device,
net_device->ring_size = ring_size;
+ /* Because the device uses NAPI, all interrupt batching and
+ * control is done via the network softirq, not the channel callback.
+ */
+ set_channel_read_mode(device->channel, HV_CALL_ISR);
+
+ /* If we're reopening the device we may have multiple queues, fill the
+ * chn_table with the default channel to use it before subchannels are
+ * opened.
+ * Initialize the channel state before we open;
+ * we can be interrupted as soon as we open the channel.
+ */
+
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ struct netvsc_channel *nvchan = &net_device->chan_table[i];
+
+ nvchan->channel = device->channel;
+ }
+
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
- netvsc_channel_cb, device->channel);
+ netvsc_channel_cb,
+ net_device->chan_table);
if (ret != 0) {
netdev_err(ndev, "unable to open channel: %d\n", ret);
@@ -1286,19 +1336,15 @@ int netvsc_device_add(struct hv_device *device,
/* Channel is opened */
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
- /* If we're reopening the device we may have multiple queues, fill the
- * chn_table with the default channel to use it before subchannels are
- * opened.
- */
- for (i = 0; i < VRSS_CHANNEL_MAX; i++)
- net_device->chan_table[i].channel = device->channel;
+ /* Enable NAPI handler for init callbacks */
+ netif_napi_add(ndev, &net_device->chan_table[0].napi,
+ netvsc_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&net_device->chan_table[0].napi);
/* Writing the nvdev pointer unlocks netvsc_send(); make sure chn_table is
* populated.
*/
- wmb();
-
- net_device_ctx->nvdev = net_device;
+ rcu_assign_pointer(net_device_ctx->nvdev, net_device);
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device);
@@ -1311,11 +1357,13 @@ int netvsc_device_add(struct hv_device *device,
return ret;
close:
+ netif_napi_del(&net_device->chan_table[0].napi);
+
/* Now, we can close the channel safely */
vmbus_close(device->channel);
cleanup:
- free_netvsc_device(net_device);
+ free_netvsc_device(&net_device->rcu);
return ret;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5ede87f30463..4421a6d00375 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -62,7 +62,7 @@ static void do_set_multicast(struct work_struct *w)
container_of(w, struct net_device_context, work);
struct hv_device *device_obj = ndevctx->device_ctx;
struct net_device *ndev = hv_get_drvdata(device_obj);
- struct netvsc_device *nvdev = ndevctx->nvdev;
+ struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
struct rndis_device *rdev;
if (!nvdev)
@@ -116,7 +116,7 @@ static int netvsc_open(struct net_device *net)
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
struct vmbus_channel *chn;
@@ -191,6 +191,54 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
+/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute
+ * the hash for non-TCP traffic using only the IP addresses.
+ */
+static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk)
+{
+ struct flow_keys flow;
+ u32 hash;
+ static u32 hashrnd __read_mostly;
+
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ return 0;
+
+ if (flow.basic.ip_proto == IPPROTO_TCP) {
+ return skb_get_hash(skb);
+ } else {
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
+ else
+ hash = 0;
+
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+ }
+
+ return hash;
+}
+
+static inline int netvsc_get_tx_queue(struct net_device *ndev,
+ struct sk_buff *skb, int old_idx)
+{
+ const struct net_device_context *ndc = netdev_priv(ndev);
+ struct sock *sk = skb->sk;
+ int q_idx;
+
+ q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) &
+ (VRSS_SEND_TAB_SIZE - 1)];
+
+ /* If the queue index changed, record the new value */
+ if (q_idx != old_idx &&
+ sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, q_idx);
+
+ return q_idx;
+}
+
/*
* Select queue for transmit.
*
@@ -205,24 +253,22 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
unsigned int num_tx_queues = ndev->real_num_tx_queues;
- struct sock *sk = skb->sk;
- int q_idx = sk_tx_queue_get(sk);
-
- if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
- u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
- int new_idx;
-
- new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;
+ int q_idx = sk_tx_queue_get(skb->sk);
- if (q_idx != new_idx && sk &&
- sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
- sk_tx_queue_set(sk, new_idx);
-
- q_idx = new_idx;
+ if (q_idx < 0 || skb->ooo_okay) {
+ /* If forwarding a packet, we use the recorded queue when
+ * available for better cache locality.
+ */
+ if (skb_rx_queue_recorded(skb))
+ q_idx = skb_get_rx_queue(skb);
+ else
+ q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
}
+ while (unlikely(q_idx >= num_tx_queues))
+ q_idx -= num_tx_queues;
+
return q_idx;
}
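
The while-subtract above is a branch-cheap substitute for q_idx % num_tx_queues, valid because q_idx is already bounded (a recorded rx queue or a send-table entry) rather than arbitrary. A runnable equivalence check:

#include <assert.h>

static int fold(int q, int n)
{
        while (q >= n)
                q -= n;
        return q;
}

int main(void)
{
        for (int n = 1; n <= 8; n++)
                for (int q = 0; q < 64; q++)
                        assert(fold(q, n) == q % n);
        return 0;
}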
@@ -584,13 +630,14 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+ struct napi_struct *napi,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan,
void *data, u32 buflen)
{
struct sk_buff *skb;
- skb = netdev_alloc_skb_ip_align(net, buflen);
+ skb = napi_alloc_skb(napi, buflen);
if (!skb)
return skb;
@@ -636,12 +683,12 @@ int netvsc_recv_callback(struct net_device *net,
const struct ndis_pkt_8021q_info *vlan)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
+ struct netvsc_device *net_device;
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ struct netvsc_channel *nvchan;
struct net_device *vf_netdev;
struct sk_buff *skb;
struct netvsc_stats *rx_stats;
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
-
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
@@ -654,13 +701,20 @@ int netvsc_recv_callback(struct net_device *net,
* interface in the guest.
*/
rcu_read_lock();
+ net_device = rcu_dereference(net_device_ctx->nvdev);
+ if (unlikely(!net_device))
+ goto drop;
+
+ nvchan = &net_device->chan_table[q_idx];
vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
if (vf_netdev && (vf_netdev->flags & IFF_UP))
net = vf_netdev;
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
+ skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
+ csum_info, vlan, data, len);
if (unlikely(!skb)) {
+drop:
++net->stats.rx_dropped;
rcu_read_unlock();
return NVSP_STAT_FAIL;
@@ -674,7 +728,7 @@ int netvsc_recv_callback(struct net_device *net,
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
- rx_stats = &net_device->chan_table[q_idx].rx_stats;
+ rx_stats = &nvchan->rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += len;
@@ -685,12 +739,7 @@ int netvsc_recv_callback(struct net_device *net,
++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
- /*
- * Pass the skb back up. Network stack will deallocate the skb when it
- * is done.
- * TODO - use NAPI?
- */
- netif_receive_skb(skb);
+ napi_gro_receive(&nvchan->napi, skb);
rcu_read_unlock();
return 0;
@@ -707,7 +756,7 @@ static void netvsc_get_channels(struct net_device *net,
struct ethtool_channels *channel)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
if (nvdev) {
channel->max_combined = nvdev->max_chn;
@@ -744,8 +793,9 @@ static int netvsc_set_channels(struct net_device *net,
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *dev = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
unsigned int count = channels->combined_count;
+ bool was_running;
int ret;
/* We do not support separate count for rx, tx, or other */
@@ -756,7 +806,7 @@ static int netvsc_set_channels(struct net_device *net,
if (count > net->num_tx_queues || count > net->num_rx_queues)
return -EINVAL;
- if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
+ if (!nvdev || nvdev->destroy)
return -ENODEV;
if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
@@ -765,11 +815,13 @@ static int netvsc_set_channels(struct net_device *net,
if (count > nvdev->max_chn)
return -EINVAL;
- ret = netvsc_close(net);
- if (ret)
- return ret;
+ was_running = netif_running(net);
+ if (was_running) {
+ ret = netvsc_close(net);
+ if (ret)
+ return ret;
+ }
- net_device_ctx->start_remove = true;
rndis_filter_device_remove(dev, nvdev);
ret = netvsc_set_queues(net, dev, count);
@@ -778,8 +830,8 @@ static int netvsc_set_channels(struct net_device *net,
else
netvsc_set_queues(net, dev, nvdev->num_chn);
- netvsc_open(net);
- net_device_ctx->start_remove = false;
+ if (was_running)
+ ret = netvsc_open(net);
/* We may have missed link change notifications */
schedule_delayed_work(&net_device_ctx->dwork, 0);
@@ -787,18 +839,19 @@ static int netvsc_set_channels(struct net_device *net,
return ret;
}
-static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
+static bool
+netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
- struct ethtool_cmd diff1 = *cmd;
- struct ethtool_cmd diff2 = {};
+ struct ethtool_link_ksettings diff1 = *cmd;
+ struct ethtool_link_ksettings diff2 = {};
- ethtool_cmd_speed_set(&diff1, 0);
- diff1.duplex = 0;
+ diff1.base.speed = 0;
+ diff1.base.duplex = 0;
/* advertising and cmd are usually set */
- diff1.advertising = 0;
- diff1.cmd = 0;
+ ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
+ diff1.base.cmd = 0;
/* We set port to PORT_OTHER */
- diff2.port = PORT_OTHER;
+ diff2.base.port = PORT_OTHER;
return !memcmp(&diff1, &diff2, sizeof(diff1));
}
@@ -808,33 +861,35 @@ static void netvsc_init_settings(struct net_device *dev)
struct net_device_context *ndc = netdev_priv(dev);
ndc->speed = SPEED_UNKNOWN;
- ndc->duplex = DUPLEX_UNKNOWN;
+ ndc->duplex = DUPLEX_FULL;
}
-static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netvsc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
- ethtool_cmd_speed_set(cmd, ndc->speed);
- cmd->duplex = ndc->duplex;
- cmd->port = PORT_OTHER;
+ cmd->base.speed = ndc->speed;
+ cmd->base.duplex = ndc->duplex;
+ cmd->base.port = PORT_OTHER;
return 0;
}
-static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netvsc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
u32 speed;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->duplex) ||
+ !ethtool_validate_duplex(cmd->base.duplex) ||
!netvsc_validate_ethtool_ss_cmd(cmd))
return -EINVAL;
ndc->speed = speed;
- ndc->duplex = cmd->duplex;
+ ndc->duplex = cmd->base.duplex;
return 0;
}
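
The legacy get_settings/set_settings pair is replaced here by the link_ksettings API, which moves the scalar fields into cmd->base and turns link modes into bitmaps. A minimal get hook in the new style (generic sketch with illustrative values, not the netvsc one):

static int my_get_link_ksettings(struct net_device *dev,
                                 struct ethtool_link_ksettings *cmd)
{
        ethtool_link_ksettings_zero_link_mode(cmd, supported);
        ethtool_link_ksettings_zero_link_mode(cmd, advertising);

        cmd->base.speed  = SPEED_10000;         /* illustrative values */
        cmd->base.duplex = DUPLEX_FULL;
        cmd->base.port   = PORT_OTHER;
        return 0;
}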
@@ -842,24 +897,27 @@ static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
- struct netvsc_device *nvdev = ndevctx->nvdev;
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device_info device_info;
- int ret;
+ bool was_running;
+ int ret = 0;
- if (ndevctx->start_remove || !nvdev || nvdev->destroy)
+ if (!nvdev || nvdev->destroy)
return -ENODEV;
- ret = netvsc_close(ndev);
- if (ret)
- goto out;
+ was_running = netif_running(ndev);
+ if (was_running) {
+ ret = netvsc_close(ndev);
+ if (ret)
+ return ret;
+ }
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn;
device_info.max_num_vrss_chns = nvdev->num_chn;
- ndevctx->start_remove = true;
rndis_filter_device_remove(hdev, nvdev);
/* 'nvdev' has been freed in rndis_filter_device_remove() ->
@@ -872,9 +930,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
rndis_filter_device_add(hdev, &device_info);
-out:
- netvsc_open(ndev);
- ndevctx->start_remove = false;
+ if (was_running)
+ ret = netvsc_open(ndev);
/* We may have missed link change notifications */
schedule_delayed_work(&ndevctx->dwork, 0);
@@ -886,7 +943,7 @@ static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
- struct netvsc_device *nvdev = ndev_ctx->nvdev;
+ struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
int i;
if (!nvdev)
@@ -971,7 +1028,10 @@ static const struct {
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
struct net_device_context *ndc = netdev_priv(dev);
- struct netvsc_device *nvdev = ndc->nvdev;
+ struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
+
+ if (!nvdev)
+ return -ENODEV;
switch (string_set) {
case ETH_SS_STATS:
@@ -985,13 +1045,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
- struct netvsc_device *nvdev = ndc->nvdev;
+ struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
const struct netvsc_stats *qstats;
unsigned int start;
u64 packets, bytes;
int i, j;
+ if (!nvdev)
+ return;
+
for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
@@ -1020,10 +1083,13 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
- struct netvsc_device *nvdev = ndc->nvdev;
+ struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
u8 *p = data;
int i;
+ if (!nvdev)
+ return;
+
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
@@ -1075,7 +1141,10 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
struct net_device_context *ndc = netdev_priv(dev);
- struct netvsc_device *nvdev = ndc->nvdev;
+ struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
+
+ if (!nvdev)
+ return -ENODEV;
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
@@ -1111,13 +1180,17 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct net_device_context *ndc = netdev_priv(dev);
- struct netvsc_device *ndev = ndc->nvdev;
- struct rndis_device *rndis_dev = ndev->extension;
+ struct netvsc_device *ndev = rcu_dereference(ndc->nvdev);
+ struct rndis_device *rndis_dev;
int i;
+ if (!ndev)
+ return -ENODEV;
+
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
indir[i] = rndis_dev->ind_table[i];
@@ -1133,13 +1206,17 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct net_device_context *ndc = netdev_priv(dev);
- struct netvsc_device *ndev = ndc->nvdev;
- struct rndis_device *rndis_dev = ndev->extension;
+ struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
+ struct rndis_device *rndis_dev;
int i;
+ if (!ndev)
+ return -ENODEV;
+
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
if (indir[i] >= dev->num_rx_queues)
@@ -1168,13 +1245,13 @@ static const struct ethtool_ops ethtool_ops = {
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_settings = netvsc_get_settings,
- .set_settings = netvsc_set_settings,
.get_rxnfc = netvsc_get_rxnfc,
.get_rxfh_key_size = netvsc_get_rxfh_key_size,
.get_rxfh_indir_size = netvsc_rss_indir_size,
.get_rxfh = netvsc_get_rxfh,
.set_rxfh = netvsc_set_rxfh,
+ .get_link_ksettings = netvsc_get_link_ksettings,
+ .set_link_ksettings = netvsc_set_link_ksettings,
};
static const struct net_device_ops device_ops = {
@@ -1210,10 +1287,10 @@ static void netvsc_link_change(struct work_struct *w)
unsigned long flags, next_reconfig, delay;
rtnl_lock();
- if (ndev_ctx->start_remove)
+ net_device = rtnl_dereference(ndev_ctx->nvdev);
+ if (!net_device)
goto out_unlock;
- net_device = ndev_ctx->nvdev;
rdev = net_device->extension;
next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
@@ -1354,7 +1431,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
- netvsc_dev = net_device_ctx->nvdev;
+ netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
return NOTIFY_DONE;
@@ -1380,7 +1457,7 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
- netvsc_dev = net_device_ctx->nvdev;
+ netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
@@ -1414,7 +1491,7 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
- netvsc_dev = net_device_ctx->nvdev;
+ netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
netvsc_switch_datapath(ndev, false);
@@ -1474,8 +1551,6 @@ static int netvsc_probe(struct hv_device *dev,
hv_set_drvdata(dev, net);
- net_device_ctx->start_remove = false;
-
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
@@ -1492,8 +1567,7 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
- device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT,
- num_online_cpus());
+ device_info.num_chn = VRSS_CHANNEL_DEFAULT;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
@@ -1509,6 +1583,7 @@ static int netvsc_probe(struct hv_device *dev,
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
+ /* RCU not necessary here, device not registered */
nvdev = net_device_ctx->nvdev;
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
@@ -1544,26 +1619,20 @@ static int netvsc_remove(struct hv_device *dev)
ndev_ctx = netdev_priv(net);
- /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
- * removing the device.
- */
- rtnl_lock();
- ndev_ctx->start_remove = true;
- rtnl_unlock();
+ netif_device_detach(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
cancel_work_sync(&ndev_ctx->work);
- /* Stop outbound asap */
- netif_tx_disable(net);
-
- unregister_netdev(net);
-
/*
* Call to the vsc driver to let it know that the device is being
- * removed
+ * removed. Also blocks MTU and channel changes.
*/
+ rtnl_lock();
rndis_filter_device_remove(dev, ndev_ctx->nvdev);
+ rtnl_unlock();
+
+ unregister_netdev(net);
hv_set_drvdata(dev, NULL);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 19356f56b7b1..ab92c3c95951 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -819,16 +819,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
{
struct rndis_request *request;
struct rndis_set_request *set;
- struct rndis_set_complete *set_complete;
int ret;
request = get_rndis_request(dev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
sizeof(u32));
- if (!request) {
- ret = -ENOMEM;
- goto cleanup;
- }
+ if (!request)
+ return -ENOMEM;
+
/* Setup the rndis set */
set = &request->request_msg.msg.set_req;
@@ -840,15 +838,11 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
&new_filter, sizeof(u32));
ret = rndis_filter_send_request(dev, request);
- if (ret != 0)
- goto cleanup;
+ if (ret == 0)
+ wait_for_completion(&request->wait_event);
- wait_for_completion(&request->wait_event);
+ put_rndis_request(dev, request);
- set_complete = &request->response_msg.msg.set_complete;
-cleanup:
- if (request)
- put_rndis_request(dev, request);
return ret;
}
@@ -926,8 +920,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
struct rndis_halt_request *halt;
struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
struct netvsc_device *nvdev = net_device_ctx->nvdev;
- struct hv_device *hdev = net_device_ctx->device_ctx;
- ulong flags;
/* Attempt to do a rndis device halt */
request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -945,9 +937,10 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
dev->state = RNDIS_DEV_UNINITIALIZED;
cleanup:
- spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
nvdev->destroy = true;
- spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
+
+ /* Force flag to be ordered before waiting */
+ wmb();
/* Wait for all send completions */
wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
@@ -996,26 +989,38 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
hv_get_drvdata(new_sc->primary_channel->device_obj);
struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
+ struct netvsc_channel *nvchan;
int ret;
- unsigned long flags;
if (chn_index >= nvscdev->num_chn)
return;
- nvscdev->chan_table[chn_index].mrc.buf
+ nvchan = nvscdev->chan_table + chn_index;
+ nvchan->mrc.buf
= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
+ if (!nvchan->mrc.buf)
+ return;
+
+ /* Because the device uses NAPI, all the interrupt batching and
+ * control is done via Net softirq, not the channel handling
+ */
+ set_channel_read_mode(new_sc, HV_CALL_ISR);
+
+ /* Set the channel before opening. */
+ nvchan->channel = new_sc;
+ netif_napi_add(ndev, &nvchan->napi,
+ netvsc_poll, NAPI_POLL_WEIGHT);
+
ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
nvscdev->ring_size * PAGE_SIZE, NULL, 0,
- netvsc_channel_cb, new_sc);
-
+ netvsc_channel_cb, nvchan);
if (ret == 0)
- nvscdev->chan_table[chn_index].channel = new_sc;
+ napi_enable(&nvchan->napi);
+ else
+ netdev_err(ndev, "sub channel open failed (%d)\n", ret);
- spin_lock_irqsave(&nvscdev->sc_lock, flags);
- nvscdev->num_sc_offered--;
- spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
- if (nvscdev->num_sc_offered == 0)
+ if (refcount_dec_and_test(&nvscdev->sc_offered))
complete(&nvscdev->channel_init_wait);
}
@@ -1032,12 +1037,9 @@ int rndis_filter_device_add(struct hv_device *dev,
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
unsigned int gso_max_size = GSO_MAX_SIZE;
- u32 mtu, size;
- u32 num_rss_qs;
- u32 sc_delta;
+ u32 mtu, size, num_rss_qs;
const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
- unsigned long flags;
int i, ret;
rndis_device = get_rndis_device();
@@ -1060,7 +1062,7 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->max_chn = 1;
net_device->num_chn = 1;
- spin_lock_init(&net_device->sc_lock);
+ refcount_set(&net_device->sc_offered, 0);
net_device->extension = rndis_device;
rndis_device->ndev = net;
@@ -1174,34 +1176,30 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
- net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);
-
- num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);
-
/*
* We will limit the VRSS channels to the number of CPUs in the NUMA node
* the primary channel is currently bound to.
+ *
+ * This also guarantees that num_possible_rss_qs <= num_online_cpus
*/
node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
- num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+ num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
+ rsscap.num_recv_que);
- /* We will use the given number of channels if available. */
- if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
- net_device->num_chn = device_info->num_chn;
- else
- net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+ net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
- num_rss_qs = net_device->num_chn - 1;
+ /* We will use the given number of channels if available. */
+ net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
for (i = 0; i < ITAB_NUM; i++)
rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
net_device->num_chn);
- net_device->num_sc_offered = num_rss_qs;
-
- if (net_device->num_chn == 1)
- goto out;
+ num_rss_qs = net_device->num_chn - 1;
+ if (num_rss_qs == 0)
+ return 0;
+ refcount_set(&net_device->sc_offered, num_rss_qs);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
init_packet = &net_device->channel_init_pkt;
@@ -1217,32 +1215,23 @@ int rndis_filter_device_add(struct hv_device *dev,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
goto out;
wait_for_completion(&net_device->channel_init_wait);
- if (init_packet->msg.v5_msg.subchn_comp.status !=
- NVSP_STAT_SUCCESS) {
+ if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
ret = -ENODEV;
goto out;
}
+
net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
- ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
- net_device->num_chn);
-
- /*
- * Set the number of sub-channels to be received.
- */
- spin_lock_irqsave(&net_device->sc_lock, flags);
- sc_delta = num_rss_qs - (net_device->num_chn - 1);
- net_device->num_sc_offered -= sc_delta;
- spin_unlock_irqrestore(&net_device->sc_lock, flags);
-
+ /* ignore failures from setting rss parameters, still have channels */
+ rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+ net_device->num_chn);
out:
if (ret) {
net_device->max_chn = 1;
net_device->num_chn = 1;
- net_device->num_sc_offered = 0;
}
return 0; /* return 0 because primary channel can be used alone */
@@ -1257,12 +1246,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
{
struct rndis_device *rndis_dev = net_dev->extension;
- /* If not all subchannel offers are complete, wait for them until
- * completion to avoid race.
- */
- if (net_dev->num_sc_offered > 0)
- wait_for_completion(&net_dev->channel_init_wait);
-
/* Halt and release the rndis device */
rndis_filter_halt_device(rndis_dev);
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 3057a8df4ce9..303ba4133920 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -82,3 +82,25 @@ config IEEE802154_ADF7242
This driver can also be built as a module. To do so, say M here.
the module will be called 'adf7242'.
+
+config IEEE802154_CA8210
+ tristate "Cascoda CA8210 transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on COMMON_CLK
+ depends on SPI
+ ---help---
+ Say Y here to enable the CA8210 SPI 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so, say M here.
+ The module will be called 'ca8210'.
+
+config IEEE802154_CA8210_DEBUGFS
+ bool "CA8210 debugfs interface"
+ depends on IEEE802154_CA8210
+ depends on DEBUG_FS
+ ---help---
+ This option compiles debugfs code for the ca8210 driver. This
+ exposes a debugfs node for each CA8210 instance which allows
+ direct use of the Cascoda API, exposing the 802.15.4 MAC
+ management entities.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 3a923d339497..8374bb44a145 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o
+obj-$(CONFIG_IEEE802154_CA8210) += ca8210.o
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
new file mode 100644
index 000000000000..25fd3b04b3c0
--- /dev/null
+++ b/drivers/net/ieee802154/ca8210.c
@@ -0,0 +1,3242 @@
+/*
+ * http://www.cascoda.com/products/ca-821x/
+ * Copyright (c) 2016, Cascoda, Ltd.
+ * All rights reserved.
+ *
+ * This code is dual-licensed under both GPLv2 and 3-clause BSD. What follows is
+ * the license notice for both respectively.
+ *
+ *******************************************************************************
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *******************************************************************************
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/cdev.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/ieee802154.h>
+#include <linux/kfifo.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/mac802154.h>
+
+#define DRIVER_NAME "ca8210"
+
+/* external clock frequencies */
+#define ONE_MHZ 1000000
+#define TWO_MHZ (2 * ONE_MHZ)
+#define FOUR_MHZ (4 * ONE_MHZ)
+#define EIGHT_MHZ (8 * ONE_MHZ)
+#define SIXTEEN_MHZ (16 * ONE_MHZ)
+
+/* spi constants */
+#define CA8210_SPI_BUF_SIZE 256
+#define CA8210_SYNC_TIMEOUT 1000 /* Timeout for synchronous commands [ms] */
+
+/* test interface constants */
+#define CA8210_TEST_INT_FILE_NAME "ca8210_test"
+#define CA8210_TEST_INT_FIFO_SIZE 256
+
+/* MAC status enumerations */
+#define MAC_SUCCESS (0x00)
+#define MAC_ERROR (0x01)
+#define MAC_CANCELLED (0x02)
+#define MAC_READY_FOR_POLL (0x03)
+#define MAC_COUNTER_ERROR (0xDB)
+#define MAC_IMPROPER_KEY_TYPE (0xDC)
+#define MAC_IMPROPER_SECURITY_LEVEL (0xDD)
+#define MAC_UNSUPPORTED_LEGACY (0xDE)
+#define MAC_UNSUPPORTED_SECURITY (0xDF)
+#define MAC_BEACON_LOST (0xE0)
+#define MAC_CHANNEL_ACCESS_FAILURE (0xE1)
+#define MAC_DENIED (0xE2)
+#define MAC_DISABLE_TRX_FAILURE (0xE3)
+#define MAC_SECURITY_ERROR (0xE4)
+#define MAC_FRAME_TOO_LONG (0xE5)
+#define MAC_INVALID_GTS (0xE6)
+#define MAC_INVALID_HANDLE (0xE7)
+#define MAC_INVALID_PARAMETER (0xE8)
+#define MAC_NO_ACK (0xE9)
+#define MAC_NO_BEACON (0xEA)
+#define MAC_NO_DATA (0xEB)
+#define MAC_NO_SHORT_ADDRESS (0xEC)
+#define MAC_OUT_OF_CAP (0xED)
+#define MAC_PAN_ID_CONFLICT (0xEE)
+#define MAC_REALIGNMENT (0xEF)
+#define MAC_TRANSACTION_EXPIRED (0xF0)
+#define MAC_TRANSACTION_OVERFLOW (0xF1)
+#define MAC_TX_ACTIVE (0xF2)
+#define MAC_UNAVAILABLE_KEY (0xF3)
+#define MAC_UNSUPPORTED_ATTRIBUTE (0xF4)
+#define MAC_INVALID_ADDRESS (0xF5)
+#define MAC_ON_TIME_TOO_LONG (0xF6)
+#define MAC_PAST_TIME (0xF7)
+#define MAC_TRACKING_OFF (0xF8)
+#define MAC_INVALID_INDEX (0xF9)
+#define MAC_LIMIT_REACHED (0xFA)
+#define MAC_READ_ONLY (0xFB)
+#define MAC_SCAN_IN_PROGRESS (0xFC)
+#define MAC_SUPERFRAME_OVERLAP (0xFD)
+#define MAC_SYSTEM_ERROR (0xFF)
+
+/* HWME attribute IDs */
+#define HWME_EDTHRESHOLD (0x04)
+#define HWME_EDVALUE (0x06)
+#define HWME_SYSCLKOUT (0x0F)
+#define HWME_LQILIMIT (0x11)
+
+/* TDME attribute IDs */
+#define TDME_CHANNEL (0x00)
+#define TDME_ATM_CONFIG (0x06)
+
+#define MAX_HWME_ATTRIBUTE_SIZE 16
+#define MAX_TDME_ATTRIBUTE_SIZE 2
+
+/* PHY/MAC PIB Attribute Enumerations */
+#define PHY_CURRENT_CHANNEL (0x00)
+#define PHY_TRANSMIT_POWER (0x02)
+#define PHY_CCA_MODE (0x03)
+#define MAC_ASSOCIATION_PERMIT (0x41)
+#define MAC_AUTO_REQUEST (0x42)
+#define MAC_BATT_LIFE_EXT (0x43)
+#define MAC_BATT_LIFE_EXT_PERIODS (0x44)
+#define MAC_BEACON_PAYLOAD (0x45)
+#define MAC_BEACON_PAYLOAD_LENGTH (0x46)
+#define MAC_BEACON_ORDER (0x47)
+#define MAC_GTS_PERMIT (0x4d)
+#define MAC_MAX_CSMA_BACKOFFS (0x4e)
+#define MAC_MIN_BE (0x4f)
+#define MAC_PAN_ID (0x50)
+#define MAC_PROMISCUOUS_MODE (0x51)
+#define MAC_RX_ON_WHEN_IDLE (0x52)
+#define MAC_SHORT_ADDRESS (0x53)
+#define MAC_SUPERFRAME_ORDER (0x54)
+#define MAC_ASSOCIATED_PAN_COORD (0x56)
+#define MAC_MAX_BE (0x57)
+#define MAC_MAX_FRAME_RETRIES (0x59)
+#define MAC_RESPONSE_WAIT_TIME (0x5A)
+#define MAC_SECURITY_ENABLED (0x5D)
+
+#define MAC_AUTO_REQUEST_SECURITY_LEVEL (0x78)
+#define MAC_AUTO_REQUEST_KEY_ID_MODE (0x79)
+
+#define NS_IEEE_ADDRESS (0xFF) /* Non-standard IEEE address */
+
+/* MAC Address Mode Definitions */
+#define MAC_MODE_NO_ADDR (0x00)
+#define MAC_MODE_SHORT_ADDR (0x02)
+#define MAC_MODE_LONG_ADDR (0x03)
+
+/* MAC constants */
+#define MAX_BEACON_OVERHEAD (75)
+#define MAX_BEACON_PAYLOAD_LENGTH (IEEE802154_MTU - MAX_BEACON_OVERHEAD)
+
+#define MAX_ATTRIBUTE_SIZE (122)
+#define MAX_DATA_SIZE (114)
+
+#define CA8210_VALID_CHANNELS (0x07FFF800)
+
+/* MAC workarounds for V1.1 and MPW silicon (V0.x) */
+#define CA8210_MAC_WORKAROUNDS (0)
+#define CA8210_MAC_MPW (0)
+
+/* memory manipulation macros */
+#define LS_BYTE(x) ((u8)((x) & 0xFF))
+#define MS_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
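+/*
+ * Illustrative example (not part of the driver): for x = 0xABCD,
+ * LS_BYTE(x) yields 0xCD and MS_BYTE(x) yields 0xAB, matching the
+ * little-endian byte order used in the SAP parameter sets below.
+ */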
+
+/* message ID codes in SPI commands */
+/* downstream */
+#define MCPS_DATA_REQUEST (0x00)
+#define MLME_ASSOCIATE_REQUEST (0x02)
+#define MLME_ASSOCIATE_RESPONSE (0x03)
+#define MLME_DISASSOCIATE_REQUEST (0x04)
+#define MLME_GET_REQUEST (0x05)
+#define MLME_ORPHAN_RESPONSE (0x06)
+#define MLME_RESET_REQUEST (0x07)
+#define MLME_RX_ENABLE_REQUEST (0x08)
+#define MLME_SCAN_REQUEST (0x09)
+#define MLME_SET_REQUEST (0x0A)
+#define MLME_START_REQUEST (0x0B)
+#define MLME_POLL_REQUEST (0x0D)
+#define HWME_SET_REQUEST (0x0E)
+#define HWME_GET_REQUEST (0x0F)
+#define TDME_SETSFR_REQUEST (0x11)
+#define TDME_GETSFR_REQUEST (0x12)
+#define TDME_SET_REQUEST (0x14)
+/* upstream */
+#define MCPS_DATA_INDICATION (0x00)
+#define MCPS_DATA_CONFIRM (0x01)
+#define MLME_RESET_CONFIRM (0x0A)
+#define MLME_SET_CONFIRM (0x0E)
+#define MLME_START_CONFIRM (0x0F)
+#define HWME_SET_CONFIRM (0x12)
+#define HWME_GET_CONFIRM (0x13)
+#define HWME_WAKEUP_INDICATION (0x15)
+#define TDME_SETSFR_CONFIRM (0x17)
+
+/* SPI command IDs */
+/* bit indicating a confirm or indication from slave to master */
+#define SPI_S2M (0x20)
+/* bit indicating a synchronous message */
+#define SPI_SYN (0x40)
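+/*
+ * Worked example of the ID composition: SPI_MLME_SET_CONFIRM below is
+ * MLME_SET_CONFIRM (0x0E) + SPI_S2M (0x20) + SPI_SYN (0x40) = 0x6E, i.e.
+ * a synchronous confirm travelling from the ca8210 slave to the host.
+ */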
+
+/* SPI command definitions */
+#define SPI_IDLE (0xFF)
+#define SPI_NACK (0xF0)
+
+#define SPI_MCPS_DATA_REQUEST (MCPS_DATA_REQUEST)
+#define SPI_MCPS_DATA_INDICATION (MCPS_DATA_INDICATION + SPI_S2M)
+#define SPI_MCPS_DATA_CONFIRM (MCPS_DATA_CONFIRM + SPI_S2M)
+
+#define SPI_MLME_ASSOCIATE_REQUEST (MLME_ASSOCIATE_REQUEST)
+#define SPI_MLME_RESET_REQUEST (MLME_RESET_REQUEST + SPI_SYN)
+#define SPI_MLME_SET_REQUEST (MLME_SET_REQUEST + SPI_SYN)
+#define SPI_MLME_START_REQUEST (MLME_START_REQUEST + SPI_SYN)
+#define SPI_MLME_RESET_CONFIRM (MLME_RESET_CONFIRM + SPI_S2M + SPI_SYN)
+#define SPI_MLME_SET_CONFIRM (MLME_SET_CONFIRM + SPI_S2M + SPI_SYN)
+#define SPI_MLME_START_CONFIRM (MLME_START_CONFIRM + SPI_S2M + SPI_SYN)
+
+#define SPI_HWME_SET_REQUEST (HWME_SET_REQUEST + SPI_SYN)
+#define SPI_HWME_GET_REQUEST (HWME_GET_REQUEST + SPI_SYN)
+#define SPI_HWME_SET_CONFIRM (HWME_SET_CONFIRM + SPI_S2M + SPI_SYN)
+#define SPI_HWME_GET_CONFIRM (HWME_GET_CONFIRM + SPI_S2M + SPI_SYN)
+#define SPI_HWME_WAKEUP_INDICATION (HWME_WAKEUP_INDICATION + SPI_S2M)
+
+#define SPI_TDME_SETSFR_REQUEST (TDME_SETSFR_REQUEST + SPI_SYN)
+#define SPI_TDME_SET_REQUEST (TDME_SET_REQUEST + SPI_SYN)
+#define SPI_TDME_SETSFR_CONFIRM (TDME_SETSFR_CONFIRM + SPI_S2M + SPI_SYN)
+
+/* TDME SFR addresses */
+/* Page 0 */
+#define CA8210_SFR_PACFG (0xB1)
+#define CA8210_SFR_MACCON (0xD8)
+#define CA8210_SFR_PACFGIB (0xFE)
+/* Page 1 */
+#define CA8210_SFR_LOTXCAL (0xBF)
+#define CA8210_SFR_PTHRH (0xD1)
+#define CA8210_SFR_PRECFG (0xD3)
+#define CA8210_SFR_LNAGX40 (0xE1)
+#define CA8210_SFR_LNAGX41 (0xE2)
+#define CA8210_SFR_LNAGX42 (0xE3)
+#define CA8210_SFR_LNAGX43 (0xE4)
+#define CA8210_SFR_LNAGX44 (0xE5)
+#define CA8210_SFR_LNAGX45 (0xE6)
+#define CA8210_SFR_LNAGX46 (0xE7)
+#define CA8210_SFR_LNAGX47 (0xE9)
+
+#define PACFGIB_DEFAULT_CURRENT (0x3F)
+#define PTHRH_DEFAULT_THRESHOLD (0x5A)
+#define LNAGX40_DEFAULT_GAIN (0x29) /* 10dB */
+#define LNAGX41_DEFAULT_GAIN (0x54) /* 21dB */
+#define LNAGX42_DEFAULT_GAIN (0x6C) /* 27dB */
+#define LNAGX43_DEFAULT_GAIN (0x7A) /* 30dB */
+#define LNAGX44_DEFAULT_GAIN (0x84) /* 33dB */
+#define LNAGX45_DEFAULT_GAIN (0x8B) /* 34dB */
+#define LNAGX46_DEFAULT_GAIN (0x92) /* 36dB */
+#define LNAGX47_DEFAULT_GAIN (0x96) /* 37dB */
+
+#define CA8210_IOCTL_HARD_RESET (0x00)
+
+/* Structs/Enums */
+
+/**
+ * struct cas_control - spi transfer structure
+ * @msg: spi_message for each exchange
+ * @transfer: spi_transfer for each exchange
+ * @tx_buf: source array for transmission
+ * @tx_in_buf: array storing bytes received during transmission
+ * @priv: pointer to private data
+ *
+ * This structure stores all the necessary data passed around during a single
+ * spi exchange.
+ */
+struct cas_control {
+ struct spi_message msg;
+ struct spi_transfer transfer;
+
+ u8 tx_buf[CA8210_SPI_BUF_SIZE];
+ u8 tx_in_buf[CA8210_SPI_BUF_SIZE];
+
+ struct ca8210_priv *priv;
+};
+
+/**
+ * struct ca8210_test - ca8210 test interface structure
+ * @ca8210_dfs_spi_int: pointer to the entry in the debug fs for this device
+ * @up_fifo: fifo for upstream messages
+ *
+ * This structure stores all the data pertaining to the debug interface
+ */
+struct ca8210_test {
+ struct dentry *ca8210_dfs_spi_int;
+ struct kfifo up_fifo;
+ wait_queue_head_t readq;
+};
+
+/**
+ * struct ca8210_priv - ca8210 private data structure
+ * @spi: pointer to the ca8210 spi device object
+ * @hw: pointer to the ca8210 ieee802154_hw object
+ * @hw_registered: true if hw has been registered with ieee802154
+ * @lock: spinlock protecting the private data area
+ * @mlme_workqueue: workqueue for triggering MLME Reset
+ * @irq_workqueue: workqueue for irq processing
+ * @tx_skb: current socket buffer to transmit
+ * @nextmsduhandle: msdu handle to pass to the 15.4 MAC layer for the
+ * next transmission
+ * @clk: external clock provided by the ca8210
+ * @last_dsn: sequence number of last data packet received, for
+ * resend detection
+ * @test: test interface data section for this instance
+ * @async_tx_pending: true if an asynchronous transmission was started and
+ * is not complete
+ * @sync_command_response: pointer to buffer to fill with sync response
+ * @ca8210_is_awake: completion signalled once the ca8210 is initialised
+ * and ready for comms
+ * @sync_down: counts number of downstream synchronous commands
+ * @sync_up: counts number of upstream synchronous commands
+ * @spi_transfer_complete: completion object for a single spi_transfer
+ * @sync_exchange_complete: completion object for a complete synchronous API
+ * exchange
+ * @promiscuous: whether the ca8210 is in promiscuous mode or not
+ * @retries: records how many times the current pending spi
+ * transfer has been retried
+ */
+struct ca8210_priv {
+ struct spi_device *spi;
+ struct ieee802154_hw *hw;
+ bool hw_registered;
+ spinlock_t lock;
+ struct workqueue_struct *mlme_workqueue;
+ struct workqueue_struct *irq_workqueue;
+ struct sk_buff *tx_skb;
+ u8 nextmsduhandle;
+ struct clk *clk;
+ int last_dsn;
+ struct ca8210_test test;
+ bool async_tx_pending;
+ u8 *sync_command_response;
+ struct completion ca8210_is_awake;
+ int sync_down, sync_up;
+ struct completion spi_transfer_complete, sync_exchange_complete;
+ bool promiscuous;
+ int retries;
+};
+
+/**
+ * struct work_priv_container - link between a work object and the relevant
+ * device's private data
+ * @work: work object being executed
+ * @priv: device's private data section
+ *
+ */
+struct work_priv_container {
+ struct work_struct work;
+ struct ca8210_priv *priv;
+};
+
+/**
+ * struct ca8210_platform_data - ca8210 platform data structure
+ * @extclockenable: true if the external clock is to be enabled
+ * @extclockfreq: frequency of the external clock
+ * @extclockgpio: ca8210 output gpio of the external clock
+ * @gpio_reset: gpio number of ca8210 reset line
+ * @gpio_irq: gpio number of ca8210 interrupt line
+ * @irq_id: identifier for the ca8210 irq
+ *
+ */
+struct ca8210_platform_data {
+ bool extclockenable;
+ unsigned int extclockfreq;
+ unsigned int extclockgpio;
+ int gpio_reset;
+ int gpio_irq;
+ int irq_id;
+};
+
+/**
+ * struct fulladdr - full MAC addressing information structure
+ * @mode: address mode (none, short, extended)
+ * @pan_id: 16-bit LE pan id
+ * @address: LE address, variable length as specified by mode
+ *
+ */
+struct fulladdr {
+ u8 mode;
+ u8 pan_id[2];
+ u8 address[8];
+};
+
+/**
+ * union macaddr - generic MAC address container
+ * @short_address: 16-bit short address
+ * @ieee_address: 64-bit extended address as LE byte array
+ *
+ */
+union macaddr {
+ u16 short_address;
+ u8 ieee_address[8];
+};
+
+/**
+ * struct secspec - security specification for SAP commands
+ * @security_level: 0-7, controls level of authentication & encryption
+ * @key_id_mode: 0-3, specifies how to obtain key
+ * @key_source: extended key retrieval data
+ * @key_index: single-byte key identifier
+ *
+ */
+struct secspec {
+ u8 security_level;
+ u8 key_id_mode;
+ u8 key_source[8];
+ u8 key_index;
+};
+
+/* downlink functions parameter set definitions */
+struct mcps_data_request_pset {
+ u8 src_addr_mode;
+ struct fulladdr dst;
+ u8 msdu_length;
+ u8 msdu_handle;
+ u8 tx_options;
+ u8 msdu[MAX_DATA_SIZE];
+};
+
+struct mlme_set_request_pset {
+ u8 pib_attribute;
+ u8 pib_attribute_index;
+ u8 pib_attribute_length;
+ u8 pib_attribute_value[MAX_ATTRIBUTE_SIZE];
+};
+
+struct hwme_set_request_pset {
+ u8 hw_attribute;
+ u8 hw_attribute_length;
+ u8 hw_attribute_value[MAX_HWME_ATTRIBUTE_SIZE];
+};
+
+struct hwme_get_request_pset {
+ u8 hw_attribute;
+};
+
+struct tdme_setsfr_request_pset {
+ u8 sfr_page;
+ u8 sfr_address;
+ u8 sfr_value;
+};
+
+/* uplink functions parameter set definitions */
+struct hwme_set_confirm_pset {
+ u8 status;
+ u8 hw_attribute;
+};
+
+struct hwme_get_confirm_pset {
+ u8 status;
+ u8 hw_attribute;
+ u8 hw_attribute_length;
+ u8 hw_attribute_value[MAX_HWME_ATTRIBUTE_SIZE];
+};
+
+struct tdme_setsfr_confirm_pset {
+ u8 status;
+ u8 sfr_page;
+ u8 sfr_address;
+};
+
+struct mac_message {
+ u8 command_id;
+ u8 length;
+ union {
+ struct mcps_data_request_pset data_req;
+ struct mlme_set_request_pset set_req;
+ struct hwme_set_request_pset hwme_set_req;
+ struct hwme_get_request_pset hwme_get_req;
+ struct tdme_setsfr_request_pset tdme_set_sfr_req;
+ struct hwme_set_confirm_pset hwme_set_cnf;
+ struct hwme_get_confirm_pset hwme_get_cnf;
+ struct tdme_setsfr_confirm_pset tdme_set_sfr_cnf;
+ u8 u8param;
+ u8 status;
+ u8 payload[148];
+ } pdata;
+};
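+/*
+ * Wire framing note: every frame exchanged over SPI is laid out as
+ * command_id, length, then 'length' octets of parameter set. This is why
+ * the downstream helpers below pass 'command.length + 2' and the receive
+ * path (ca8210_rx_done) computes the frame size as 'buf[1] + 2'.
+ */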
+
+union pa_cfg_sfr {
+ struct {
+ u8 bias_current_trim : 3;
+ u8 /* reserved */ : 1;
+ u8 buffer_capacitor_trim : 3;
+ u8 boost : 1;
+ };
+ u8 paib;
+};
+
+struct preamble_cfg_sfr {
+ u8 timeout_symbols : 3;
+ u8 acquisition_symbols : 3;
+ u8 search_symbols : 2;
+};
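+/*
+ * Packing sketch (assuming the LSB-first bitfield layout of the
+ * architectures this driver targets): the value written by tdme_chipinit()
+ * below, {timeout = 3, acquisition = 3, search = 1}, packs to
+ * 3 | (3 << 3) | (1 << 6) = 0x5B in CA8210_SFR_PRECFG.
+ */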
+
+static int (*cascoda_api_upstream)(
+ const u8 *buf,
+ size_t len,
+ void *device_ref
+);
+
+/**
+ * link_to_linux_err() - Translates an 802.15.4 return code into the closest
+ * linux error
+ * @link_status: 802.15.4 status code
+ *
+ * Return: 0 or Linux error code
+ */
+static int link_to_linux_err(int link_status)
+{
+ if (link_status < 0) {
+ /* status is already a Linux code */
+ return link_status;
+ }
+ switch (link_status) {
+ case MAC_SUCCESS:
+ case MAC_REALIGNMENT:
+ return 0;
+ case MAC_IMPROPER_KEY_TYPE:
+ return -EKEYREJECTED;
+ case MAC_IMPROPER_SECURITY_LEVEL:
+ case MAC_UNSUPPORTED_LEGACY:
+ case MAC_DENIED:
+ return -EACCES;
+ case MAC_BEACON_LOST:
+ case MAC_NO_ACK:
+ case MAC_NO_BEACON:
+ return -ENETUNREACH;
+ case MAC_CHANNEL_ACCESS_FAILURE:
+ case MAC_TX_ACTIVE:
+ case MAC_SCAN_IN_PROGRESS:
+ return -EBUSY;
+ case MAC_DISABLE_TRX_FAILURE:
+ case MAC_OUT_OF_CAP:
+ return -EAGAIN;
+ case MAC_FRAME_TOO_LONG:
+ return -EMSGSIZE;
+ case MAC_INVALID_GTS:
+ case MAC_PAST_TIME:
+ return -EBADSLT;
+ case MAC_INVALID_HANDLE:
+ return -EBADMSG;
+ case MAC_INVALID_PARAMETER:
+ case MAC_UNSUPPORTED_ATTRIBUTE:
+ case MAC_ON_TIME_TOO_LONG:
+ case MAC_INVALID_INDEX:
+ return -EINVAL;
+ case MAC_NO_DATA:
+ return -ENODATA;
+ case MAC_NO_SHORT_ADDRESS:
+ return -EFAULT;
+ case MAC_PAN_ID_CONFLICT:
+ return -EADDRINUSE;
+ case MAC_TRANSACTION_EXPIRED:
+ return -ETIME;
+ case MAC_TRANSACTION_OVERFLOW:
+ return -ENOBUFS;
+ case MAC_UNAVAILABLE_KEY:
+ return -ENOKEY;
+ case MAC_INVALID_ADDRESS:
+ return -ENXIO;
+ case MAC_TRACKING_OFF:
+ case MAC_SUPERFRAME_OVERLAP:
+ return -EREMOTEIO;
+ case MAC_LIMIT_REACHED:
+ return -EDQUOT;
+ case MAC_READ_ONLY:
+ return -EROFS;
+ default:
+ return -EPROTO;
+ }
+}
+
+/**
+ * ca8210_test_int_driver_write() - Writes a message to the test interface to
+ * be read by userspace
+ * @buf: Buffer containing upstream message
+ * @len: length of message to write
+ * @spi: SPI device of message originator
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_test_int_driver_write(
+ const u8 *buf,
+ size_t len,
+ void *spi
+)
+{
+ struct ca8210_priv *priv = spi_get_drvdata(spi);
+ struct ca8210_test *test = &priv->test;
+ char *fifo_buffer;
+ int i;
+
+ dev_dbg(
+ &priv->spi->dev,
+ "test_interface: Buffering upstream message:\n"
+ );
+ for (i = 0; i < len; i++)
+ dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
+
+ fifo_buffer = kmalloc(len, GFP_KERNEL);
+ if (!fifo_buffer)
+ return -ENOMEM;
+ memcpy(fifo_buffer, buf, len);
+ kfifo_in(&test->up_fifo, &fifo_buffer, sizeof(fifo_buffer));
+ wake_up_interruptible(&priv->test.readq);
+
+ return 0;
+}
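+/*
+ * Note: the fifo above carries one kmalloc'd buffer pointer per upstream
+ * message (hence the sizeof(fifo_buffer) element size); the debugfs read
+ * handler is assumed to dequeue each pointer and kfree() it after copying
+ * the message to userspace.
+ */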
+
+/* SPI Operation */
+
+static int ca8210_net_rx(
+ struct ieee802154_hw *hw,
+ u8 *command,
+ size_t len
+);
+static u8 mlme_reset_request_sync(
+ u8 set_default_pib,
+ void *device_ref
+);
+static int ca8210_spi_transfer(
+ struct spi_device *spi,
+ const u8 *buf,
+ size_t len
+);
+
+/**
+ * ca8210_reset_send() - Hard resets the ca8210 for a given time
+ * @spi: Pointer to target ca8210 spi device
+ * @ms: Milliseconds to hold the reset line low for
+ */
+static void ca8210_reset_send(struct spi_device *spi, unsigned int ms)
+{
+ struct ca8210_platform_data *pdata = spi->dev.platform_data;
+ struct ca8210_priv *priv = spi_get_drvdata(spi);
+ long status;
+
+ gpio_set_value(pdata->gpio_reset, 0);
+ reinit_completion(&priv->ca8210_is_awake);
+ msleep(ms);
+ gpio_set_value(pdata->gpio_reset, 1);
+ priv->promiscuous = false;
+
+ /* Wait until wakeup indication seen */
+ status = wait_for_completion_interruptible_timeout(
+ &priv->ca8210_is_awake,
+ msecs_to_jiffies(CA8210_SYNC_TIMEOUT)
+ );
+ if (status == 0) {
+ dev_crit(
+ &spi->dev,
+ "Fatal: No wakeup from ca8210 after reset!\n"
+ );
+ }
+
+ dev_dbg(&spi->dev, "Reset the device\n");
+}
+
+/**
+ * ca8210_mlme_reset_worker() - Resets the MLME; called when the MAC
+ * transaction overflow condition happens
+ * @work: Pointer to work being executed
+ */
+static void ca8210_mlme_reset_worker(struct work_struct *work)
+{
+ struct work_priv_container *wpc = container_of(
+ work,
+ struct work_priv_container,
+ work
+ );
+ struct ca8210_priv *priv = wpc->priv;
+
+ mlme_reset_request_sync(0, priv->spi);
+ kfree(wpc);
+}
+
+/**
+ * ca8210_rx_done() - Dispatches a received SAP command to the relevant
+ * consumers
+ * @cas_ctl: Pointer to the cas_control object for the relevant spi transfer
+ *
+ * Presents a received SAP command from the ca8210 to the Cascoda EVBME, test
+ * interface and network driver.
+ */
+static void ca8210_rx_done(struct cas_control *cas_ctl)
+{
+ u8 *buf;
+ u8 len;
+ struct work_priv_container *mlme_reset_wpc;
+ struct ca8210_priv *priv = cas_ctl->priv;
+
+ buf = cas_ctl->tx_in_buf;
+ len = buf[1] + 2;
+ if (len > CA8210_SPI_BUF_SIZE) {
+ dev_crit(
+ &priv->spi->dev,
+ "Received packet len (%d) erroneously long\n",
+ len
+ );
+ goto finish;
+ }
+
+ if (buf[0] & SPI_SYN) {
+ if (priv->sync_command_response) {
+ memcpy(priv->sync_command_response, buf, len);
+ complete(&priv->sync_exchange_complete);
+ } else {
+ if (cascoda_api_upstream)
+ cascoda_api_upstream(buf, len, priv->spi);
+ priv->sync_up++;
+ }
+ } else {
+ if (cascoda_api_upstream)
+ cascoda_api_upstream(buf, len, priv->spi);
+ }
+
+ ca8210_net_rx(priv->hw, buf, len);
+ if (buf[0] == SPI_MCPS_DATA_CONFIRM) {
+ if (buf[3] == MAC_TRANSACTION_OVERFLOW) {
+ dev_info(
+ &priv->spi->dev,
+ "Waiting for transaction overflow to stabilise...\n");
+ msleep(2000);
+ dev_info(
+ &priv->spi->dev,
+ "Resetting MAC...\n");
+
+ mlme_reset_wpc = kmalloc(sizeof(*mlme_reset_wpc),
+ GFP_KERNEL);
+ if (!mlme_reset_wpc)
+ goto finish;
+ INIT_WORK(
+ &mlme_reset_wpc->work,
+ ca8210_mlme_reset_worker
+ );
+ mlme_reset_wpc->priv = priv;
+ queue_work(priv->mlme_workqueue, &mlme_reset_wpc->work);
+ }
+ } else if (buf[0] == SPI_HWME_WAKEUP_INDICATION) {
+ dev_notice(
+ &priv->spi->dev,
+ "Wakeup indication received, reason:\n"
+ );
+ switch (buf[2]) {
+ case 0:
+ dev_notice(
+ &priv->spi->dev,
+ "Transceiver woken up from Power Up / System Reset\n"
+ );
+ break;
+ case 1:
+ dev_notice(
+ &priv->spi->dev,
+ "Watchdog Timer Time-Out\n"
+ );
+ break;
+ case 2:
+ dev_notice(
+ &priv->spi->dev,
+ "Transceiver woken up from Power-Off by Sleep Timer Time-Out\n");
+ break;
+ case 3:
+ dev_notice(
+ &priv->spi->dev,
+ "Transceiver woken up from Power-Off by GPIO Activity\n"
+ );
+ break;
+ case 4:
+ dev_notice(
+ &priv->spi->dev,
+ "Transceiver woken up from Standby by Sleep Timer Time-Out\n"
+ );
+ break;
+ case 5:
+ dev_notice(
+ &priv->spi->dev,
+ "Transceiver woken up from Standby by GPIO Activity\n"
+ );
+ break;
+ case 6:
+ dev_notice(
+ &priv->spi->dev,
+ "Sleep-Timer Time-Out in Active Mode\n"
+ );
+ break;
+ default:
+ dev_warn(&priv->spi->dev, "Wakeup reason unknown\n");
+ break;
+ }
+ complete(&priv->ca8210_is_awake);
+ }
+
+finish:;
+}
+
+static int ca8210_remove(struct spi_device *spi_device);
+
+/**
+ * ca8210_spi_transfer_complete() - Called when a single spi transfer has
+ * completed
+ * @context: Pointer to the cas_control object for the finished transfer
+ */
+static void ca8210_spi_transfer_complete(void *context)
+{
+ struct cas_control *cas_ctl = context;
+ struct ca8210_priv *priv = cas_ctl->priv;
+ bool duplex_rx = false;
+ int i;
+ u8 retry_buffer[CA8210_SPI_BUF_SIZE];
+
+ if (
+ cas_ctl->tx_in_buf[0] == SPI_NACK ||
+ (cas_ctl->tx_in_buf[0] == SPI_IDLE &&
+ cas_ctl->tx_in_buf[1] == SPI_NACK)
+ ) {
+ /* ca8210 is busy */
+ dev_info(&priv->spi->dev, "ca8210 was busy during attempted write\n");
+ if (cas_ctl->tx_buf[0] == SPI_IDLE) {
+ dev_warn(
+ &priv->spi->dev,
+ "IRQ servicing NACKd, dropping transfer\n"
+ );
+ kfree(cas_ctl);
+ return;
+ }
+ if (priv->retries > 3) {
+ dev_err(&priv->spi->dev, "too many retries!\n");
+ kfree(cas_ctl);
+ ca8210_remove(priv->spi);
+ return;
+ }
+ memcpy(retry_buffer, cas_ctl->tx_buf, CA8210_SPI_BUF_SIZE);
+ kfree(cas_ctl);
+ ca8210_spi_transfer(
+ priv->spi,
+ retry_buffer,
+ CA8210_SPI_BUF_SIZE
+ );
+ priv->retries++;
+ dev_info(&priv->spi->dev, "retried spi write\n");
+ return;
+ } else if (
+ cas_ctl->tx_in_buf[0] != SPI_IDLE &&
+ cas_ctl->tx_in_buf[0] != SPI_NACK
+ ) {
+ duplex_rx = true;
+ }
+
+ if (duplex_rx) {
+ dev_dbg(&priv->spi->dev, "READ CMD DURING TX\n");
+ for (i = 0; i < cas_ctl->tx_in_buf[1] + 2; i++)
+ dev_dbg(
+ &priv->spi->dev,
+ "%#03x\n",
+ cas_ctl->tx_in_buf[i]
+ );
+ ca8210_rx_done(cas_ctl);
+ }
+ complete(&priv->spi_transfer_complete);
+ kfree(cas_ctl);
+ priv->retries = 0;
+}
+
+/**
+ * ca8210_spi_transfer() - Initiate duplex spi transfer with ca8210
+ * @spi: Pointer to spi device for transfer
+ * @buf: Octet array to send
+ * @len: length of the buffer being sent
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_spi_transfer(
+ struct spi_device *spi,
+ const u8 *buf,
+ size_t len
+)
+{
+ int i, status = 0;
+ struct ca8210_priv *priv;
+ struct cas_control *cas_ctl;
+
+ if (!spi) {
+ pr_crit("NULL spi device passed to ca8210_spi_transfer\n");
+ return -ENODEV;
+ }
+ /* only dereference spi once it is known to be non-NULL */
+ priv = spi_get_drvdata(spi);
+
+ reinit_completion(&priv->spi_transfer_complete);
+
+ dev_dbg(&spi->dev, "ca8210_spi_transfer called\n");
+
+ cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
+ if (!cas_ctl)
+ return -ENOMEM;
+
+ cas_ctl->priv = priv;
+ memset(cas_ctl->tx_buf, SPI_IDLE, CA8210_SPI_BUF_SIZE);
+ memset(cas_ctl->tx_in_buf, SPI_IDLE, CA8210_SPI_BUF_SIZE);
+ memcpy(cas_ctl->tx_buf, buf, len);
+
+ for (i = 0; i < len; i++)
+ dev_dbg(&spi->dev, "%#03x\n", cas_ctl->tx_buf[i]);
+
+ spi_message_init(&cas_ctl->msg);
+
+ cas_ctl->transfer.tx_nbits = 1; /* 1 MOSI line */
+ cas_ctl->transfer.rx_nbits = 1; /* 1 MISO line */
+ cas_ctl->transfer.speed_hz = 0; /* Use device setting */
+ cas_ctl->transfer.bits_per_word = 0; /* Use device setting */
+ cas_ctl->transfer.tx_buf = cas_ctl->tx_buf;
+ cas_ctl->transfer.rx_buf = cas_ctl->tx_in_buf;
+ cas_ctl->transfer.delay_usecs = 0;
+ cas_ctl->transfer.cs_change = 0;
+ cas_ctl->transfer.len = sizeof(struct mac_message);
+ cas_ctl->msg.complete = ca8210_spi_transfer_complete;
+ cas_ctl->msg.context = cas_ctl;
+
+ spi_message_add_tail(
+ &cas_ctl->transfer,
+ &cas_ctl->msg
+ );
+
+ status = spi_async(spi, &cas_ctl->msg);
+ if (status < 0) {
+ dev_crit(
+ &spi->dev,
+ "status %d from spi_sync in write\n",
+ status
+ );
+ }
+
+ return status;
+}
+
+/**
+ * ca8210_spi_exchange() - Exchange API/SAP commands with the radio
+ * @buf: Octet array of command being sent downstream
+ * @len: length of buf
+ * @response: buffer for storing synchronous response
+ * @device_ref: spi_device pointer for ca8210
+ *
+ * Effectively calls ca8210_spi_transfer to write buf[] to the spi, then for
+ * synchronous commands waits for the corresponding response to be read from
+ * the spi before returning. The response is written to the response parameter.
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_spi_exchange(
+ const u8 *buf,
+ size_t len,
+ u8 *response,
+ void *device_ref
+)
+{
+ int status = 0;
+ struct spi_device *spi = device_ref;
+ struct ca8210_priv *priv = spi_get_drvdata(spi);
+ long wait_remaining;
+
+ if ((buf[0] & SPI_SYN) && response) { /* if sync wait for confirm */
+ reinit_completion(&priv->sync_exchange_complete);
+ priv->sync_command_response = response;
+ }
+
+ do {
+ reinit_completion(&priv->spi_transfer_complete);
+ status = ca8210_spi_transfer(priv->spi, buf, len);
+ if (status) {
+ dev_warn(
+ &spi->dev,
+ "spi write failed, returned %d\n",
+ status
+ );
+ if (status == -EBUSY)
+ continue;
+ if (((buf[0] & SPI_SYN) && response))
+ complete(&priv->sync_exchange_complete);
+ goto cleanup;
+ }
+
+ wait_remaining = wait_for_completion_interruptible_timeout(
+ &priv->spi_transfer_complete,
+ msecs_to_jiffies(1000)
+ );
+ if (wait_remaining == -ERESTARTSYS) {
+ status = -ERESTARTSYS;
+ } else if (wait_remaining == 0) {
+ dev_err(
+ &spi->dev,
+ "SPI downstream transfer timed out!\n"
+ );
+ status = -ETIME;
+ goto cleanup;
+ }
+ } while (status < 0);
+
+ if (!((buf[0] & SPI_SYN) && response))
+ goto cleanup;
+
+ wait_remaining = wait_for_completion_interruptible_timeout(
+ &priv->sync_exchange_complete,
+ msecs_to_jiffies(CA8210_SYNC_TIMEOUT)
+ );
+ if (wait_remaining == -ERESTARTSYS) {
+ status = -ERESTARTSYS;
+ } else if (wait_remaining == 0) {
+ dev_err(
+ &spi->dev,
+ "Synchronous confirm timeout\n"
+ );
+ status = -ETIME;
+ }
+
+cleanup:
+ priv->sync_command_response = NULL;
+ return status;
+}
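+/*
+ * Calling convention sketch (tdme_setsfr_request_sync() below is the
+ * canonical example): build a struct mac_message, pass &command.command_id
+ * with command.length + 2 octets downstream, and supply a second
+ * mac_message to receive the synchronous confirm.
+ */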
+
+/**
+ * ca8210_interrupt_handler() - Called when an irq is received from the ca8210
+ * @irq: Id of the irq being handled
+ * @dev_id: Pointer passed by the system, pointing to the ca8210's private data
+ *
+ * This function is called when the irq line from the ca8210 is asserted,
+ * signifying that the ca8210 has a message to send upstream to us. Starts the
+ * asynchronous spi read.
+ *
+ * Return: irq return code
+ */
+static irqreturn_t ca8210_interrupt_handler(int irq, void *dev_id)
+{
+ struct ca8210_priv *priv = dev_id;
+ int status;
+
+ dev_dbg(&priv->spi->dev, "irq: Interrupt occurred\n");
+ do {
+ status = ca8210_spi_transfer(priv->spi, NULL, 0);
+ if (status && (status != -EBUSY)) {
+ dev_warn(
+ &priv->spi->dev,
+ "spi read failed, returned %d\n",
+ status
+ );
+ }
+ } while (status == -EBUSY);
+ return IRQ_HANDLED;
+}
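+/*
+ * Note: the zero-length transfer issued above still clocks a full
+ * mac_message-sized frame of SPI_IDLE octets out of the master (see the
+ * memset in ca8210_spi_transfer), letting the ca8210 shift its pending
+ * upstream message into tx_in_buf during the same duplex exchange.
+ */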
+
+static int (*cascoda_api_downstream)(
+ const u8 *buf,
+ size_t len,
+ u8 *response,
+ void *device_ref
+) = ca8210_spi_exchange;
+
+/* Cascoda API / 15.4 SAP Primitives */
+
+/**
+ * tdme_setsfr_request_sync() - TDME_SETSFR_request/confirm according to API
+ * @sfr_page: SFR Page
+ * @sfr_address: SFR Address
+ * @sfr_value: SFR Value
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Return: 802.15.4 status code of TDME-SETSFR.confirm
+ */
+static u8 tdme_setsfr_request_sync(
+ u8 sfr_page,
+ u8 sfr_address,
+ u8 sfr_value,
+ void *device_ref
+)
+{
+ int ret;
+ struct mac_message command, response;
+ struct spi_device *spi = device_ref;
+
+ command.command_id = SPI_TDME_SETSFR_REQUEST;
+ command.length = 3;
+ command.pdata.tdme_set_sfr_req.sfr_page = sfr_page;
+ command.pdata.tdme_set_sfr_req.sfr_address = sfr_address;
+ command.pdata.tdme_set_sfr_req.sfr_value = sfr_value;
+ response.command_id = SPI_IDLE;
+ ret = cascoda_api_downstream(
+ &command.command_id,
+ command.length + 2,
+ &response.command_id,
+ device_ref
+ );
+ if (ret) {
+ dev_crit(&spi->dev, "cascoda_api_downstream returned %d", ret);
+ return MAC_SYSTEM_ERROR;
+ }
+
+ if (response.command_id != SPI_TDME_SETSFR_CONFIRM) {
+ dev_crit(
+ &spi->dev,
+ "sync response to SPI_TDME_SETSFR_REQUEST was not SPI_TDME_SETSFR_CONFIRM, it was %d\n",
+ response.command_id
+ );
+ return MAC_SYSTEM_ERROR;
+ }
+
+ return response.pdata.tdme_set_sfr_cnf.status;
+}
+
+/**
+ * tdme_chipinit() - TDME Chip Register Default Initialisation Macro
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Return: 802.15.4 status code of API calls
+ */
+static u8 tdme_chipinit(void *device_ref)
+{
+ u8 status = MAC_SUCCESS;
+ u8 sfr_address;
+ struct spi_device *spi = device_ref;
+ struct preamble_cfg_sfr pre_cfg_value = {
+ .timeout_symbols = 3,
+ .acquisition_symbols = 3,
+ .search_symbols = 1,
+ };
+ /* LNA Gain Settings */
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_LNAGX40),
+ LNAGX40_DEFAULT_GAIN, device_ref);
+ if (status)
+ goto finish;
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_LNAGX41),
+ LNAGX41_DEFAULT_GAIN, device_ref);
+ if (status)
+ goto finish;
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_LNAGX42),
+ LNAGX42_DEFAULT_GAIN, device_ref);
+ if (status)
+ goto finish;
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_LNAGX43),
+ LNAGX43_DEFAULT_GAIN, device_ref);
+ if (status)
+ goto finish;
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_LNAGX44),
+ LNAGX44_DEFAULT_GAIN, device_ref);
+ if (status)
+ goto finish;
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_LNAGX45),
+ LNAGX45_DEFAULT_GAIN, device_ref);
+ if (status)
+ goto finish;
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_LNAGX46),
+ LNAGX46_DEFAULT_GAIN, device_ref);
+ if (status)
+ goto finish;
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_LNAGX47),
+ LNAGX47_DEFAULT_GAIN, device_ref);
+ if (status)
+ goto finish;
+ /* Preamble Timing Config */
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_PRECFG),
+ *((u8 *)&pre_cfg_value), device_ref);
+ if (status)
+ goto finish;
+ /* Preamble Threshold High */
+ status = tdme_setsfr_request_sync(
+ 1, (sfr_address = CA8210_SFR_PTHRH),
+ PTHRH_DEFAULT_THRESHOLD, device_ref);
+ if (status)
+ goto finish;
+ /* Tx Output Power 8 dBm */
+ status = tdme_setsfr_request_sync(
+ 0, (sfr_address = CA8210_SFR_PACFGIB),
+ PACFGIB_DEFAULT_CURRENT, device_ref);
+ if (status)
+ goto finish;
+
+finish:
+ if (status != MAC_SUCCESS) {
+ dev_err(
+ &spi->dev,
+ "failed to set sfr at %#03x, status = %#03x\n",
+ sfr_address,
+ status
+ );
+ }
+ return status;
+}
+
+/**
+ * tdme_channelinit() - TDME Channel Register Default Initialisation Macro (Tx)
+ * @channel: 802.15.4 channel to initialise chip for
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Return: 802.15.4 status code of API calls
+ */
+static u8 tdme_channelinit(u8 channel, void *device_ref)
+{
+ /* Transceiver front-end local oscillator tx two-point calibration
+ * value. Tuned for the hardware.
+ */
+ u8 txcalval;
+
+ if (channel >= 25)
+ txcalval = 0xA7;
+ else if (channel >= 23)
+ txcalval = 0xA8;
+ else if (channel >= 22)
+ txcalval = 0xA9;
+ else if (channel >= 20)
+ txcalval = 0xAA;
+ else if (channel >= 17)
+ txcalval = 0xAB;
+ else if (channel >= 16)
+ txcalval = 0xAC;
+ else if (channel >= 14)
+ txcalval = 0xAD;
+ else if (channel >= 12)
+ txcalval = 0xAE;
+ else
+ txcalval = 0xAF;
+
+ return tdme_setsfr_request_sync(
+ 1,
+ CA8210_SFR_LOTXCAL,
+ txcalval,
+ device_ref
+ ); /* LO Tx Cal */
+}
+
+/**
+ * tdme_checkpibattribute() - Checks Attribute Values that are not checked in
+ * MAC
+ * @pib_attribute: Attribute Number
+ * @pib_attribute_length: Attribute length
+ * @pib_attribute_value: Pointer to Attribute Value
+ *
+ * Return: 802.15.4 status code of checks
+ */
+static u8 tdme_checkpibattribute(
+ u8 pib_attribute,
+ u8 pib_attribute_length,
+ const void *pib_attribute_value
+)
+{
+ u8 status = MAC_SUCCESS;
+ u8 value;
+
+ value = *((u8 *)pib_attribute_value);
+
+ switch (pib_attribute) {
+ /* PHY */
+ case PHY_TRANSMIT_POWER:
+ if (value > 0x3F)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case PHY_CCA_MODE:
+ if (value > 0x03)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ /* MAC */
+ case MAC_BATT_LIFE_EXT_PERIODS:
+ if ((value < 6) || (value > 41))
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_BEACON_PAYLOAD:
+ if (pib_attribute_length > MAX_BEACON_PAYLOAD_LENGTH)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_BEACON_PAYLOAD_LENGTH:
+ if (value > MAX_BEACON_PAYLOAD_LENGTH)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_BEACON_ORDER:
+ if (value > 15)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_MAX_BE:
+ if ((value < 3) || (value > 8))
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_MAX_CSMA_BACKOFFS:
+ if (value > 5)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_MAX_FRAME_RETRIES:
+ if (value > 7)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_MIN_BE:
+ if (value > 8)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_RESPONSE_WAIT_TIME:
+ if ((value < 2) || (value > 64))
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_SUPERFRAME_ORDER:
+ if (value > 15)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ /* boolean */
+ case MAC_ASSOCIATED_PAN_COORD:
+ case MAC_ASSOCIATION_PERMIT:
+ case MAC_AUTO_REQUEST:
+ case MAC_BATT_LIFE_EXT:
+ case MAC_GTS_PERMIT:
+ case MAC_PROMISCUOUS_MODE:
+ case MAC_RX_ON_WHEN_IDLE:
+ case MAC_SECURITY_ENABLED:
+ if (value > 1)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ /* MAC SEC */
+ case MAC_AUTO_REQUEST_SECURITY_LEVEL:
+ if (value > 7)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ case MAC_AUTO_REQUEST_KEY_ID_MODE:
+ if (value > 3)
+ status = MAC_INVALID_PARAMETER;
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * tdme_settxpower() - Sets the tx power for MLME_SET phyTransmitPower
+ * @txp: Transmit Power
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Normalised to 802.15.4 Definition (6-bit, signed):
+ * Bit 7-6: not used
+ * Bit 5-0: tx power (-32 to +31 dB)
+ *
+ * Return: 802.15.4 status code of api calls
+ */
+static u8 tdme_settxpower(u8 txp, void *device_ref)
+{
+ u8 status;
+ s8 txp_val;
+ u8 txp_ext;
+ union pa_cfg_sfr pa_cfg_val;
+
+ /* extend from 6 to 8 bit */
+ txp_ext = 0x3F & txp;
+ if (txp_ext & 0x20)
+ txp_ext += 0xC0;
+ txp_val = (s8)txp_ext;
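+ /* e.g. (illustrative): txp = 0x3F encodes -1 dB; bit 5 is set, so
+ * txp_ext becomes 0x3F + 0xC0 = 0xFF and txp_val = (s8)0xFF = -1
+ */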
+
+ if (CA8210_MAC_MPW) {
+ if (txp_val > 0) {
+ /* 8 dBm: ptrim = 5, itrim = +3 => +4 dBm */
+ pa_cfg_val.bias_current_trim = 3;
+ pa_cfg_val.buffer_capacitor_trim = 5;
+ pa_cfg_val.boost = 1;
+ } else {
+ /* 0 dBm: ptrim = 7, itrim = +3 => -6 dBm */
+ pa_cfg_val.bias_current_trim = 3;
+ pa_cfg_val.buffer_capacitor_trim = 7;
+ pa_cfg_val.boost = 0;
+ }
+ /* write PACFG */
+ status = tdme_setsfr_request_sync(
+ 0,
+ CA8210_SFR_PACFG,
+ pa_cfg_val.paib,
+ device_ref
+ );
+ } else {
+ /* Look-Up Table for Setting Current and Frequency Trim values
+ * for desired Output Power
+ */
+ if (txp_val > 8) {
+ pa_cfg_val.paib = 0x3F;
+ } else if (txp_val == 8) {
+ pa_cfg_val.paib = 0x32;
+ } else if (txp_val == 7) {
+ pa_cfg_val.paib = 0x22;
+ } else if (txp_val == 6) {
+ pa_cfg_val.paib = 0x18;
+ } else if (txp_val == 5) {
+ pa_cfg_val.paib = 0x10;
+ } else if (txp_val == 4) {
+ pa_cfg_val.paib = 0x0C;
+ } else if (txp_val == 3) {
+ pa_cfg_val.paib = 0x08;
+ } else if (txp_val == 2) {
+ pa_cfg_val.paib = 0x05;
+ } else if (txp_val == 1) {
+ pa_cfg_val.paib = 0x03;
+ } else if (txp_val == 0) {
+ pa_cfg_val.paib = 0x01;
+ } else { /* < 0 */
+ pa_cfg_val.paib = 0x00;
+ }
+ /* write PACFGIB */
+ status = tdme_setsfr_request_sync(
+ 0,
+ CA8210_SFR_PACFGIB,
+ pa_cfg_val.paib,
+ device_ref
+ );
+ }
+
+ return status;
+}
+
+/**
+ * mcps_data_request() - mcps_data_request (Send Data) according to API Spec
+ * @src_addr_mode: Source Addressing Mode
+ * @dst_address_mode: Destination Addressing Mode
+ * @dst_pan_id: Destination PAN ID
+ * @dst_addr: Pointer to Destination Address
+ * @msdu_length: length of Data
+ * @msdu: Pointer to Data
+ * @msdu_handle: Handle of Data
+ * @tx_options: Tx Options Bit Field
+ * @security: Pointer to Security Structure or NULL
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Return: 802.15.4 status code of action
+ */
+static u8 mcps_data_request(
+ u8 src_addr_mode,
+ u8 dst_address_mode,
+ u16 dst_pan_id,
+ union macaddr *dst_addr,
+ u8 msdu_length,
+ u8 *msdu,
+ u8 msdu_handle,
+ u8 tx_options,
+ struct secspec *security,
+ void *device_ref
+)
+{
+ struct secspec *psec;
+ struct mac_message command;
+
+ command.command_id = SPI_MCPS_DATA_REQUEST;
+ command.pdata.data_req.src_addr_mode = src_addr_mode;
+ command.pdata.data_req.dst.mode = dst_address_mode;
+ if (dst_address_mode != MAC_MODE_NO_ADDR) {
+ command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id);
+ command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id);
+ if (dst_address_mode == MAC_MODE_SHORT_ADDR) {
+ command.pdata.data_req.dst.address[0] = LS_BYTE(
+ dst_addr->short_address
+ );
+ command.pdata.data_req.dst.address[1] = MS_BYTE(
+ dst_addr->short_address
+ );
+ } else { /* MAC_MODE_LONG_ADDR */
+ memcpy(
+ command.pdata.data_req.dst.address,
+ dst_addr->ieee_address,
+ 8
+ );
+ }
+ }
+ command.pdata.data_req.msdu_length = msdu_length;
+ command.pdata.data_req.msdu_handle = msdu_handle;
+ command.pdata.data_req.tx_options = tx_options;
+ memcpy(command.pdata.data_req.msdu, msdu, msdu_length);
+ psec = (struct secspec *)(command.pdata.data_req.msdu + msdu_length);
+ command.length = sizeof(struct mcps_data_request_pset) -
+ MAX_DATA_SIZE + msdu_length;
+ if (!security || (security->security_level == 0)) {
+ psec->security_level = 0;
+ command.length += 1;
+ } else {
+ *psec = *security;
+ command.length += sizeof(struct secspec);
+ }
+
+ if (ca8210_spi_transfer(device_ref, &command.command_id,
+ command.length + 2))
+ return MAC_SYSTEM_ERROR;
+
+ return MAC_SUCCESS;
+}
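+/*
+ * Length arithmetic for the request above (all pset members are u8, so no
+ * padding is expected): sizeof(struct mcps_data_request_pset) is
+ * 15 + MAX_DATA_SIZE, hence an unsecured n-byte MSDU gives
+ * command.length = 16 + n, i.e. 18 + n octets on the wire once the
+ * command_id and length header are included.
+ */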
+
+/**
+ * mlme_reset_request_sync() - MLME_RESET_request/confirm according to API Spec
+ * @set_default_pib: Set defaults in PIB
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Return: 802.15.4 status code of MLME-RESET.confirm
+ */
+static u8 mlme_reset_request_sync(
+ u8 set_default_pib,
+ void *device_ref
+)
+{
+ u8 status;
+ struct mac_message command, response;
+ struct spi_device *spi = device_ref;
+
+ command.command_id = SPI_MLME_RESET_REQUEST;
+ command.length = 1;
+ command.pdata.u8param = set_default_pib;
+
+ if (cascoda_api_downstream(
+ &command.command_id,
+ command.length + 2,
+ &response.command_id,
+ device_ref)) {
+ dev_err(&spi->dev, "cascoda_api_downstream failed\n");
+ return MAC_SYSTEM_ERROR;
+ }
+
+ if (response.command_id != SPI_MLME_RESET_CONFIRM)
+ return MAC_SYSTEM_ERROR;
+
+ status = response.pdata.status;
+
+ /* reset COORD Bit for Channel Filtering as Coordinator */
+ if (CA8210_MAC_WORKAROUNDS && set_default_pib && (!status)) {
+ status = tdme_setsfr_request_sync(
+ 0,
+ CA8210_SFR_MACCON,
+ 0,
+ device_ref
+ );
+ }
+
+ return status;
+}
+
+/**
+ * mlme_set_request_sync() - MLME_SET_request/confirm according to API Spec
+ * @pib_attribute: Attribute Number
+ * @pib_attribute_index: Index within Attribute if an Array
+ * @pib_attribute_length: Attribute length
+ * @pib_attribute_value: Pointer to Attribute Value
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Return: 802.15.4 status code of MLME-SET.confirm
+ */
+static u8 mlme_set_request_sync(
+ u8 pib_attribute,
+ u8 pib_attribute_index,
+ u8 pib_attribute_length,
+ const void *pib_attribute_value,
+ void *device_ref
+)
+{
+ u8 status;
+ struct mac_message command, response;
+
+ /* pre-check the validity of pib_attribute values that are not checked
+ * in MAC
+ */
+ if (tdme_checkpibattribute(
+ pib_attribute, pib_attribute_length, pib_attribute_value)) {
+ return MAC_INVALID_PARAMETER;
+ }
+
+ if (pib_attribute == PHY_CURRENT_CHANNEL) {
+ status = tdme_channelinit(
+ *((u8 *)pib_attribute_value),
+ device_ref
+ );
+ if (status)
+ return status;
+ }
+
+ if (pib_attribute == PHY_TRANSMIT_POWER) {
+ return tdme_settxpower(
+ *((u8 *)pib_attribute_value),
+ device_ref
+ );
+ }
+
+ command.command_id = SPI_MLME_SET_REQUEST;
+ command.length = sizeof(struct mlme_set_request_pset) -
+ MAX_ATTRIBUTE_SIZE + pib_attribute_length;
+ command.pdata.set_req.pib_attribute = pib_attribute;
+ command.pdata.set_req.pib_attribute_index = pib_attribute_index;
+ command.pdata.set_req.pib_attribute_length = pib_attribute_length;
+ memcpy(
+ command.pdata.set_req.pib_attribute_value,
+ pib_attribute_value,
+ pib_attribute_length
+ );
+
+ if (cascoda_api_downstream(
+ &command.command_id,
+ command.length + 2,
+ &response.command_id,
+ device_ref)) {
+ return MAC_SYSTEM_ERROR;
+ }
+
+ if (response.command_id != SPI_MLME_SET_CONFIRM)
+ return MAC_SYSTEM_ERROR;
+
+ return response.pdata.status;
+}
+
+/**
+ * hwme_set_request_sync() - HWME_SET_request/confirm according to API Spec
+ * @hw_attribute: Attribute Number
+ * @hw_attribute_length: Attribute length
+ * @hw_attribute_value: Pointer to Attribute Value
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Return: 802.15.4 status code of HWME-SET.confirm
+ */
+static u8 hwme_set_request_sync(
+ u8 hw_attribute,
+ u8 hw_attribute_length,
+ u8 *hw_attribute_value,
+ void *device_ref
+)
+{
+ struct mac_message command, response;
+
+ command.command_id = SPI_HWME_SET_REQUEST;
+ command.length = 2 + hw_attribute_length;
+ command.pdata.hwme_set_req.hw_attribute = hw_attribute;
+ command.pdata.hwme_set_req.hw_attribute_length = hw_attribute_length;
+ memcpy(
+ command.pdata.hwme_set_req.hw_attribute_value,
+ hw_attribute_value,
+ hw_attribute_length
+ );
+
+ if (cascoda_api_downstream(
+ &command.command_id,
+ command.length + 2,
+ &response.command_id,
+ device_ref)) {
+ return MAC_SYSTEM_ERROR;
+ }
+
+ if (response.command_id != SPI_HWME_SET_CONFIRM)
+ return MAC_SYSTEM_ERROR;
+
+ return response.pdata.hwme_set_cnf.status;
+}
+
+/**
+ * hwme_get_request_sync() - HWME_GET_request/confirm according to API Spec
+ * @hw_attribute: Attribute Number
+ * @hw_attribute_length: Attribute length
+ * @hw_attribute_value: Pointer to Attribute Value
+ * @device_ref: Nondescript pointer to target device
+ *
+ * Return: 802.15.4 status code of HWME-GET.confirm
+ */
+static u8 hwme_get_request_sync(
+ u8 hw_attribute,
+ u8 *hw_attribute_length,
+ u8 *hw_attribute_value,
+ void *device_ref
+)
+{
+ struct mac_message command, response;
+
+ command.command_id = SPI_HWME_GET_REQUEST;
+ command.length = 1;
+ command.pdata.hwme_get_req.hw_attribute = hw_attribute;
+
+ if (cascoda_api_downstream(
+ &command.command_id,
+ command.length + 2,
+ &response.command_id,
+ device_ref)) {
+ return MAC_SYSTEM_ERROR;
+ }
+
+ if (response.command_id != SPI_HWME_GET_CONFIRM)
+ return MAC_SYSTEM_ERROR;
+
+ if (response.pdata.hwme_get_cnf.status == MAC_SUCCESS) {
+ *hw_attribute_length =
+ response.pdata.hwme_get_cnf.hw_attribute_length;
+ memcpy(
+ hw_attribute_value,
+ response.pdata.hwme_get_cnf.hw_attribute_value,
+ *hw_attribute_length
+ );
+ }
+
+ return response.pdata.hwme_get_cnf.status;
+}
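+/*
+ * Minimal usage sketch (illustrative only, not part of the driver): read
+ * the current energy-detect level via the HWME_EDVALUE attribute defined
+ * above, where 'spi' is the target spi_device:
+ *
+ *	u8 len, val[MAX_HWME_ATTRIBUTE_SIZE];
+ *
+ *	if (hwme_get_request_sync(HWME_EDVALUE, &len, val, spi) == MAC_SUCCESS)
+ *		dev_dbg(&spi->dev, "ED value: %d\n", val[0]);
+ */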
+
+/* Network driver operation */
+
+/**
+ * ca8210_async_xmit_complete() - Called to announce that an asynchronous
+ * transmission has finished
+ * @hw: ieee802154_hw of ca8210 that has finished exchange
+ * @msduhandle: Identifier of transmission that has completed
+ * @status: Returned 802.15.4 status code of the transmission
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_async_xmit_complete(
+ struct ieee802154_hw *hw,
+ u8 msduhandle,
+ u8 status)
+{
+ struct ca8210_priv *priv = hw->priv;
+
+ if (priv->nextmsduhandle != msduhandle) {
+ dev_err(
+ &priv->spi->dev,
+ "Unexpected msdu_handle on data confirm, Expected %d, got %d\n",
+ priv->nextmsduhandle,
+ msduhandle
+ );
+ return -EIO;
+ }
+
+ priv->async_tx_pending = false;
+ priv->nextmsduhandle++;
+
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "Link transmission unsuccessful, status = %d\n",
+ status
+ );
+ if (status != MAC_TRANSACTION_OVERFLOW) {
+ ieee802154_wake_queue(priv->hw);
+ return 0;
+ }
+ }
+ ieee802154_xmit_complete(priv->hw, priv->tx_skb, true);
+
+ return 0;
+}
+
+/**
+ * ca8210_skb_rx() - Constructs a properly framed socket buffer from a received
+ * MCPS_DATA_indication
+ * @hw: ieee802154_hw that MCPS_DATA_indication was received by
+ * @len: length of MCPS_DATA_indication
+ * @data_ind: Octet array of MCPS_DATA_indication
+ *
+ * Constructs a socket buffer from a received MCPS_DATA_indication, rebuilds
+ * the 802.15.4 MAC header implied by the indication and hands the frame to
+ * the stack via ieee802154_rx_irqsafe().
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_skb_rx(
+ struct ieee802154_hw *hw,
+ size_t len,
+ u8 *data_ind
+)
+{
+ struct ieee802154_hdr hdr;
+ int msdulen;
+ int hlen;
+ u8 mpdulinkquality = data_ind[23];
+ struct sk_buff *skb;
+ struct ca8210_priv *priv = hw->priv;
+
+ /* Allocate mtu size buffer for every rx packet */
+ skb = dev_alloc_skb(IEEE802154_MTU + sizeof(hdr));
+ if (!skb) {
+ dev_crit(&priv->spi->dev, "dev_alloc_skb failed\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(hdr));
+
+ msdulen = data_ind[22]; /* msdu_length */
+ if (msdulen > IEEE802154_MTU) {
+ dev_err(
+ &priv->spi->dev,
+ "received erroneously large msdu length!\n"
+ );
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+ dev_dbg(&priv->spi->dev, "skb buffer length = %d\n", msdulen);
+
+ if (priv->promiscuous)
+ goto copy_payload;
+
+ /* Populate hdr */
+ hdr.sec.level = data_ind[29 + msdulen];
+ dev_dbg(&priv->spi->dev, "security level: %#03x\n", hdr.sec.level);
+ if (hdr.sec.level > 0) {
+ hdr.sec.key_id_mode = data_ind[30 + msdulen];
+ memcpy(&hdr.sec.extended_src, &data_ind[31 + msdulen], 8);
+ hdr.sec.key_id = data_ind[39 + msdulen];
+ }
+ hdr.source.mode = data_ind[0];
+ dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode);
+ hdr.source.pan_id = *(u16 *)&data_ind[1];
+ dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id);
+ memcpy(&hdr.source.extended_addr, &data_ind[3], 8);
+ hdr.dest.mode = data_ind[11];
+ dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode);
+ hdr.dest.pan_id = *(u16 *)&data_ind[12];
+ dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id);
+ memcpy(&hdr.dest.extended_addr, &data_ind[14], 8);
+
+ /* Fill in FC implicitly */
+ hdr.fc.type = 1; /* Data frame */
+ if (hdr.sec.level)
+ hdr.fc.security_enabled = 1;
+ else
+ hdr.fc.security_enabled = 0;
+ if (data_ind[1] != data_ind[12] || data_ind[2] != data_ind[13])
+ hdr.fc.intra_pan = 1;
+ else
+ hdr.fc.intra_pan = 0;
+ hdr.fc.dest_addr_mode = hdr.dest.mode;
+ hdr.fc.source_addr_mode = hdr.source.mode;
+
+ /* Add hdr to front of buffer */
+ hlen = ieee802154_hdr_push(skb, &hdr);
+
+ if (hlen < 0) {
+ dev_crit(&priv->spi->dev, "failed to push mac hdr onto skb!\n");
+ kfree_skb(skb);
+ return hlen;
+ }
+
+ skb_reset_mac_header(skb);
+ skb->mac_len = hlen;
+
+copy_payload:
+ /* Append the <msdulen> octets of msdu to the back of the buffer */
+ memcpy(skb_put(skb, msdulen), &data_ind[29], msdulen);
+
+ ieee802154_rx_irqsafe(hw, skb, mpdulinkquality);
+ return 0;
+}
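+
+/*
+ * For reference, the MCPS_DATA_indication octet offsets consumed above, as
+ * derived from the parsing in ca8210_skb_rx():
+ *
+ *   0        source addressing mode
+ *   1-2      source PAN id
+ *   3-10     source extended address
+ *   11       destination addressing mode
+ *   12-13    destination PAN id
+ *   14-21    destination extended address
+ *   22       msdu length
+ *   23       mpdu link quality
+ *   29       start of msdu
+ *   29+len   security specification (level, key id mode, key source, index)
+ */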
+
+/**
+ * ca8210_net_rx() - Acts upon received SAP commands relevant to the network
+ * driver
+ * @hw: ieee802154_hw that command was received by
+ * @command: Octet array of received command
+ * @len: length of the received command
+ *
+ * Called by the spi driver whenever a SAP command is received, this function
+ * will ascertain whether the command is of interest to the network driver and
+ * take necessary action.
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_net_rx(struct ieee802154_hw *hw, u8 *command, size_t len)
+{
+ struct ca8210_priv *priv = hw->priv;
+ unsigned long flags;
+ u8 status;
+
+ dev_dbg(&priv->spi->dev, "ca8210_net_rx(), CmdID = %d\n", command[0]);
+
+ if (command[0] == SPI_MCPS_DATA_INDICATION) {
+ /* Received data */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (command[26] == priv->last_dsn) {
+ dev_dbg(
+ &priv->spi->dev,
+ "DSN %d resend received, ignoring...\n",
+ command[26]
+ );
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+ }
+ priv->last_dsn = command[26];
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ca8210_skb_rx(hw, len - 2, command + 2);
+ } else if (command[0] == SPI_MCPS_DATA_CONFIRM) {
+ status = command[3];
+ if (priv->async_tx_pending) {
+ return ca8210_async_xmit_complete(
+ hw,
+ command[2],
+ status
+ );
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ca8210_skb_tx() - Transmits a given socket buffer using the ca8210
+ * @skb: Socket buffer to transmit
+ * @msduhandle: Data identifier to pass to the 802.15.4 MAC
+ * @priv: Pointer to private data section of target ca8210
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_skb_tx(
+ struct sk_buff *skb,
+ u8 msduhandle,
+ struct ca8210_priv *priv
+)
+{
+ int status;
+ struct ieee802154_hdr header = { 0 };
+ struct secspec secspec;
+ unsigned int mac_len;
+
+ dev_dbg(&priv->spi->dev, "ca8210_skb_tx() called\n");
+
+ /* Get addressing info from skb - ieee802154 layer creates a full
+ * packet
+ */
+ mac_len = ieee802154_hdr_peek_addrs(skb, &header);
+
+ secspec.security_level = header.sec.level;
+ secspec.key_id_mode = header.sec.key_id_mode;
+ if (secspec.key_id_mode == 2)
+ memcpy(secspec.key_source, &header.sec.short_src, 4);
+ else if (secspec.key_id_mode == 3)
+ memcpy(secspec.key_source, &header.sec.extended_src, 8);
+ secspec.key_index = header.sec.key_id;
+
+ /* Pass to Cascoda API */
+ status = mcps_data_request(
+ header.source.mode,
+ header.dest.mode,
+ header.dest.pan_id,
+ (union macaddr *)&header.dest.extended_addr,
+ skb->len - mac_len,
+ &skb->data[mac_len],
+ msduhandle,
+ header.fc.ack_request,
+ &secspec,
+ priv->spi
+ );
+ return link_to_linux_err(status);
+}
+
+/**
+ * ca8210_start() - Starts the network driver
+ * @hw: ieee802154_hw of ca8210 being started
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_start(struct ieee802154_hw *hw)
+{
+ int status;
+ u8 rx_on_when_idle;
+ u8 lqi_threshold = 0;
+ struct ca8210_priv *priv = hw->priv;
+
+ priv->last_dsn = -1;
+ /* Turn receiver on when idle for now just to test rx */
+ rx_on_when_idle = 1;
+ status = mlme_set_request_sync(
+ MAC_RX_ON_WHEN_IDLE,
+ 0,
+ 1,
+ &rx_on_when_idle,
+ priv->spi
+ );
+ if (status) {
+ dev_crit(
+ &priv->spi->dev,
+ "Setting rx_on_when_idle failed, status = %d\n",
+ status
+ );
+ return link_to_linux_err(status);
+ }
+ status = hwme_set_request_sync(
+ HWME_LQILIMIT,
+ 1,
+ &lqi_threshold,
+ priv->spi
+ );
+ if (status) {
+ dev_crit(
+ &priv->spi->dev,
+ "Setting lqilimit failed, status = %d\n",
+ status
+ );
+ return link_to_linux_err(status);
+ }
+
+ return 0;
+}
+
+/**
+ * ca8210_stop() - Stops the network driver
+ * @hw: ieee802154_hw of ca8210 being stopped
+ *
+ * Return: 0 or linux error code
+ */
+static void ca8210_stop(struct ieee802154_hw *hw)
+{
+}
+
+/**
+ * ca8210_xmit_async() - Asynchronously transmits a given socket buffer using
+ * the ca8210
+ * @hw: ieee802154_hw of ca8210 to transmit from
+ * @skb: Socket buffer to transmit
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ struct ca8210_priv *priv = hw->priv;
+ int status;
+
+ dev_dbg(&priv->spi->dev, "calling ca8210_xmit_async()\n");
+
+ priv->tx_skb = skb;
+ priv->async_tx_pending = true;
+ status = ca8210_skb_tx(skb, priv->nextmsduhandle, priv);
+ return status;
+}
+
+/**
+ * ca8210_get_ed() - Returns the measured energy on the current channel at this
+ * instant in time
+ * @hw: ieee802154_hw of target ca8210
+ * @level: Measured Energy Detect level
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_get_ed(struct ieee802154_hw *hw, u8 *level)
+{
+ u8 lenvar;
+ struct ca8210_priv *priv = hw->priv;
+
+ return link_to_linux_err(
+ hwme_get_request_sync(HWME_EDVALUE, &lenvar, level, priv->spi)
+ );
+}
+
+/**
+ * ca8210_set_channel() - Sets the current operating 802.15.4 channel of the
+ * ca8210
+ * @hw: ieee802154_hw of target ca8210
+ * @page: Channel page to set
+ * @channel: Channel number to set
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_set_channel(
+ struct ieee802154_hw *hw,
+ u8 page,
+ u8 channel
+)
+{
+ u8 status;
+ struct ca8210_priv *priv = hw->priv;
+
+ status = mlme_set_request_sync(
+ PHY_CURRENT_CHANNEL,
+ 0,
+ 1,
+ &channel,
+ priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting channel, MLME-SET.confirm status = %d\n",
+ status
+ );
+ }
+ return link_to_linux_err(status);
+}
+
+/**
+ * ca8210_set_hw_addr_filt() - Sets the address filtering parameters of the
+ * ca8210
+ * @hw: ieee802154_hw of target ca8210
+ * @filt: Filtering parameters
+ * @changed: Bitmap representing which parameters to change
+ *
+ * Effectively just sets the actual addressing information identifying this node
+ * as all filtering is performed by the ca8210 as detailed in the IEEE
+ * 802.15.4-2006 specification.
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_set_hw_addr_filt(
+ struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed
+)
+{
+ u8 status = 0;
+ struct ca8210_priv *priv = hw->priv;
+
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+ status = mlme_set_request_sync(
+ MAC_PAN_ID,
+ 0,
+ 2,
+ &filt->pan_id, priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting pan id, MLME-SET.confirm status = %d",
+ status
+ );
+ return link_to_linux_err(status);
+ }
+ }
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+ status = mlme_set_request_sync(
+ MAC_SHORT_ADDRESS,
+ 0,
+ 2,
+ &filt->short_addr, priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting short address, MLME-SET.confirm status = %d",
+ status
+ );
+ return link_to_linux_err(status);
+ }
+ }
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
+ status = mlme_set_request_sync(
+ NS_IEEE_ADDRESS,
+ 0,
+ 8,
+ &filt->ieee_addr,
+ priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting ieee address, MLME-SET.confirm status = %d",
+ status
+ );
+ return link_to_linux_err(status);
+ }
+ }
+ /* TODO: Should use MLME_START to set coord bit? */
+ return 0;
+}
+
+/**
+ * ca8210_set_tx_power() - Sets the transmit power of the ca8210
+ * @hw: ieee802154_hw of target ca8210
+ * @mbm: Transmit power in mBm (dBm*100)
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_set_tx_power(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct ca8210_priv *priv = hw->priv;
+
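+ /* Convert from mBm (dBm * 100) to whole dBm, e.g. a requested 800 mBm
+ * becomes 8 dBm before being written to PHY_TRANSMIT_POWER
+ */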
+ mbm /= 100;
+ return link_to_linux_err(
+ mlme_set_request_sync(PHY_TRANSMIT_POWER, 0, 1, &mbm, priv->spi)
+ );
+}
+
+/**
+ * ca8210_set_cca_mode() - Sets the clear channel assessment mode of the ca8210
+ * @hw: ieee802154_hw of target ca8210
+ * @cca: CCA mode to set
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_set_cca_mode(
+ struct ieee802154_hw *hw,
+ const struct wpan_phy_cca *cca
+)
+{
+ u8 status;
+ u8 cca_mode;
+ struct ca8210_priv *priv = hw->priv;
+
+ cca_mode = cca->mode & 3;
+ if (cca_mode == 3 && cca->opt == NL802154_CCA_OPT_ENERGY_CARRIER_OR) {
+ /* cca_mode 0 == CS OR ED, 3 == CS AND ED */
+ cca_mode = 0;
+ }
+ status = mlme_set_request_sync(
+ PHY_CCA_MODE,
+ 0,
+ 1,
+ &cca_mode,
+ priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting cca mode, MLME-SET.confirm status = %d",
+ status
+ );
+ }
+ return link_to_linux_err(status);
+}
+
+/**
+ * ca8210_set_cca_ed_level() - Sets the CCA ED level of the ca8210
+ * @hw: ieee802154_hw of target ca8210
+ * @level: ED level to set (in mbm)
+ *
+ * Sets the minimum threshold of measured energy above which the ca8210 will
+ * back off and retry a transmission.
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
+{
+ u8 status;
+ u8 ed_threshold = (level / 100) * 2 + 256;
+ struct ca8210_priv *priv = hw->priv;
+
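+ /* Map the mbm level onto the single-octet HWME_EDTHRESHOLD value, e.g.
+ * the default -9800 mbm CCA ED level gives (-98 * 2) + 256 = 60
+ */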
+ status = hwme_set_request_sync(
+ HWME_EDTHRESHOLD,
+ 1,
+ &ed_threshold,
+ priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting ed threshold, HWME-SET.confirm status = %d",
+ status
+ );
+ }
+ return link_to_linux_err(status);
+}
+
+/**
+ * ca8210_set_csma_params() - Sets the CSMA parameters of the ca8210
+ * @hw: ieee802154_hw of target ca8210
+ * @min_be: Minimum backoff exponent when backing off a transmission
+ * @max_be: Maximum backoff exponent when backing off a transmission
+ * @retries: Number of times to retry after backing off
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_set_csma_params(
+ struct ieee802154_hw *hw,
+ u8 min_be,
+ u8 max_be,
+ u8 retries
+)
+{
+ u8 status;
+ struct ca8210_priv *priv = hw->priv;
+
+ status = mlme_set_request_sync(MAC_MIN_BE, 0, 1, &min_be, priv->spi);
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting min be, MLME-SET.confirm status = %d",
+ status
+ );
+ return link_to_linux_err(status);
+ }
+ status = mlme_set_request_sync(MAC_MAX_BE, 0, 1, &max_be, priv->spi);
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting max be, MLME-SET.confirm status = %d",
+ status
+ );
+ return link_to_linux_err(status);
+ }
+ status = mlme_set_request_sync(
+ MAC_MAX_CSMA_BACKOFFS,
+ 0,
+ 1,
+ &retries,
+ priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting max csma backoffs, MLME-SET.confirm status = %d",
+ status
+ );
+ }
+ return link_to_linux_err(status);
+}
+
+/**
+ * ca8210_set_frame_retries() - Sets the maximum frame retries of the ca8210
+ * @hw: ieee802154_hw of target ca8210
+ * @retries: Number of retries
+ *
+ * Sets the number of times to retry a transmission if no acknowledgment
+ * was received from the other end when one was requested.
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
+{
+ u8 status;
+ struct ca8210_priv *priv = hw->priv;
+
+ status = mlme_set_request_sync(
+ MAC_MAX_FRAME_RETRIES,
+ 0,
+ 1,
+ &retries,
+ priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting frame retries, MLME-SET.confirm status = %d",
+ status
+ );
+ }
+ return link_to_linux_err(status);
+}
+
+static int ca8210_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
+{
+ u8 status;
+ struct ca8210_priv *priv = hw->priv;
+
+ status = mlme_set_request_sync(
+ MAC_PROMISCUOUS_MODE,
+ 0,
+ 1,
+ (const void *)&on,
+ priv->spi
+ );
+ if (status) {
+ dev_err(
+ &priv->spi->dev,
+ "error setting promiscuous mode, MLME-SET.confirm status = %d",
+ status
+ );
+ } else {
+ priv->promiscuous = on;
+ }
+ return link_to_linux_err(status);
+}
+
+static const struct ieee802154_ops ca8210_phy_ops = {
+ .start = ca8210_start,
+ .stop = ca8210_stop,
+ .xmit_async = ca8210_xmit_async,
+ .ed = ca8210_get_ed,
+ .set_channel = ca8210_set_channel,
+ .set_hw_addr_filt = ca8210_set_hw_addr_filt,
+ .set_txpower = ca8210_set_tx_power,
+ .set_cca_mode = ca8210_set_cca_mode,
+ .set_cca_ed_level = ca8210_set_cca_ed_level,
+ .set_csma_params = ca8210_set_csma_params,
+ .set_frame_retries = ca8210_set_frame_retries,
+ .set_promiscuous_mode = ca8210_set_promiscuous_mode
+};
+
+/* Test/EVBME Interface */
+
+/**
+ * ca8210_test_int_open() - Opens the test interface to the userspace
+ * @inodp: inode representation of file interface
+ * @filp: file interface
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_test_int_open(struct inode *inodp, struct file *filp)
+{
+ struct ca8210_priv *priv = inodp->i_private;
+
+ filp->private_data = priv;
+ return 0;
+}
+
+/**
+ * ca8210_test_check_upstream() - Checks a command received from the upstream
+ * testing interface for required action
+ * @buf: Buffer containing command to check
+ * @device_ref: Opaque pointer to target device
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_test_check_upstream(u8 *buf, void *device_ref)
+{
+ int ret;
+ u8 response[CA8210_SPI_BUF_SIZE];
+
+ if (buf[0] == SPI_MLME_SET_REQUEST) {
+ ret = tdme_checkpibattribute(buf[2], buf[4], buf + 5);
+ if (ret) {
+ response[0] = SPI_MLME_SET_CONFIRM;
+ response[1] = 3;
+ response[2] = MAC_INVALID_PARAMETER;
+ response[3] = buf[2];
+ response[4] = buf[3];
+ if (cascoda_api_upstream)
+ cascoda_api_upstream(response, 5, device_ref);
+ return ret;
+ }
+ }
+ if (buf[0] == SPI_MLME_ASSOCIATE_REQUEST) {
+ return tdme_channelinit(buf[2], device_ref);
+ } else if (buf[0] == SPI_MLME_START_REQUEST) {
+ return tdme_channelinit(buf[4], device_ref);
+ } else if (
+ (buf[0] == SPI_MLME_SET_REQUEST) &&
+ (buf[2] == PHY_CURRENT_CHANNEL)
+ ) {
+ return tdme_channelinit(buf[5], device_ref);
+ } else if (
+ (buf[0] == SPI_TDME_SET_REQUEST) &&
+ (buf[2] == TDME_CHANNEL)
+ ) {
+ return tdme_channelinit(buf[4], device_ref);
+ } else if (
+ (CA8210_MAC_WORKAROUNDS) &&
+ (buf[0] == SPI_MLME_RESET_REQUEST) &&
+ (buf[2] == 1)
+ ) {
+ /* reset COORD Bit for Channel Filtering as Coordinator */
+ return tdme_setsfr_request_sync(
+ 0,
+ CA8210_SFR_MACCON,
+ 0,
+ device_ref
+ );
+ }
+ return 0;
+}
+
+/**
+ * ca8210_test_int_user_write() - Called by a process in userspace to send a
+ * message to the ca8210 driver
+ * @filp: file interface
+ * @in_buf: Buffer containing message to write
+ * @len: length of message
+ * @off: file offset
+ *
+ * Return: 0 or linux error code
+ */
+static ssize_t ca8210_test_int_user_write(
+ struct file *filp,
+ const char __user *in_buf,
+ size_t len,
+ loff_t *off
+)
+{
+ int ret;
+ struct ca8210_priv *priv = filp->private_data;
+ u8 command[CA8210_SPI_BUF_SIZE];
+
+ if (len > CA8210_SPI_BUF_SIZE) {
+ dev_warn(
+ &priv->spi->dev,
+ "userspace requested erroneously long write (%zu)\n",
+ len
+ );
+ return -EMSGSIZE;
+ }
+
+ ret = copy_from_user(command, in_buf, len);
+ if (ret) {
+ dev_err(
+ &priv->spi->dev,
+ "%d bytes could not be copied from userspace\n",
+ ret
+ );
+ return -EIO;
+ }
+
+ ret = ca8210_test_check_upstream(command, priv->spi);
+ if (ret == 0) {
+ ret = ca8210_spi_exchange(
+ command,
+ command[1] + 2,
+ NULL,
+ priv->spi
+ );
+ if (ret < 0) {
+ /* effectively 0 bytes were written successfully */
+ dev_err(
+ &priv->spi->dev,
+ "spi exchange failed\n"
+ );
+ return ret;
+ }
+ if (command[0] & SPI_SYN)
+ priv->sync_down++;
+ }
+
+ return len;
+}
+
+/**
+ * ca8210_test_int_user_read() - Called by a process in userspace to read a
+ * message from the ca8210 driver
+ * @filp: file interface
+ * @buf: Buffer to write message to
+ * @len: length of message to read (ignored)
+ * @offp: file offset
+ *
+ * If the O_NONBLOCK flag was set when opening the file then this function will
+ * not block, i.e. it will return if the fifo is empty. Otherwise the function
+ * will block, i.e. wait until new data arrives.
+ *
+ * Return: number of bytes read
+ */
+static ssize_t ca8210_test_int_user_read(
+ struct file *filp,
+ char __user *buf,
+ size_t len,
+ loff_t *offp
+)
+{
+ int i, cmdlen;
+ struct ca8210_priv *priv = filp->private_data;
+ unsigned char *fifo_buffer;
+ unsigned long bytes_not_copied;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ /* Non-blocking mode */
+ if (kfifo_is_empty(&priv->test.up_fifo))
+ return 0;
+ } else {
+ /* Blocking mode */
+ wait_event_interruptible(
+ priv->test.readq,
+ !kfifo_is_empty(&priv->test.up_fifo)
+ );
+ }
+
+ if (kfifo_out(&priv->test.up_fifo, &fifo_buffer, 4) != 4) {
+ dev_err(
+ &priv->spi->dev,
+ "test_interface: Wrong number of elements popped from upstream fifo\n"
+ );
+ return 0;
+ }
+ cmdlen = fifo_buffer[1];
+ bytes_not_copied = cmdlen + 2;
+
+ bytes_not_copied = copy_to_user(buf, fifo_buffer, bytes_not_copied);
+ if (bytes_not_copied > 0) {
+ dev_err(
+ &priv->spi->dev,
+ "%lu bytes could not be copied to user space!\n",
+ bytes_not_copied
+ );
+ }
+
+ dev_dbg(&priv->spi->dev, "test_interface: Cmd len = %d\n", cmdlen);
+
+ dev_dbg(&priv->spi->dev, "test_interface: Read\n");
+ for (i = 0; i < cmdlen + 2; i++)
+ dev_dbg(&priv->spi->dev, "%#03x\n", fifo_buffer[i]);
+
+ kfree(fifo_buffer);
+
+ return cmdlen + 2;
+}
+
+/**
+ * ca8210_test_int_ioctl() - Called by a process in userspace to enact an
+ * arbitrary action
+ * @filp: file interface
+ * @ioctl_num: which action to enact
+ * @ioctl_param: arbitrary parameter for the action
+ *
+ * Return: status
+ */
+static long ca8210_test_int_ioctl(
+ struct file *filp,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param
+)
+{
+ struct ca8210_priv *priv = filp->private_data;
+
+ switch (ioctl_num) {
+ case CA8210_IOCTL_HARD_RESET:
+ ca8210_reset_send(priv->spi, ioctl_param);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * ca8210_test_int_poll() - Called by a process in userspace to determine which
+ * actions are currently possible for the file
+ * @filp: file interface
+ * @ptable: poll table
+ *
+ * Return: set of poll return flags
+ */
+static unsigned int ca8210_test_int_poll(
+ struct file *filp,
+ struct poll_table_struct *ptable
+)
+{
+ unsigned int return_flags = 0;
+ struct ca8210_priv *priv = filp->private_data;
+
+ poll_wait(filp, &priv->test.readq, ptable);
+ if (!kfifo_is_empty(&priv->test.up_fifo))
+ return_flags |= (POLLIN | POLLRDNORM);
+ if (wait_event_interruptible(
+ priv->test.readq,
+ !kfifo_is_empty(&priv->test.up_fifo))) {
+ return POLLERR;
+ }
+ return return_flags;
+}
+
+static const struct file_operations test_int_fops = {
+ .read = ca8210_test_int_user_read,
+ .write = ca8210_test_int_user_write,
+ .open = ca8210_test_int_open,
+ .release = NULL,
+ .unlocked_ioctl = ca8210_test_int_ioctl,
+ .poll = ca8210_test_int_poll
+};
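+
+/*
+ * Illustrative userspace use of the node exposed above (the path follows the
+ * "ca8210" symlink created below; command and attribute are examples only):
+ *
+ *   int fd = open("/sys/kernel/debug/ca8210", O_RDWR);
+ *   u8 cmd[] = { SPI_HWME_GET_REQUEST, 1, HWME_EDVALUE };
+ *   u8 rsp[CA8210_SPI_BUF_SIZE];
+ *
+ *   write(fd, cmd, sizeof(cmd));
+ *   read(fd, rsp, sizeof(rsp));	// blocks until a confirm arrives
+ *   ioctl(fd, CA8210_IOCTL_HARD_RESET, 1);
+ */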
+
+/* Init/Deinit */
+
+/**
+ * ca8210_get_platform_data() - Populate a ca8210_platform_data object
+ * @spi_device: Pointer to ca8210 spi device object to get data for
+ * @pdata: Pointer to ca8210_platform_data object to populate
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_get_platform_data(
+ struct spi_device *spi_device,
+ struct ca8210_platform_data *pdata
+)
+{
+ int ret = 0;
+
+ if (!spi_device->dev.of_node)
+ return -EINVAL;
+
+ pdata->extclockenable = of_property_read_bool(
+ spi_device->dev.of_node,
+ "extclock-enable"
+ );
+ if (pdata->extclockenable) {
+ ret = of_property_read_u32(
+ spi_device->dev.of_node,
+ "extclock-freq",
+ &pdata->extclockfreq
+ );
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_u32(
+ spi_device->dev.of_node,
+ "extclock-gpio",
+ &pdata->extclockgpio
+ );
+ }
+
+ return ret;
+}
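+
+/*
+ * An illustrative devicetree node carrying the properties read above and by
+ * the reset/interrupt init below (GPIO numbers and clock rate are examples):
+ *
+ *   ca8210@0 {
+ *           compatible = "cascoda,ca8210";
+ *           reg = <0>;
+ *           reset-gpio = <&gpio 27 0>;
+ *           irq-gpio = <&gpio 17 0>;
+ *           extclock-enable;
+ *           extclock-freq = <16000000>;
+ *           extclock-gpio = <2>;
+ *   };
+ */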
+
+/**
+ * ca8210_config_extern_clk() - Configure the external clock provided by the
+ * ca8210
+ * @pdata: Pointer to ca8210_platform_data containing clock parameters
+ * @spi: Pointer to target ca8210 spi device
+ * @on: True to turn the clock on, false to turn off
+ *
+ * The external clock is configured with a frequency and output pin taken from
+ * the platform data.
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_config_extern_clk(
+ struct ca8210_platform_data *pdata,
+ struct spi_device *spi,
+ bool on
+)
+{
+ u8 clkparam[2];
+
+ if (on) {
+ dev_info(&spi->dev, "Switching external clock on\n");
+ switch (pdata->extclockfreq) {
+ case SIXTEEN_MHZ:
+ clkparam[0] = 1;
+ break;
+ case EIGHT_MHZ:
+ clkparam[0] = 2;
+ break;
+ case FOUR_MHZ:
+ clkparam[0] = 3;
+ break;
+ case TWO_MHZ:
+ clkparam[0] = 4;
+ break;
+ case ONE_MHZ:
+ clkparam[0] = 5;
+ break;
+ default:
+ dev_crit(&spi->dev, "Invalid extclock-freq\n");
+ return -EINVAL;
+ }
+ clkparam[1] = pdata->extclockgpio;
+ } else {
+ dev_info(&spi->dev, "Switching external clock off\n");
+ clkparam[0] = 0; /* off */
+ clkparam[1] = 0;
+ }
+ return link_to_linux_err(
+ hwme_set_request_sync(HWME_SYSCLKOUT, 2, clkparam, spi)
+ );
+}
+
+/**
+ * ca8210_register_ext_clock() - Register ca8210's external clock with the kernel
+ * @spi: Pointer to target ca8210 spi device
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_register_ext_clock(struct spi_device *spi)
+{
+ struct device_node *np = spi->dev.of_node;
+ struct ca8210_priv *priv = spi_get_drvdata(spi);
+ struct ca8210_platform_data *pdata = spi->dev.platform_data;
+ int ret = 0;
+
+ if (!np)
+ return -EFAULT;
+
+ priv->clk = clk_register_fixed_rate(
+ &spi->dev,
+ np->name,
+ NULL,
+ 0,
+ pdata->extclockfreq
+ );
+
+ if (IS_ERR(priv->clk)) {
+ dev_crit(&spi->dev, "Failed to register external clk\n");
+ return PTR_ERR(priv->clk);
+ }
+ ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
+ if (ret) {
+ clk_unregister(priv->clk);
+ dev_crit(
+ &spi->dev,
+ "Failed to register external clock as clock provider\n"
+ );
+ } else {
+ dev_info(&spi->dev, "External clock set as clock provider\n");
+ }
+
+ return ret;
+}
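+
+/*
+ * With the provider registered through of_clk_add_provider() above, another
+ * devicetree node can consume this clock by phandle, e.g. clocks = <&ca8210>;
+ */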
+
+/**
+ * ca8210_unregister_ext_clock() - Unregister ca8210's external clock from the
+ * kernel
+ * @spi: Pointer to target ca8210 spi device
+ */
+static void ca8210_unregister_ext_clock(struct spi_device *spi)
+{
+ struct ca8210_priv *priv = spi_get_drvdata(spi);
+
+ if (!priv->clk)
+ return;
+
+ of_clk_del_provider(spi->dev.of_node);
+ clk_unregister(priv->clk);
+ dev_info(&spi->dev, "External clock unregistered\n");
+}
+
+/**
+ * ca8210_reset_init() - Initialise the reset input to the ca8210
+ * @spi: Pointer to target ca8210 spi device
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_reset_init(struct spi_device *spi)
+{
+ int ret;
+ struct ca8210_platform_data *pdata = spi->dev.platform_data;
+
+ pdata->gpio_reset = of_get_named_gpio(
+ spi->dev.of_node,
+ "reset-gpio",
+ 0
+ );
+
+ ret = gpio_direction_output(pdata->gpio_reset, 1);
+ if (ret < 0) {
+ dev_crit(
+ &spi->dev,
+ "Reset GPIO %d did not set to output mode\n",
+ pdata->gpio_reset
+ );
+ }
+
+ return ret;
+}
+
+/**
+ * ca8210_interrupt_init() - Initialise the irq output from the ca8210
+ * @spi: Pointer to target ca8210 spi device
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_interrupt_init(struct spi_device *spi)
+{
+ int ret;
+ struct ca8210_platform_data *pdata = spi->dev.platform_data;
+
+ pdata->gpio_irq = of_get_named_gpio(
+ spi->dev.of_node,
+ "irq-gpio",
+ 0
+ );
+
+ pdata->irq_id = gpio_to_irq(pdata->gpio_irq);
+ if (pdata->irq_id < 0) {
+ dev_crit(
+ &spi->dev,
+ "Could not get irq for gpio pin %d\n",
+ pdata->gpio_irq
+ );
+ gpio_free(pdata->gpio_irq);
+ return pdata->irq_id;
+ }
+
+ ret = request_irq(
+ pdata->irq_id,
+ ca8210_interrupt_handler,
+ IRQF_TRIGGER_FALLING,
+ "ca8210-irq",
+ spi_get_drvdata(spi)
+ );
+ if (ret) {
+ dev_crit(&spi->dev, "request_irq %d failed\n", pdata->irq_id);
+ gpio_unexport(pdata->gpio_irq);
+ gpio_free(pdata->gpio_irq);
+ }
+
+ return ret;
+}
+
+/**
+ * ca8210_dev_com_init() - Initialise the spi communication component
+ * @priv: Pointer to private data structure
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_dev_com_init(struct ca8210_priv *priv)
+{
+ priv->mlme_workqueue = alloc_ordered_workqueue(
+ "MLME work queue",
+ WQ_UNBOUND
+ );
+ if (!priv->mlme_workqueue) {
+ dev_crit(&priv->spi->dev, "alloc of mlme_workqueue failed!\n");
+ return -ENOMEM;
+ }
+
+ priv->irq_workqueue = alloc_ordered_workqueue(
+ "ca8210 irq worker",
+ WQ_UNBOUND
+ );
+ if (!priv->irq_workqueue) {
+ dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ca8210_dev_com_clear() - Deinitialise the spi communication component
+ * @priv: Pointer to private data structure
+ */
+static void ca8210_dev_com_clear(struct ca8210_priv *priv)
+{
+ flush_workqueue(priv->mlme_workqueue);
+ destroy_workqueue(priv->mlme_workqueue);
+ flush_workqueue(priv->irq_workqueue);
+ destroy_workqueue(priv->irq_workqueue);
+}
+
+#define CA8210_MAX_TX_POWERS (9)
+static const s32 ca8210_tx_powers[CA8210_MAX_TX_POWERS] = {
+ 800, 700, 600, 500, 400, 300, 200, 100, 0
+};
+
+#define CA8210_MAX_ED_LEVELS (21)
+static const s32 ca8210_ed_levels[CA8210_MAX_ED_LEVELS] = {
+ -10300, -10250, -10200, -10150, -10100, -10050, -10000, -9950, -9900,
+ -9850, -9800, -9750, -9700, -9650, -9600, -9550, -9500, -9450, -9400,
+ -9350, -9300
+};
+
+/**
+ * ca8210_hw_setup() - Populate the ieee802154_hw phy attributes with the
+ * ca8210's defaults
+ * @ca8210_hw: Pointer to ieee802154_hw to populate
+ */
+static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
+{
+ /* Support channels 11-26 */
+ ca8210_hw->phy->supported.channels[0] = CA8210_VALID_CHANNELS;
+ ca8210_hw->phy->supported.tx_powers_size = CA8210_MAX_TX_POWERS;
+ ca8210_hw->phy->supported.tx_powers = ca8210_tx_powers;
+ ca8210_hw->phy->supported.cca_ed_levels_size = CA8210_MAX_ED_LEVELS;
+ ca8210_hw->phy->supported.cca_ed_levels = ca8210_ed_levels;
+ ca8210_hw->phy->current_channel = 18;
+ ca8210_hw->phy->current_page = 0;
+ ca8210_hw->phy->transmit_power = 800;
+ ca8210_hw->phy->cca.mode = NL802154_CCA_ENERGY_CARRIER;
+ ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
+ ca8210_hw->phy->cca_ed_level = -9800;
+ ca8210_hw->phy->symbol_duration = 16;
+ ca8210_hw->phy->lifs_period = 40;
+ ca8210_hw->phy->sifs_period = 12;
+ ca8210_hw->flags =
+ IEEE802154_HW_AFILT |
+ IEEE802154_HW_OMIT_CKSUM |
+ IEEE802154_HW_FRAME_RETRIES |
+ IEEE802154_HW_PROMISCUOUS |
+ IEEE802154_HW_CSMA_PARAMS;
+ ca8210_hw->phy->flags =
+ WPAN_PHY_FLAG_TXPOWER |
+ WPAN_PHY_FLAG_CCA_ED_LEVEL |
+ WPAN_PHY_FLAG_CCA_MODE;
+}
+
+/**
+ * ca8210_test_interface_init() - Initialise the test file interface
+ * @priv: Pointer to private data structure
+ *
+ * Provided as an alternative to the standard linux network interface, the test
+ * interface exposes a debugfs node (symlinked as "ca8210") that allows
+ * 802.15.4 SAP commands and Cascoda EVBME commands to be sent directly to
+ * the stack.
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_test_interface_init(struct ca8210_priv *priv)
+{
+ struct ca8210_test *test = &priv->test;
+ char node_name[32];
+
+ snprintf(
+ node_name,
+ sizeof(node_name),
+ "ca8210@%d_%d",
+ priv->spi->master->bus_num,
+ priv->spi->chip_select
+ );
+
+ test->ca8210_dfs_spi_int = debugfs_create_file(
+ node_name,
+ 0600, /* S_IRUSR | S_IWUSR */
+ NULL,
+ priv,
+ &test_int_fops
+ );
+ if (IS_ERR(test->ca8210_dfs_spi_int)) {
+ dev_err(
+ &priv->spi->dev,
+ "Error %ld when creating debugfs node\n",
+ PTR_ERR(test->ca8210_dfs_spi_int)
+ );
+ return PTR_ERR(test->ca8210_dfs_spi_int);
+ }
+ debugfs_create_symlink("ca8210", NULL, node_name);
+ init_waitqueue_head(&test->readq);
+ return kfifo_alloc(
+ &test->up_fifo,
+ CA8210_TEST_INT_FIFO_SIZE,
+ GFP_KERNEL
+ );
+}
+
+/**
+ * ca8210_test_interface_clear() - Deinitialise the test file interface
+ * @priv: Pointer to private data structure
+{
+ struct ca8210_test *test = &priv->test;
+
+ if (!IS_ERR(test->ca8210_dfs_spi_int))
+ debugfs_remove(test->ca8210_dfs_spi_int);
+ kfifo_free(&test->up_fifo);
+ dev_info(&priv->spi->dev, "Test interface removed\n");
+}
+
+/**
+ * ca8210_remove() - Shut down a ca8210 upon being disconnected
+ * @spi_device: Pointer to spi_device of the ca8210 being removed
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_remove(struct spi_device *spi_device)
+{
+ struct ca8210_priv *priv;
+ struct ca8210_platform_data *pdata;
+
+ dev_info(&spi_device->dev, "Removing ca8210\n");
+
+ pdata = spi_device->dev.platform_data;
+ if (pdata) {
+ if (pdata->extclockenable) {
+ ca8210_unregister_ext_clock(spi_device);
+ ca8210_config_extern_clk(pdata, spi_device, 0);
+ }
+ free_irq(pdata->irq_id, spi_device->dev.driver_data);
+ kfree(pdata);
+ spi_device->dev.platform_data = NULL;
+ }
+ /* get spi_device private data */
+ priv = spi_get_drvdata(spi_device);
+ if (priv) {
+ dev_info(
+ &spi_device->dev,
+ "sync_down = %d, sync_up = %d\n",
+ priv->sync_down,
+ priv->sync_up
+ );
+ ca8210_dev_com_clear(spi_device->dev.driver_data);
+ if (priv->hw) {
+ if (priv->hw_registered)
+ ieee802154_unregister_hw(priv->hw);
+ ieee802154_free_hw(priv->hw);
+ priv->hw = NULL;
+ dev_info(
+ &spi_device->dev,
+ "Unregistered & freed ieee802154_hw.\n"
+ );
+ }
+ if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS))
+ ca8210_test_interface_clear(priv);
+ }
+
+ return 0;
+}
+
+/**
+ * ca8210_probe() - Set up a connected ca8210 upon being detected by the system
+ * @spi_device: Pointer to spi_device of the ca8210 being probed
+ *
+ * Return: 0 or linux error code
+ */
+static int ca8210_probe(struct spi_device *spi_device)
+{
+ struct ca8210_priv *priv;
+ struct ieee802154_hw *hw;
+ struct ca8210_platform_data *pdata;
+ int ret;
+
+ dev_info(&spi_device->dev, "Inserting ca8210\n");
+
+ /* allocate ieee802154_hw and private data */
+ hw = ieee802154_alloc_hw(sizeof(struct ca8210_priv), &ca8210_phy_ops);
+ if (!hw) {
+ dev_crit(&spi_device->dev, "ieee802154_alloc_hw failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->spi = spi_device;
+ hw->parent = &spi_device->dev;
+ spin_lock_init(&priv->lock);
+ priv->async_tx_pending = false;
+ priv->hw_registered = false;
+ priv->sync_up = 0;
+ priv->sync_down = 0;
+ priv->promiscuous = false;
+ priv->retries = 0;
+ init_completion(&priv->ca8210_is_awake);
+ init_completion(&priv->spi_transfer_complete);
+ init_completion(&priv->sync_exchange_complete);
+ spi_set_drvdata(priv->spi, priv);
+ if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) {
+ cascoda_api_upstream = ca8210_test_int_driver_write;
+ ca8210_test_interface_init(priv);
+ } else {
+ cascoda_api_upstream = NULL;
+ }
+ ca8210_hw_setup(hw);
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+ pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_crit(
+ &spi_device->dev,
+ "Could not allocate platform data\n"
+ );
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = ca8210_get_platform_data(priv->spi, pdata);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
+ goto error;
+ }
+ priv->spi->dev.platform_data = pdata;
+
+ ret = ca8210_dev_com_init(priv);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ca8210_dev_com_init failed\n");
+ goto error;
+ }
+ ret = ca8210_reset_init(priv->spi);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ca8210_reset_init failed\n");
+ goto error;
+ }
+
+ ret = ca8210_interrupt_init(priv->spi);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ca8210_interrupt_init failed\n");
+ goto error;
+ }
+
+ msleep(100);
+
+ ca8210_reset_send(priv->spi, 1);
+
+ ret = tdme_chipinit(priv->spi);
+ if (ret) {
+ dev_crit(&spi_device->dev, "tdme_chipinit failed\n");
+ goto error;
+ }
+
+ if (pdata->extclockenable) {
+ ret = ca8210_config_extern_clk(pdata, priv->spi, 1);
+ if (ret) {
+ dev_crit(
+ &spi_device->dev,
+ "ca8210_config_extern_clk failed\n"
+ );
+ goto error;
+ }
+ ret = ca8210_register_ext_clock(priv->spi);
+ if (ret) {
+ dev_crit(
+ &spi_device->dev,
+ "ca8210_register_ext_clock failed\n"
+ );
+ goto error;
+ }
+ }
+
+ ret = ieee802154_register_hw(hw);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ieee802154_register_hw failed\n");
+ goto error;
+ }
+ priv->hw_registered = true;
+
+ return 0;
+error:
+ msleep(100); /* wait for pending spi transfers to complete */
+ ca8210_remove(spi_device);
+ return link_to_linux_err(ret);
+}
+
+static const struct of_device_id ca8210_of_ids[] = {
+ {.compatible = "cascoda,ca8210", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ca8210_of_ids);
+
+static struct spi_driver ca8210_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ca8210_of_ids),
+ },
+ .probe = ca8210_probe,
+ .remove = ca8210_remove
+};
+
+module_spi_driver(ca8210_spi_driver);
+
+MODULE_AUTHOR("Harry Morris <h.morris@cascoda.com>");
+MODULE_DESCRIPTION("CA-8210 SoftMAC driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("1.0");
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 7b131f8e4093..bd63289c55e8 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -18,6 +18,7 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/irq.h>
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 800a46c8d26c..7919369c0a72 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -26,6 +26,7 @@
#include <linux/netfilter.h>
#include <net/ip.h>
#include <net/ip6_route.h>
+#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/addrconf.h>
@@ -91,6 +92,7 @@ struct ipvl_addr {
struct ipvl_port {
struct net_device *dev;
+ possible_net_t pnet;
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
struct list_head ipvlans;
u16 mode;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index aa8575ccbce3..618ed88fad0f 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -9,7 +9,11 @@
#include "ipvlan.h"
-static u32 ipvl_nf_hook_refcnt = 0;
+static unsigned int ipvlan_netid __read_mostly;
+
+struct ipvlan_netns {
+ unsigned int ipvl_nf_hook_refcnt;
+};
static struct nf_hook_ops ipvl_nfops[] __read_mostly = {
{
@@ -35,28 +39,34 @@ static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
ipvlan->dev->mtu = dev->mtu;
}
-static int ipvlan_register_nf_hook(void)
+static int ipvlan_register_nf_hook(struct net *net)
{
+ struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
int err = 0;
- if (!ipvl_nf_hook_refcnt) {
- err = _nf_register_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops));
+ if (!vnet->ipvl_nf_hook_refcnt) {
+ err = nf_register_net_hooks(net, ipvl_nfops,
+ ARRAY_SIZE(ipvl_nfops));
if (!err)
- ipvl_nf_hook_refcnt = 1;
+ vnet->ipvl_nf_hook_refcnt = 1;
} else {
- ipvl_nf_hook_refcnt++;
+ vnet->ipvl_nf_hook_refcnt++;
}
return err;
}
-static void ipvlan_unregister_nf_hook(void)
+static void ipvlan_unregister_nf_hook(struct net *net)
{
- WARN_ON(!ipvl_nf_hook_refcnt);
+ struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
+
+ if (WARN_ON(!vnet->ipvl_nf_hook_refcnt))
+ return;
- ipvl_nf_hook_refcnt--;
- if (!ipvl_nf_hook_refcnt)
- _nf_unregister_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops));
+ vnet->ipvl_nf_hook_refcnt--;
+ if (!vnet->ipvl_nf_hook_refcnt)
+ nf_unregister_net_hooks(net, ipvl_nfops,
+ ARRAY_SIZE(ipvl_nfops));
}
static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
@@ -69,7 +79,7 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
if (port->mode != nval) {
if (nval == IPVLAN_MODE_L3S) {
/* New mode is L3S */
- err = ipvlan_register_nf_hook();
+ err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
if (!err) {
mdev->l3mdev_ops = &ipvl_l3mdev_ops;
mdev->priv_flags |= IFF_L3MDEV_MASTER;
@@ -78,7 +88,7 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
- ipvlan_unregister_nf_hook();
+ ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
mdev->l3mdev_ops = NULL;
}
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
@@ -111,6 +121,7 @@ static int ipvlan_port_create(struct net_device *dev)
if (!port)
return -ENOMEM;
+ write_pnet(&port->pnet, dev_net(dev));
port->dev = dev;
port->mode = IPVLAN_MODE_L3;
INIT_LIST_HEAD(&port->ipvlans);
@@ -142,7 +153,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
dev->priv_flags &= ~IFF_IPVLAN_MASTER;
if (port->mode == IPVLAN_MODE_L3S) {
dev->priv_flags &= ~IFF_L3MDEV_MASTER;
- ipvlan_unregister_nf_hook();
+ ipvlan_unregister_nf_hook(dev_net(dev));
dev->l3mdev_ops = NULL;
}
netdev_rx_handler_unregister(dev);
@@ -673,6 +684,24 @@ static int ipvlan_device_event(struct notifier_block *unused,
ipvlan->dev);
break;
+ case NETDEV_REGISTER: {
+ struct net *oldnet, *newnet = dev_net(dev);
+ struct ipvlan_netns *old_vnet;
+
+ oldnet = read_pnet(&port->pnet);
+ if (net_eq(newnet, oldnet))
+ break;
+
+ write_pnet(&port->pnet, newnet);
+
+ old_vnet = net_generic(oldnet, ipvlan_netid);
+ if (!old_vnet->ipvl_nf_hook_refcnt)
+ break;
+
+ ipvlan_register_nf_hook(newnet);
+ ipvlan_unregister_nf_hook(oldnet);
+ break;
+ }
case NETDEV_UNREGISTER:
if (dev->reg_state != NETREG_UNREGISTERING)
break;
@@ -854,6 +883,23 @@ static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
.notifier_call = ipvlan_addr6_event,
};
+static void ipvlan_ns_exit(struct net *net)
+{
+ struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
+
+ if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) {
+ vnet->ipvl_nf_hook_refcnt = 0;
+ nf_unregister_net_hooks(net, ipvl_nfops,
+ ARRAY_SIZE(ipvl_nfops));
+ }
+}
+
+static struct pernet_operations ipvlan_net_ops = {
+ .id = &ipvlan_netid,
+ .size = sizeof(struct ipvlan_netns),
+ .exit = ipvlan_ns_exit,
+};
+
static int __init ipvlan_init_module(void)
{
int err;
@@ -863,10 +909,16 @@ static int __init ipvlan_init_module(void)
register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
- err = ipvlan_link_register(&ipvlan_link_ops);
+ err = register_pernet_subsys(&ipvlan_net_ops);
if (err < 0)
goto error;
+ err = ipvlan_link_register(&ipvlan_link_ops);
+ if (err < 0) {
+ unregister_pernet_subsys(&ipvlan_net_ops);
+ goto error;
+ }
+
return 0;
error:
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
@@ -878,6 +930,7 @@ error:
static void __exit ipvlan_cleanup_module(void)
{
rtnl_link_unregister(&ipvlan_link_ops);
+ unregister_pernet_subsys(&ipvlan_net_ops);
unregister_netdevice_notifier(&ipvlan_notifier_block);
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b23b71981fd5..224f65cb576b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -13,7 +13,7 @@
*
* Alan Cox : Fixed oddments for NET3.014
* Alan Cox : Rejig for NET3.029 snap #3
- * Alan Cox : Fixed NET3.029 bugs and sped up
+ * Alan Cox : Fixed NET3.029 bugs and sped up
* Larry McVoy : Tiny tweak to double performance
* Alan Cox : Backed out LMV's tweak - the linux mm
* can't take it...
@@ -41,7 +41,7 @@
#include <linux/in.h>
#include <linux/uaccess.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
@@ -55,6 +55,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/percpu.h>
+#include <linux/net_tstamp.h>
#include <net/net_namespace.h>
#include <linux/u64_stats_sync.h>
@@ -64,8 +65,7 @@ struct pcpu_lstats {
struct u64_stats_sync syncp;
};
-/*
- * The higher levels take care of making this non-reentrant (it's
+/* The higher levels take care of making this non-reentrant (it's
* called with bh's disabled).
*/
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
@@ -74,6 +74,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
struct pcpu_lstats *lb_stats;
int len;
+ skb_tx_timestamp(skb);
skb_orphan(skb);
/* Before queueing this packet to netif_rx(),
@@ -129,8 +130,21 @@ static u32 always_on(struct net_device *dev)
return 1;
}
+static int loopback_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ ts_info->phc_index = -1;
+
+ return 0;
+};
+
static const struct ethtool_ops loopback_ethtool_ops = {
.get_link = always_on,
+ .get_ts_info = loopback_get_ts_info,
};
static int loopback_dev_init(struct net_device *dev)
@@ -149,14 +163,13 @@ static void loopback_dev_free(struct net_device *dev)
}
static const struct net_device_ops loopback_ops = {
- .ndo_init = loopback_dev_init,
- .ndo_start_xmit= loopback_xmit,
+ .ndo_init = loopback_dev_init,
+ .ndo_start_xmit = loopback_xmit,
.ndo_get_stats64 = loopback_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
};
-/*
- * The loopback device is special. There is only one instance
+/* The loopback device is special. There is only one instance
* per network namespace.
*/
static void loopback_setup(struct net_device *dev)
@@ -170,7 +183,7 @@ static void loopback_setup(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
netif_keep_dst(dev);
dev->hw_features = NETIF_F_GSO_SOFTWARE;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
| NETIF_F_GSO_SOFTWARE
| NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
@@ -206,7 +219,6 @@ static __net_init int loopback_net_init(struct net *net)
net->loopback_dev = dev;
return 0;
-
out_free_netdev:
free_netdev(dev);
out:
@@ -217,5 +229,5 @@ out:
/* Registered in net/core/dev.c */
struct pernet_operations __net_initdata loopback_net_ops = {
- .init = loopback_net_init,
+ .init = loopback_net_init,
};
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 49ce4e9f4a0f..cdc347be68f2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1605,8 +1605,9 @@ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
if (!attrs[MACSEC_ATTR_SA_CONFIG])
return -EINVAL;
- if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
- macsec_genl_sa_policy))
+ if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX,
+ attrs[MACSEC_ATTR_SA_CONFIG],
+ macsec_genl_sa_policy, NULL))
return -EINVAL;
return 0;
@@ -1617,8 +1618,9 @@ static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
return -EINVAL;
- if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
- macsec_genl_rxsc_policy))
+ if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
+ attrs[MACSEC_ATTR_RXSC_CONFIG],
+ macsec_genl_rxsc_policy, NULL))
return -EINVAL;
return 0;
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 36877ba65516..4daf3d0926a8 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -372,18 +372,19 @@ static void ntb_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}
-static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ntb_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->supported = SUPPORTED_Backplane;
- cmd->advertising = ADVERTISED_Backplane;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_OTHER;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_DUMMY1;
- cmd->autoneg = AUTONEG_ENABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);
+
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_ENABLE;
return 0;
}
@@ -391,7 +392,7 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static const struct ethtool_ops ntb_ethtool_ops = {
.get_drvinfo = ntb_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_settings = ntb_get_settings,
+ .get_link_ksettings = ntb_get_link_ksettings,
};
static const struct ntb_queue_handlers ntb_netdev_handlers = {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 8dbd59baa34d..60ffc9da6a28 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -2,33 +2,12 @@
# PHY Layer Configuration
#
-menuconfig PHYLIB
- tristate "PHY Device support and infrastructure"
- depends on NETDEVICES
+menuconfig MDIO_DEVICE
+ tristate "MDIO bus device drivers"
help
- Ethernet controllers are usually attached to PHY
- devices. This option provides infrastructure for
- managing PHY devices.
-
-if PHYLIB
+ MDIO devices and driver infrastructure code.
-config SWPHY
- bool
-
-config LED_TRIGGER_PHY
- bool "Support LED triggers for tracking link state"
- depends on LEDS_TRIGGERS
- ---help---
- Adds support for a set of LED trigger events per-PHY. Link
- state change will trigger the events, for consumption by an
- LED class driver. There are triggers for each link speed currently
- supported by the phy, and are of the form:
- <mii bus id>:<phy>:<speed>
-
- Where speed is in the form:
- <Speed in megabits>Mbps or <Speed in gigabits>Gbps
-
-comment "MDIO bus device drivers"
+if MDIO_DEVICE
config MDIO_BCM_IPROC
tristate "Broadcom iProc MDIO bus controller"
@@ -40,7 +19,7 @@ config MDIO_BCM_IPROC
config MDIO_BCM_UNIMAC
tristate "Broadcom UniMAC MDIO bus controller"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && OF_MDIO
help
This module provides a driver for the Broadcom UniMAC MDIO busses.
This hardware can be found in the Broadcom GENET Ethernet MAC
@@ -49,6 +28,7 @@ config MDIO_BCM_UNIMAC
config MDIO_BITBANG
tristate "Bitbanged MDIO buses"
+ depends on !(MDIO_DEVICE=y && PHYLIB=m)
help
This module implements the MDIO bus protocol in software,
for use by low level drivers that export the ability to
@@ -160,6 +140,36 @@ config MDIO_XGENE
This module provides a driver for the MDIO busses found in the
APM X-Gene SoC's.
+endif
+
+menuconfig PHYLIB
+ tristate "PHY Device support and infrastructure"
+ depends on NETDEVICES
+ select MDIO_DEVICE
+ help
+ Ethernet controllers are usually attached to PHY
+ devices. This option provides infrastructure for
+ managing PHY devices.
+
+if PHYLIB
+
+config SWPHY
+ bool
+
+config LED_TRIGGER_PHY
+ bool "Support LED triggers for tracking link state"
+ depends on LEDS_TRIGGERS
+ ---help---
+ Adds support for a set of LED trigger events per-PHY. Link
+ state change will trigger the events, for consumption by an
+ LED class driver. There are triggers for each link speed currently
+ supported by the phy, and are of the form:
+ <mii bus id>:<phy>:<speed>
+
+ Where speed is in the form:
+ <Speed in megabits>Mbps or <Speed in gigabits>Gbps
+
comment "MII PHY device drivers"
config AMD_PHY
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 407b0b601ea8..e36db9a2ba38 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,7 +1,20 @@
# Makefile for Linux PHY drivers and MDIO bus drivers
-libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o \
- mdio-boardinfo.o
+libphy-y := phy.o phy-core.o phy_device.o
+mdio-bus-y += mdio_bus.o mdio_device.o
+
+ifdef CONFIG_MDIO_DEVICE
+obj-y += mdio-boardinfo.o
+endif
+
+# PHYLIB implies MDIO_DEVICE; in that case, we have a bunch of circular
+# dependencies that do not make it possible to split mdio-bus objects into a
+# dedicated loadable module, so we bundle them all together into libphy.ko
+ifdef CONFIG_PHYLIB
+libphy-y += $(mdio-bus-y)
+else
+obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o
+endif
libphy-$(CONFIG_SWPHY) += swphy.o
libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index ab9ad689617c..171010eb4d9c 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2015-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -201,8 +201,7 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
int val;
/* Enable EEE at PHY level */
- val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
- MDIO_MMD_AN);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
if (val < 0)
return val;
@@ -211,22 +210,19 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
else
val &= ~(LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X);
- phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
- MDIO_MMD_AN, (u32)val);
+ phy_write_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL, (u32)val);
/* Advertise EEE */
- val = phy_read_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
- MDIO_MMD_AN);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV);
if (val < 0)
return val;
if (enable)
- val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+ val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
else
- val &= ~(MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+ val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
- phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
- MDIO_MMD_AN, (u32)val);
+ phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
return 0;
}
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index d1c2614dad3a..caa9f6e17f34 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -1,7 +1,7 @@
/*
* Broadcom BCM7xxx internal transceivers support.
*
- * Copyright (C) 2014, Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,7 +19,7 @@
/* Broadcom BCM7xxx internal PHY registers */
-/* 40nm only register definitions */
+/* EPHY only register definitions */
#define MII_BCM7XXX_100TX_AUX_CTL 0x10
#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
#define MII_BCM7XXX_100TX_DISC 0x14
@@ -27,6 +27,19 @@
#define MII_BCM7XXX_64CLK_MDIO BIT(12)
#define MII_BCM7XXX_TEST 0x1f
#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
+#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
+#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
+#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
+#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
+#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
+#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
+#define MII_BCM7XXX_SHD_3_AN_STAT 0xb
+#define MII_BCM7XXX_AN_NULL_MSG_EN BIT(0)
+#define MII_BCM7XXX_AN_EEE_EN BIT(1)
+#define MII_BCM7XXX_SHD_3_EEE_THRESH 0xe
+#define MII_BCM7XXX_EEE_THRESH_DEF 0x50
+#define MII_BCM7XXX_SHD_3_TL4 0x23
+#define MII_BCM7XXX_TL4_RST_MSK (BIT(2) | BIT(1))
/* 28nm only register definitions */
#define MISC_ADDR(base, channel) base, channel
@@ -286,6 +299,181 @@ static int phy_set_clr_bits(struct phy_device *dev, int location,
return v;
}
+static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set current trim values INT_trim = -1, Ext_trim =0 */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_BIAS_TRIM, 0x3BE0);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Cal reset */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_TL4);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MII_BCM7XXX_TL4_RST_MSK, 0);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Cal reset disable */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_TL4);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ 0, MII_BCM7XXX_TL4_RST_MSK);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The 28nm EPHY does not support Clause 45 (MMD) used by bcm-phy-lib */
+static int bcm7xxx_28nm_ephy_apd_enable(struct phy_device *phydev)
+{
+ int ret;
+
+ /* set shadow mode 1 */
+ ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST,
+ MII_BRCM_FET_BT_SRE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Enable auto-power down */
+ ret = phy_set_clr_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
+ MII_BRCM_FET_SHDW_AS2_APDE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* reset shadow mode 1 */
+ ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST, 0,
+ MII_BRCM_FET_BT_SRE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_ephy_eee_enable(struct phy_device *phydev)
+{
+ int ret;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Advertise supported modes */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_AN_EEE_ADV);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MDIO_EEE_100TX);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Restore Defaults */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_PCS_CTRL_2);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MII_BCM7XXX_PCS_CTRL_2_DEF);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_EEE_THRESH);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MII_BCM7XXX_EEE_THRESH_DEF);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Enable EEE autonegotiation */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_AN_STAT);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ (MII_BCM7XXX_AN_NULL_MSG_EN | MII_BCM7XXX_AN_EEE_EN));
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ /* Restart autoneg */
+ phy_write(phydev, MII_BMCR,
+ (BMCR_SPEED100 | BMCR_ANENABLE | BMCR_ANRESTART));
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
+{
+ u8 rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+ int ret = 0;
+
+ pr_info_once("%s: %s PHY revision: 0x%02x\n",
+ phydev_name(phydev), phydev->drv->name, rev);
+
+ /* Dummy read to a register to workaround a possible issue upon reset
+ * where the internal inverter may not allow the first MDIO transaction
+ * to pass the MDIO management controller and make us return 0xffff for
+ * such reads.
+ */
+ phy_read(phydev, MII_BMSR);
+
+ /* Apply AFE software work-around if necessary */
+ if (rev == 0x01) {
+ ret = bcm7xxx_28nm_ephy_01_afe_config_init(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = bcm7xxx_28nm_ephy_eee_enable(phydev);
+ if (ret)
+ return ret;
+
+ return bcm7xxx_28nm_ephy_apd_enable(phydev);
+}
+
+static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Re-apply workarounds coming out suspend/resume */
+ ret = bcm7xxx_28nm_ephy_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
static int bcm7xxx_config_init(struct phy_device *phydev)
{
int ret;
@@ -434,6 +622,23 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.probe = bcm7xxx_28nm_probe, \
}
+#define BCM7XXX_28NM_EPHY(_oui, _name) \
+{ \
+ .phy_id = (_oui), \
+ .phy_id_mask = 0xfffffff0, \
+ .name = _name, \
+ .features = PHY_BASIC_FEATURES, \
+ .flags = PHY_IS_INTERNAL, \
+ .config_init = bcm7xxx_28nm_ephy_config_init, \
+ .config_aneg = genphy_config_aneg, \
+ .read_status = genphy_read_status, \
+ .resume = bcm7xxx_28nm_ephy_resume, \
+ .get_sset_count = bcm_phy_get_sset_count, \
+ .get_strings = bcm_phy_get_strings, \
+ .get_stats = bcm7xxx_28nm_get_phy_stats, \
+ .probe = bcm7xxx_28nm_probe, \
+}
+
#define BCM7XXX_40NM_EPHY(_oui, _name) \
{ \
.phy_id = (_oui), \
@@ -450,6 +655,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
@@ -466,6 +674,9 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
+ { PHY_ID_BCM7260, 0xfffffff0, },
+ { PHY_ID_BCM7268, 0xfffffff0, },
+ { PHY_ID_BCM7271, 0xfffffff0, },
{ PHY_ID_BCM7278, 0xfffffff0, },
{ PHY_ID_BCM7364, 0xfffffff0, },
{ PHY_ID_BCM7366, 0xfffffff0, },
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9cd8b27d1292..a32dc5d11e89 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -74,27 +74,40 @@ static int bcm54612e_config_init(struct phy_device *phydev)
return 0;
}
-static int bcm54810_config(struct phy_device *phydev)
+static int bcm5481x_config(struct phy_device *phydev)
{
int rc, val;
- val = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
- val &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
- rc = bcm_phy_write_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
- val);
- if (rc < 0)
- return rc;
-
+ /* Handle the PHY's internal RX clock delay */
val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
- val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
val |= MII_BCM54XX_AUXCTL_MISC_WREN;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* Disable RGMII RXC-RXD skew */
+ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ }
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ /* Enable RGMII RXC-RXD skew */
+ val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ }
rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
val);
if (rc < 0)
return rc;
+ /* Handle the PHY's internal TX clock delay */
val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
- val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ /* Disable internal TX clock delay */
+ val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+ }
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* Enable internal TX clock delay */
+ val |= BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+ }
rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
if (rc < 0)
return rc;
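
For reference, the interface-mode checks above implement the usual RGMII delay convention; condensed into a table (derived directly from the code, not an additional change):

/*
 *  phydev->interface                RX delay (AUXCTL)   TX delay (CLK_CTL)
 *  PHY_INTERFACE_MODE_RGMII         disabled            disabled
 *  PHY_INTERFACE_MODE_RGMII_RXID    enabled             disabled
 *  PHY_INTERFACE_MODE_RGMII_TXID    disabled            enabled
 *  PHY_INTERFACE_MODE_RGMII_ID      enabled             enabled
 */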
@@ -244,7 +257,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
static int bcm54xx_config_init(struct phy_device *phydev)
{
- int reg, err;
+ int reg, err, val;
reg = phy_read(phydev, MII_BCM54XX_ECR);
if (reg < 0)
@@ -283,8 +296,14 @@ static int bcm54xx_config_init(struct phy_device *phydev)
if (err)
return err;
} else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
- err = bcm54810_config(phydev);
- if (err)
+ /* For BCM54810, we need to disable the BroadR-Reach function */
+ val = bcm_phy_read_exp(phydev,
+ BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
+ val &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
+ err = bcm_phy_write_exp(phydev,
+ BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
+ val);
+ if (err < 0)
return err;
}
@@ -392,29 +411,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- u16 reg;
-
- /*
- * There is no BCM5481 specification available, so down
- * here is everything we know about "register 0x18". This
- * at least helps BCM5481 to successfully receive packets
- * on MPC8360E-RDK board. Peter Barada <peterb@logicpd.com>
- * says: "This sets delay between the RXD and RXC signals
- * instead of using trace lengths to achieve timing".
- */
-
- /* Set RDX clk delay. */
- reg = 0x7 | (0x7 << 12);
- phy_write(phydev, 0x18, reg);
-
- reg = phy_read(phydev, 0x18);
- /* Set RDX-RXC skew. */
- reg |= (1 << 8);
- /* Write bits 14:0. */
- reg |= (1 << 15);
- phy_write(phydev, 0x18, reg);
- }
+ bcm5481x_config(phydev);
if (of_property_read_bool(np, "enet-phy-lane-swap")) {
/* Lane Swap - Undocumented register...magic! */
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 19865530e0b1..b57f20e552ba 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -133,14 +133,14 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev)
(struct dp83867_private *)phydev->priv;
u16 val;
- val = phy_read_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR);
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN)
val |= DP83867_CFG4_PORT_MIRROR_EN;
else
val &= ~DP83867_CFG4_PORT_MIRROR_EN;
- phy_write_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR, val);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
return 0;
}
@@ -231,8 +231,7 @@ static int dp83867_config_init(struct phy_device *phydev)
* register's bit 11 (marked as RESERVED).
*/
- bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1,
- DP83867_DEVADDR);
+ bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS1);
if (bs & DP83867_STRAP_STS1_RESERVED)
val &= ~DP83867_PHYCR_RESERVED_MASK;
@@ -243,8 +242,7 @@ static int dp83867_config_init(struct phy_device *phydev)
if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
(phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
- val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
- DP83867_DEVADDR);
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
@@ -255,25 +253,24 @@ static int dp83867_config_init(struct phy_device *phydev)
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
val |= DP83867_RGMII_RX_CLK_DELAY_EN;
- phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
- DP83867_DEVADDR, val);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
delay = (dp83867->rx_id_delay |
(dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
- phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
- DP83867_DEVADDR, delay);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
+ delay);
if (dp83867->io_impedance >= 0) {
- val = phy_read_mmd_indirect(phydev, DP83867_IO_MUX_CFG,
- DP83867_DEVADDR);
+ val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_IO_MUX_CFG);
val &= ~DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
val |= dp83867->io_impedance &
DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
- phy_write_mmd_indirect(phydev, DP83867_IO_MUX_CFG,
- DP83867_DEVADDR, val);
+ phy_write_mmd(phydev, DP83867_DEVADDR,
+ DP83867_IO_MUX_CFG, val);
}
}
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index b1fd7bb0e4db..55f8c52dd2f1 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -166,13 +166,13 @@ static int xway_gphy_config_init(struct phy_device *phydev)
/* Clear all pending interrupts */
phy_read(phydev, XWAY_MDIO_ISTAT);
- phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCH, MDIO_MMD_VEND2,
- XWAY_MMD_LEDCH_NACS_NONE |
- XWAY_MMD_LEDCH_SBF_F02HZ |
- XWAY_MMD_LEDCH_FBF_F16HZ);
- phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCL, MDIO_MMD_VEND2,
- XWAY_MMD_LEDCH_CBLINK_NONE |
- XWAY_MMD_LEDCH_SCAN_NONE);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
+ XWAY_MMD_LEDCH_NACS_NONE |
+ XWAY_MMD_LEDCH_SBF_F02HZ |
+ XWAY_MMD_LEDCH_FBF_F16HZ);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCL,
+ XWAY_MMD_LEDCH_CBLINK_NONE |
+ XWAY_MMD_LEDCH_SCAN_NONE);
/**
* In most cases only one LED is connected to this phy, so
@@ -183,12 +183,12 @@ static int xway_gphy_config_init(struct phy_device *phydev)
ledxh = XWAY_MMD_LEDxH_BLINKF_NONE | XWAY_MMD_LEDxH_CON_LINK10XX;
ledxl = XWAY_MMD_LEDxL_PULSE_TXACT | XWAY_MMD_LEDxL_PULSE_RXACT |
XWAY_MMD_LEDxL_BLINKS_NONE;
- phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, ledxh);
- phy_write_mmd_indirect(phydev, XWAY_MMD_LED0L, MDIO_MMD_VEND2, ledxl);
- phy_write_mmd_indirect(phydev, XWAY_MMD_LED1H, MDIO_MMD_VEND2, ledxh);
- phy_write_mmd_indirect(phydev, XWAY_MMD_LED1L, MDIO_MMD_VEND2, ledxl);
- phy_write_mmd_indirect(phydev, XWAY_MMD_LED2H, MDIO_MMD_VEND2, ledxh);
- phy_write_mmd_indirect(phydev, XWAY_MMD_LED2L, MDIO_MMD_VEND2, ledxl);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED0H, ledxh);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED0L, ledxl);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED1H, ledxh);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED1L, ledxl);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2H, ledxh);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2L, ledxl);
return 0;
}
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 8c73b2e771dd..34395230ce70 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -1,7 +1,7 @@
/*
* Broadcom UniMAC MDIO bus controller driver
*
- * Copyright (C) 2014, Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -228,6 +228,7 @@ static int unimac_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
{ .compatible = "brcm,genet-mdio-v3", },
{ .compatible = "brcm,genet-mdio-v2", },
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 61941e29daae..1861f387820d 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -24,10 +24,12 @@ static DEFINE_MUTEX(mdio_board_lock);
* @mdiodev: MDIO device pointer
* Context: can sleep
*/
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus)
+void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
+ int (*cb)
+ (struct mii_bus *bus,
+ struct mdio_board_info *bi))
{
struct mdio_board_entry *be;
- struct mdio_device *mdiodev;
struct mdio_board_info *bi;
int ret;
@@ -38,23 +40,14 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus)
if (strcmp(bus->id, bi->bus_id))
continue;
- mdiodev = mdio_device_create(bus, bi->mdio_addr);
- if (IS_ERR(mdiodev))
+ ret = cb(bus, bi);
+ if (ret)
continue;
- strncpy(mdiodev->modalias, bi->modalias,
- sizeof(mdiodev->modalias));
- mdiodev->bus_match = mdio_device_bus_match;
- mdiodev->dev.platform_data = (void *)bi->platform_data;
-
- ret = mdio_device_register(mdiodev);
- if (ret) {
- mdio_device_free(mdiodev);
- continue;
- }
}
mutex_unlock(&mdio_board_lock);
}
+EXPORT_SYMBOL(mdiobus_setup_mdiodev_from_board_info);
/**
* mdio_register_board_info - register MDIO devices for a given board
diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h
index 00f98163e90e..3a7f143904e8 100644
--- a/drivers/net/phy/mdio-boardinfo.h
+++ b/drivers/net/phy/mdio-boardinfo.h
@@ -14,6 +14,9 @@ struct mdio_board_entry {
struct mdio_board_info board_info;
};
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus);
+void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
+ int (*cb)
+ (struct mii_bus *bus,
+ struct mdio_board_info *bi));
#endif /* __MDIO_BOARD_INFO_H */
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index f095051beb54..3e2ac07b6e37 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -229,7 +229,7 @@ static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id,
val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) |
SET_VAL(HSTMIIMWRDAT, data);
- xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, data);
+ xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val);
val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE);
xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index fa7d51f14869..a898e5c4ef1b 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -22,8 +22,11 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
+#include <linux/of_gpio.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -290,6 +293,36 @@ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
#endif
/**
+ * mdiobus_create_device - create a full MDIO device given
+ * an mdio_board_info structure
+ * @bus: MDIO bus to create the devices on
+ * @bi: mdio_board_info structure describing the devices
+ *
+ * Returns 0 on success or < 0 on error.
+ */
+static int mdiobus_create_device(struct mii_bus *bus,
+ struct mdio_board_info *bi)
+{
+ struct mdio_device *mdiodev;
+ int ret = 0;
+
+ mdiodev = mdio_device_create(bus, bi->mdio_addr);
+ if (IS_ERR(mdiodev))
+ return -ENODEV;
+
+ strncpy(mdiodev->modalias, bi->modalias,
+ sizeof(mdiodev->modalias));
+ mdiodev->bus_match = mdio_device_bus_match;
+ mdiodev->dev.platform_data = (void *)bi->platform_data;
+
+ ret = mdio_device_register(mdiodev);
+ if (ret)
+ mdio_device_free(mdiodev);
+
+ return ret;
+}
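
The board-info entries consumed by this callback come from platform code. A hedged sketch of the producer side (the switch name and platform data are hypothetical; the registration call follows the mdio-boardinfo API this patch builds on):

struct my_switch_pdata { int base_port; };		/* hypothetical */
static struct my_switch_pdata my_pdata = { .base_port = 0 };

static const struct mdio_board_info my_bdinfo = {
	.bus_id		= "fixed-0",	/* must match bus->id */
	.modalias	= "my_switch",	/* drives driver matching */
	.mdio_addr	= 31,
	.platform_data	= &my_pdata,
};

static int __init my_board_setup(void)
{
	/* queue the entry; it is instantiated when the bus registers */
	return mdiobus_register_board_info(&my_bdinfo, 1);
}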
+
+/**
* __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
* @bus: target mii_bus
* @owner: module containing bus accessor functions
@@ -307,6 +340,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
{
struct mdio_device *mdiodev;
int i, err;
+ struct gpio_desc *gpiod;
if (NULL == bus || NULL == bus->name ||
NULL == bus->read || NULL == bus->write)
@@ -333,6 +367,35 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
if (bus->reset)
bus->reset(bus);
+ /* De-assert bus-level PHY GPIO resets */
+ if (bus->num_reset_gpios > 0) {
+ bus->reset_gpiod = devm_kcalloc(&bus->dev,
+ bus->num_reset_gpios,
+ sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!bus->reset_gpiod)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < bus->num_reset_gpios; i++) {
+ gpiod = devm_gpiod_get_index(&bus->dev, "reset", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ if (err != -ENOENT) {
+ dev_err(&bus->dev,
+ "mii_bus %s couldn't get reset GPIO\n",
+ bus->id);
+ return err;
+ }
+ } else {
+ bus->reset_gpiod[i] = gpiod;
+ gpiod_set_value_cansleep(gpiod, 1);
+ udelay(bus->reset_delay_us);
+ gpiod_set_value_cansleep(gpiod, 0);
+ }
+ }
+
for (i = 0; i < PHY_MAX_ADDR; i++) {
if ((bus->phy_mask & (1 << i)) == 0) {
struct phy_device *phydev;
@@ -345,7 +408,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
}
}
- mdiobus_setup_mdiodev_from_board_info(bus);
+ mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
bus->state = MDIOBUS_REGISTERED;
pr_info("%s: probed\n", bus->name);
@@ -360,6 +423,13 @@ error:
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}
+
+ /* Put PHYs in RESET to save power */
+ for (i = 0; i < bus->num_reset_gpios; i++) {
+ if (bus->reset_gpiod[i])
+ gpiod_set_value_cansleep(bus->reset_gpiod[i], 1);
+ }
+
device_del(&bus->dev);
return err;
}
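
The reset_delay_us / num_reset_gpios fields consumed in this hunk are expected to be populated from the device tree before registration. A sketch of that producer side, assumed to live in the OF MDIO code, with property names taken from this series ("reset-gpios", "reset-delay-us"):

/* np is the bus device_node; the default delay is an assumption */
bus->reset_delay_us = 10;	/* hypothetical default, in microseconds */
of_property_read_u32(np, "reset-delay-us", &bus->reset_delay_us);

bus->num_reset_gpios = of_gpio_named_count(np, "reset-gpios");
if (bus->num_reset_gpios < 0)
	bus->num_reset_gpios = 0;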
@@ -381,6 +451,13 @@ void mdiobus_unregister(struct mii_bus *bus)
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}
+
+ /* Put PHYs in RESET to save power */
+ for (i = 0; i < bus->num_reset_gpios; i++) {
+ if (bus->reset_gpiod[i])
+ gpiod_set_value_cansleep(bus->reset_gpiod[i], 1);
+ }
+
device_del(&bus->dev);
}
EXPORT_SYMBOL(mdiobus_unregister);
@@ -648,9 +725,18 @@ int __init mdio_bus_init(void)
return ret;
}
+EXPORT_SYMBOL_GPL(mdio_bus_init);
+#if IS_ENABLED(CONFIG_PHYLIB)
void mdio_bus_exit(void)
{
class_unregister(&mdio_bus_class);
bus_unregister(&mdio_bus_type);
}
+EXPORT_SYMBOL_GPL(mdio_bus_exit);
+#else
+module_init(mdio_bus_init);
+/* no module_exit, intentional */
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MDIO bus/device layer");
+#endif
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index da5b39268370..6a5fd18f062c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -626,8 +626,7 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
* MMD extended PHY registers.
*/
static int
-ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
- int regnum)
+ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum)
{
return -1;
}
@@ -635,10 +634,10 @@ ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
/* This routine does nothing since the Micrel ksz9021 does not support
* standard IEEE MMD extended PHY registers.
*/
-static void
-ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
- int regnum, u32 val)
+static int
+ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val)
{
+ return -1;
}
static int kszphy_get_sset_count(struct phy_device *phydev)
@@ -946,8 +945,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
- .read_mmd_indirect = ksz9021_rd_mmd_phyreg,
- .write_mmd_indirect = ksz9021_wr_mmd_phyreg,
+ .read_mmd = ksz9021_rd_mmd_phyreg,
+ .write_mmd = ksz9021_wr_mmd_phyreg,
}, {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 324fbf6ad8ff..2b2f543cf9f0 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -78,9 +78,8 @@ static int lan88xx_probe(struct phy_device *phydev)
priv->wolopts = 0;
/* these values can be used to identify internal PHY */
- priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID, 3);
- priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV,
- 3);
+ priv->chip_id = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_ID);
+ priv->chip_rev = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_REV);
phydev->priv = priv;
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
new file mode 100644
index 000000000000..6739b738bbaf
--- /dev/null
+++ b/drivers/net/phy/phy-core.c
@@ -0,0 +1,101 @@
+/*
+ * Core PHY library, taken from phy.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/phy.h>
+
+static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
+ u16 regnum)
+{
+ /* Write the desired MMD Devad */
+ bus->write(bus, phy_addr, MII_MMD_CTRL, devad);
+
+ /* Write the desired MMD register address */
+ bus->write(bus, phy_addr, MII_MMD_DATA, regnum);
+
+ /* Select the Function : DATA with no post increment */
+ bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
+}
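
The sequence above is the standard Clause 22 window into Clause 45 (MMD) registers; restating the recipe that the removed phy.c kernel-doc carried:

/*
 * 1) write reg 13 (MII_MMD_CTRL) = devad           // select MMD device
 * 2) write reg 14 (MII_MMD_DATA) = regnum          // select its register
 * 3) write reg 13 = devad | MII_MMD_CTRL_NOINCR    // data mode, no post-incr
 * 4) read or write reg 14                          // transfer the data
 */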
+
+/**
+ * phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from (0..31)
+ * @regnum: The register on the MMD to read (0..65535)
+ *
+ * Same rules as for phy_read();
+ */
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
+{
+ int val;
+
+ if (regnum > (u16)~0 || devad > 31)
+ return -EINVAL;
+
+ if (phydev->drv->read_mmd) {
+ val = phydev->drv->read_mmd(phydev, devad, regnum);
+ } else if (phydev->is_c45) {
+ u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
+
+ val = mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
+ } else {
+ struct mii_bus *bus = phydev->mdio.bus;
+ int phy_addr = phydev->mdio.addr;
+
+ mutex_lock(&bus->mdio_lock);
+ mmd_phy_indirect(bus, phy_addr, devad, regnum);
+
+ /* Read the content of the MMD's selected register */
+ val = bus->read(bus, phy_addr, MII_MMD_DATA);
+ mutex_unlock(&bus->mdio_lock);
+ }
+ return val;
+}
+EXPORT_SYMBOL(phy_read_mmd);
+
+/**
+ * phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Same rules as for phy_write();
+ */
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
+{
+ int ret;
+
+ if (regnum > (u16)~0 || devad > 31)
+ return -EINVAL;
+
+ if (phydev->drv->write_mmd) {
+ ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
+ } else if (phydev->is_c45) {
+ u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
+
+ ret = mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
+ addr, val);
+ } else {
+ struct mii_bus *bus = phydev->mdio.bus;
+ int phy_addr = phydev->mdio.addr;
+
+ mutex_lock(&bus->mdio_lock);
+ mmd_phy_indirect(bus, phy_addr, devad, regnum);
+
+ /* Write the data into MMD's selected register */
+ bus->write(bus, phy_addr, MII_MMD_DATA, val);
+ mutex_unlock(&bus->mdio_lock);
+
+ ret = 0;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(phy_write_mmd);
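
A short usage sketch of the new accessors, mirroring the EEE conversions later in this patch (constants from the uapi mdio.h; the function itself is illustrative):

static int example_limit_eee_to_100tx(struct phy_device *phydev)
{
	/* read the EEE capability from the PCS MMD */
	int cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);

	if (cap <= 0)
		return cap;

	/* advertise only 100BASE-TX EEE in the AN MMD */
	return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
			     cap & MDIO_EEE_100TX);
}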
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 97ff1278167b..82ab8fb82587 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -50,8 +50,22 @@ static const char *phy_speed_to_str(int speed)
return "1Gbps";
case SPEED_2500:
return "2.5Gbps";
+ case SPEED_5000:
+ return "5Gbps";
case SPEED_10000:
return "10Gbps";
+ case SPEED_20000:
+ return "20Gbps";
+ case SPEED_25000:
+ return "25Gbps";
+ case SPEED_40000:
+ return "40Gbps";
+ case SPEED_50000:
+ return "50Gbps";
+ case SPEED_56000:
+ return "56Gbps";
+ case SPEED_100000:
+ return "100Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -162,7 +176,9 @@ struct phy_setting {
u32 setting;
};
-/* A mapping of all SUPPORTED settings to speed/duplex */
+/* A mapping of all SUPPORTED settings to speed/duplex. This table
+ * must be grouped by speed and sorted in descending match priority
+ * - in other words, descending speed.
+ */
static const struct phy_setting settings[] = {
{
.speed = SPEED_10000,
@@ -221,45 +237,70 @@ static const struct phy_setting settings[] = {
},
};
-#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)
-
/**
- * phy_find_setting - find a PHY settings array entry that matches speed & duplex
+ * phy_lookup_setting - lookup a PHY setting
* @speed: speed to match
* @duplex: duplex to match
+ * @features: allowed link modes
+ * @exact: an exact match is required
+ *
+ * Search the settings array for a setting that matches the speed and
+ * duplex, and which is supported.
*
- * Description: Searches the settings array for the setting which
- * matches the desired speed and duplex, and returns the index
- * of that setting. Returns the index of the last setting if
- * none of the others match.
+ * If @exact is set, either an exact match or %NULL for no match will
+ * be returned.
+ *
+ * If @exact is unset, an exact match, the fastest supported setting at
+ * or below the specified speed, the slowest supported setting, or if
+ * they all fail, %NULL will be returned.
*/
-static inline unsigned int phy_find_setting(int speed, int duplex)
+static const struct phy_setting *
+phy_lookup_setting(int speed, int duplex, u32 features, bool exact)
{
- unsigned int idx = 0;
+ const struct phy_setting *p, *match = NULL, *last = NULL;
+ int i;
+
+ for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
+ if (p->setting & features) {
+ last = p;
+ if (p->speed == speed && p->duplex == duplex) {
+ /* Exact match for speed and duplex */
+ match = p;
+ break;
+ } else if (!exact) {
+ if (!match && p->speed <= speed)
+ /* Candidate */
+ match = p;
+
+ if (p->speed < speed)
+ break;
+ }
+ }
+ }
- while (idx < ARRAY_SIZE(settings) &&
- (settings[idx].speed != speed || settings[idx].duplex != duplex))
- idx++;
+ if (!match && !exact)
+ match = last;
- return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
+ return match;
}
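
A worked example of the lookup priority, assuming a 10/100-only PHY (features contains just the 10baseT and 100baseT modes):

/*
 * phy_lookup_setting(SPEED_1000, DUPLEX_FULL, features, true)
 *	-> NULL (no exact match)
 * phy_lookup_setting(SPEED_1000, DUPLEX_FULL, features, false)
 *	-> 100baseT/Full (fastest supported setting below the request)
 * phy_lookup_setting(SPEED_10, DUPLEX_HALF, features, false)
 *	-> 10baseT/Half (exact match)
 */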
/**
- * phy_find_valid - find a PHY setting that matches the requested features mask
- * @idx: The first index in settings[] to search
- * @features: A mask of the valid settings
+ * phy_find_valid - find a PHY setting that matches the requested parameters
+ * @speed: desired speed
+ * @duplex: desired duplex
+ * @supported: mask of supported link modes
*
- * Description: Returns the index of the first valid setting less
- * than or equal to the one pointed to by idx, as determined by
- * the mask in features. Returns the index of the last setting
- * if nothing else matches.
+ * Locate a supported phy setting that is, in priority order:
+ * - an exact match for the specified speed and duplex mode
+ * - a match for the specified speed, or slower speed
+ * - the slowest supported speed
+ * Returns the matched phy_setting entry, or %NULL if no supported phy
+ * settings were found.
*/
-static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
+static const struct phy_setting *
+phy_find_valid(int speed, int duplex, u32 supported)
{
- while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
- idx++;
-
- return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
+ return phy_lookup_setting(speed, duplex, supported, false);
}
/**
@@ -279,20 +320,11 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int count = 0;
unsigned int idx = 0;
- while (idx < MAX_NUM_SETTINGS && count < size) {
- idx = phy_find_valid(idx, phy->supported);
-
- if (!(settings[idx].setting & phy->supported))
- break;
-
+ for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++)
/* Assumes settings are grouped by speed */
- if ((count == 0) ||
- (speeds[count - 1] != settings[idx].speed)) {
- speeds[count] = settings[idx].speed;
- count++;
- }
- idx++;
- }
+ if ((settings[idx].setting & phy->supported) &&
+ (count == 0 || speeds[count - 1] != settings[idx].speed))
+ speeds[count++] = settings[idx].speed;
return count;
}
@@ -308,12 +340,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*/
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
- unsigned int idx;
-
- idx = phy_find_valid(phy_find_setting(speed, duplex), features);
-
- return settings[idx].speed == speed && settings[idx].duplex == duplex &&
- (settings[idx].setting & features);
+ return !!phy_lookup_setting(speed, duplex, features, true);
}
/**
@@ -326,18 +353,22 @@ static inline bool phy_check_valid(int speed, int duplex, u32 features)
*/
static void phy_sanitize_settings(struct phy_device *phydev)
{
+ const struct phy_setting *setting;
u32 features = phydev->supported;
- unsigned int idx;
/* Sanitize settings based on PHY capabilities */
if ((features & SUPPORTED_Autoneg) == 0)
phydev->autoneg = AUTONEG_DISABLE;
- idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
- features);
-
- phydev->speed = settings[idx].speed;
- phydev->duplex = settings[idx].duplex;
+ setting = phy_find_valid(phydev->speed, phydev->duplex, features);
+ if (setting) {
+ phydev->speed = setting->speed;
+ phydev->duplex = setting->duplex;
+ } else {
+ /* We failed to find anything (no supported speeds?) */
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ }
}
/**
@@ -1224,91 +1255,6 @@ void phy_mac_interrupt(struct phy_device *phydev, int new_link)
}
EXPORT_SYMBOL(phy_mac_interrupt);
-static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
- int addr)
-{
- /* Write the desired MMD Devad */
- bus->write(bus, addr, MII_MMD_CTRL, devad);
-
- /* Write the desired MMD register address */
- bus->write(bus, addr, MII_MMD_DATA, prtad);
-
- /* Select the Function : DATA with no post increment */
- bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
-}
-
-/**
- * phy_read_mmd_indirect - reads data from the MMD registers
- * @phydev: The PHY device bus
- * @prtad: MMD Address
- * @devad: MMD DEVAD
- *
- * Description: it reads data from the MMD registers (clause 22 to access to
- * clause 45) of the specified phy address.
- * To read these register we have:
- * 1) Write reg 13 // DEVAD
- * 2) Write reg 14 // MMD Address
- * 3) Write reg 13 // MMD Data Command for MMD DEVAD
- * 3) Read reg 14 // Read MMD data
- */
-int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad)
-{
- struct phy_driver *phydrv = phydev->drv;
- int addr = phydev->mdio.addr;
- int value = -1;
-
- if (!phydrv->read_mmd_indirect) {
- struct mii_bus *bus = phydev->mdio.bus;
-
- mutex_lock(&bus->mdio_lock);
- mmd_phy_indirect(bus, prtad, devad, addr);
-
- /* Read the content of the MMD's selected register */
- value = bus->read(bus, addr, MII_MMD_DATA);
- mutex_unlock(&bus->mdio_lock);
- } else {
- value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
- }
- return value;
-}
-EXPORT_SYMBOL(phy_read_mmd_indirect);
-
-/**
- * phy_write_mmd_indirect - writes data to the MMD registers
- * @phydev: The PHY device
- * @prtad: MMD Address
- * @devad: MMD DEVAD
- * @data: data to write in the MMD register
- *
- * Description: Write data from the MMD registers of the specified
- * phy address.
- * To write these register we have:
- * 1) Write reg 13 // DEVAD
- * 2) Write reg 14 // MMD Address
- * 3) Write reg 13 // MMD Data Command for MMD DEVAD
- * 3) Write reg 14 // Write MMD data
- */
-void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
- int devad, u32 data)
-{
- struct phy_driver *phydrv = phydev->drv;
- int addr = phydev->mdio.addr;
-
- if (!phydrv->write_mmd_indirect) {
- struct mii_bus *bus = phydev->mdio.bus;
-
- mutex_lock(&bus->mdio_lock);
- mmd_phy_indirect(bus, prtad, devad, addr);
-
- /* Write the data into MMD's selected register */
- bus->write(bus, addr, MII_MMD_DATA, data);
- mutex_unlock(&bus->mdio_lock);
- } else {
- phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
- }
-}
-EXPORT_SYMBOL(phy_write_mmd_indirect);
-
/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
@@ -1325,15 +1271,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
return -EIO;
/* According to 802.3az,the EEE is supported only in full duplex-mode.
- * Also EEE feature is active when core is operating with MII, GMII
- * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
- * should return an error if they do not support EEE.
*/
- if ((phydev->duplex == DUPLEX_FULL) &&
- ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
- (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
- phy_interface_is_rgmii(phydev) ||
- phy_is_internal(phydev))) {
+ if (phydev->duplex == DUPLEX_FULL) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
int status;
@@ -1344,8 +1283,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
return status;
/* First check if the EEE ability is supported */
- eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
- MDIO_MMD_PCS);
+ eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
if (eee_cap <= 0)
goto eee_exit_err;
@@ -1356,13 +1294,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
/* Check which link settings negotiated and verify it in
* the EEE advertising registers.
*/
- eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
- MDIO_MMD_AN);
+ eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
if (eee_lp <= 0)
goto eee_exit_err;
- eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
- MDIO_MMD_AN);
+ eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
if (eee_adv <= 0)
goto eee_exit_err;
@@ -1375,14 +1311,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
/* Configure the PHY to stop receiving xMII
* clock while it is signaling LPI.
*/
- int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
- MDIO_MMD_PCS);
+ int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
if (val < 0)
return val;
val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
- phy_write_mmd_indirect(phydev, MDIO_CTRL1,
- MDIO_MMD_PCS, val);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
}
return 0; /* EEE supported */
@@ -1404,7 +1338,7 @@ int phy_get_eee_err(struct phy_device *phydev)
if (!phydev->drv)
return -EIO;
- return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
+ return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
}
EXPORT_SYMBOL(phy_get_eee_err);
@@ -1424,19 +1358,19 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
return -EIO;
/* Get Supported EEE */
- val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
if (val < 0)
return val;
data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
/* Get advertisement EEE */
- val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
if (val < 0)
return val;
data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
/* Get LP advertisement EEE */
- val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
if (val < 0)
return val;
data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
@@ -1454,15 +1388,37 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
*/
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+ int cap, old_adv, adv, ret;
if (!phydev->drv)
return -EIO;
+ /* Get Supported EEE */
+ cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+ if (cap < 0)
+ return cap;
+
+ old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
+ if (old_adv < 0)
+ return old_adv;
+
+ adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
+
/* Mask prohibited EEE modes */
- val &= ~phydev->eee_broken_modes;
+ adv &= ~phydev->eee_broken_modes;
- phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);
+ if (old_adv != adv) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
+ if (ret < 0)
+ return ret;
+
+ /* Restart autonegotiation so the new modes get sent to the
+ * link partner.
+ */
+ ret = genphy_restart_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
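
A worked example of the masking above, with hypothetical values: cap = MDIO_EEE_100TX | MDIO_EEE_1000T, eee_broken_modes = MDIO_EEE_1000T, and the user requesting both modes:

/*
 * adv  = ethtool_adv_to_mmd_eee_adv_t(requested) & cap;  // both survive
 * adv &= ~phydev->eee_broken_modes;                      // 100TX remains
 *
 * Only if adv differs from the current MDIO_AN_EEE_ADV contents is the
 * register rewritten and autonegotiation restarted.
 */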
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 5198ccfa347f..1219eeab69d1 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1217,7 +1217,7 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
* supported by the phy. If we read 0, EEE is not advertised
* In both case, we don't need to continue
*/
- adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
+ adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
if (adv <= 0)
return 0;
@@ -1228,7 +1228,7 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
if (old_adv == adv)
return 0;
- phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
return 1;
}
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index fb32eaf2255d..cef6967b0396 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/smscphy.h>
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 85c01247f2e3..6c5d5ef46f75 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2476,7 +2476,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
goto team_put;
}
err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
- nl_option, team_nl_option_policy);
+ nl_option, team_nl_option_policy,
+ info->extack);
if (err)
goto team_put;
if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cc88cd7856f5..bbd707b9ef7a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2444,18 +2444,16 @@ static struct miscdevice tun_miscdev = {
/* ethtool interface */
-static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- cmd->supported = 0;
- cmd->advertising = 0;
- ethtool_cmd_speed_set(cmd, SPEED_10);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_TP;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+static int tun_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -2518,7 +2516,6 @@ static int tun_set_coalesce(struct net_device *dev,
}
static const struct ethtool_ops tun_ethtool_ops = {
- .get_settings = tun_get_settings,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
@@ -2526,6 +2523,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = tun_get_coalesce,
.set_coalesce = tun_set_coalesce,
+ .get_link_ksettings = tun_get_link_ksettings,
};
static int tun_queue_resize(struct tun_struct *tun)
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 0dd510604118..a3aa0a27dfe5 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -136,9 +136,9 @@ static const struct ethtool_ops ax88172_ethtool_ops = {
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static void ax88172_set_multicast(struct net_device *net)
@@ -206,6 +206,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
@@ -301,9 +302,9 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static int ax88772_link_reset(struct usbnet *dev)
@@ -591,6 +592,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
@@ -775,9 +777,9 @@ static const struct ethtool_ops ax88178_ethtool_ops = {
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static int marvell_phy_init(struct usbnet *dev)
@@ -1044,6 +1046,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = asix_set_multicast,
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 6308386b09df..501576f53854 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -143,6 +143,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = ax88172a_ioctl,
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index a3a7db0702d8..51cf60092a18 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -620,16 +620,18 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
return 0;
}
-static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88179_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
- return mii_ethtool_gset(&dev->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
}
-static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88179_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
- return mii_ethtool_sset(&dev->mii, cmd);
+ return mii_ethtool_set_link_ksettings(&dev->mii, cmd);
}
static int
@@ -826,11 +828,11 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
.set_wol = ax88179_set_wol,
.get_eeprom_len = ax88179_get_eeprom_len,
.get_eeprom = ax88179_get_eeprom,
- .get_settings = ax88179_get_settings,
- .set_settings = ax88179_set_settings,
.get_eee = ax88179_get_eee,
.set_eee = ax88179_set_eee,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = ax88179_get_link_ksettings,
+ .set_link_ksettings = ax88179_set_link_ksettings,
};
static void ax88179_set_multicast(struct net_device *net)
@@ -957,6 +959,7 @@ static const struct net_device_ops ax88179_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_change_mtu = ax88179_change_mtu,
.ndo_set_mac_address = ax88179_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 0acc9b640419..fce92f0e5abd 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -688,29 +688,34 @@ static void catc_get_drvinfo(struct net_device *dev,
usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
-static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int catc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct catc *catc = netdev_priv(dev);
if (!catc->is_f5u011)
return -EOPNOTSUPP;
- cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP;
- cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP;
- ethtool_cmd_speed_set(cmd, SPEED_10);
- cmd->duplex = DUPLEX_HALF;
- cmd->port = PORT_TP;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = DUPLEX_HALF;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
return 0;
}
static const struct ethtool_ops ops = {
.get_drvinfo = catc_get_drvinfo,
- .get_settings = catc_get_settings,
- .get_link = ethtool_op_get_link
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = catc_get_link_ksettings,
};
/*
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 3a98f3762a4c..a6b997cffd3b 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -100,6 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f317984f7536..bb3f71f9fbde 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -131,8 +131,6 @@ static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 s
static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
static const struct ethtool_ops cdc_ncm_ethtool_ops = {
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
.get_drvinfo = usbnet_get_drvinfo,
@@ -142,6 +140,8 @@ static const struct ethtool_ops cdc_ncm_ethtool_ops = {
.get_sset_count = cdc_ncm_get_sset_count,
.get_strings = cdc_ncm_get_strings,
.get_ethtool_stats = cdc_ncm_get_ethtool_stats,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
@@ -753,6 +753,7 @@ static const struct net_device_ops cdc_ncm_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 0b4bdd39106b..b91f92e4e5f2 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -281,9 +281,9 @@ static const struct ethtool_ops dm9601_ethtool_ops = {
.set_msglevel = usbnet_set_msglevel,
.get_eeprom_len = dm9601_get_eeprom_len,
.get_eeprom = dm9601_get_eeprom,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static void dm9601_set_multicast(struct net_device *net)
@@ -343,6 +343,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = dm9601_ioctl,
.ndo_set_rx_mode = dm9601_set_multicast,
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 4ff70b22c6ee..5a43b77a6b9c 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -144,6 +144,7 @@ static const struct net_device_ops int51x1_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = int51x1_set_multicast,
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 2a2c3edb6bad..37fb621fde86 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -245,8 +245,6 @@ struct kaweth_device
__u16 packet_filter_bitmap;
struct kaweth_ethernet_configuration configuration;
-
- struct net_device_stats stats;
};
/****************************************************************
@@ -598,7 +596,7 @@ static void kaweth_usb_receive(struct urb *urb)
struct sk_buff *skb;
if (unlikely(status == -EPIPE)) {
- kaweth->stats.rx_errors++;
+ net->stats.rx_errors++;
kaweth->end = 1;
wake_up(&kaweth->term_wait);
dev_dbg(dev, "Status was -EPIPE.\n");
@@ -613,12 +611,12 @@ static void kaweth_usb_receive(struct urb *urb)
}
if (unlikely(status == -EPROTO || status == -ETIME ||
status == -EILSEQ)) {
- kaweth->stats.rx_errors++;
+ net->stats.rx_errors++;
dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n");
return;
}
if (unlikely(status == -EOVERFLOW)) {
- kaweth->stats.rx_errors++;
+ net->stats.rx_errors++;
dev_dbg(dev, "Status was -EOVERFLOW.\n");
}
spin_lock(&kaweth->device_lock);
@@ -663,8 +661,8 @@ static void kaweth_usb_receive(struct urb *urb)
netif_rx(skb);
- kaweth->stats.rx_packets++;
- kaweth->stats.rx_bytes += pkt_len;
+ net->stats.rx_packets++;
+ net->stats.rx_bytes += pkt_len;
}
kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
@@ -804,7 +802,7 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
/* We now decide whether we can put our special header into the sk_buff */
if (skb_cow_head(skb, 2)) {
- kaweth->stats.tx_errors++;
+ net->stats.tx_errors++;
netif_start_queue(net);
spin_unlock_irq(&kaweth->device_lock);
dev_kfree_skb_any(skb);
@@ -828,15 +826,15 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
{
dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res);
skip:
- kaweth->stats.tx_errors++;
+ net->stats.tx_errors++;
netif_start_queue(net);
dev_kfree_skb_irq(skb);
}
else
{
- kaweth->stats.tx_packets++;
- kaweth->stats.tx_bytes += skb->len;
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += skb->len;
}
spin_unlock_irq(&kaweth->device_lock);
@@ -906,15 +904,6 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
}
/****************************************************************
- * kaweth_netdev_stats
- ****************************************************************/
-static struct net_device_stats *kaweth_netdev_stats(struct net_device *dev)
-{
- struct kaweth_device *kaweth = netdev_priv(dev);
- return &kaweth->stats;
-}
-
-/****************************************************************
* kaweth_tx_timeout
****************************************************************/
static void kaweth_tx_timeout(struct net_device *net)
@@ -922,7 +911,7 @@ static void kaweth_tx_timeout(struct net_device *net)
struct kaweth_device *kaweth = netdev_priv(net);
dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
- kaweth->stats.tx_errors++;
+ net->stats.tx_errors++;
netif_trans_update(net);
usb_unlink_urb(kaweth->tx_urb);
@@ -975,7 +964,6 @@ static const struct net_device_ops kaweth_netdev_ops = {
.ndo_start_xmit = kaweth_start_xmit,
.ndo_tx_timeout = kaweth_tx_timeout,
.ndo_set_rx_mode = kaweth_set_rx_mode,
- .ndo_get_stats = kaweth_netdev_stats,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 636f48f19d1e..9eff97a650ae 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -29,6 +29,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
+#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -1952,10 +1953,10 @@ static int lan8835_fixup(struct phy_device *phydev)
struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
- buf = phy_read_mmd_indirect(phydev, 0x8010, 3);
+ buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
buf &= ~0x1800;
buf |= 0x0800;
- phy_write_mmd_indirect(phydev, 0x8010, 3, buf);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
/* RGMII MAC TXC Delay Enable */
ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
@@ -1975,11 +1976,11 @@ static int ksz9031rnx_fixup(struct phy_device *phydev)
/* Micrel9301RNX PHY configuration */
/* RGMII Control Signal Pad Skew */
- phy_write_mmd_indirect(phydev, 4, 2, 0x0077);
+ phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
/* RGMII RX Data Pad Skew */
- phy_write_mmd_indirect(phydev, 5, 2, 0x7777);
+ phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
/* RGMII RX Clock Pad Skew */
- phy_write_mmd_indirect(phydev, 8, 2, 0x1FF);
+ phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 4f345bd4e6e2..5a47e5510ca8 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -464,9 +464,9 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
.get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static const struct net_device_ops mcs7830_netdev_ops = {
@@ -475,6 +475,7 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mcs7830_ioctl,
.ndo_set_rx_mode = mcs7830_set_multicast,
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 36674484c6fb..6514c86f043e 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -501,13 +501,13 @@ static void read_bulk_callback(struct urb *urb)
if (rx_status & 0x1e) {
netif_dbg(pegasus, rx_err, net,
"RX packet error %x\n", rx_status);
- pegasus->stats.rx_errors++;
+ net->stats.rx_errors++;
if (rx_status & 0x06) /* long or runt */
- pegasus->stats.rx_length_errors++;
+ net->stats.rx_length_errors++;
if (rx_status & 0x08)
- pegasus->stats.rx_crc_errors++;
+ net->stats.rx_crc_errors++;
if (rx_status & 0x10) /* extra bits */
- pegasus->stats.rx_frame_errors++;
+ net->stats.rx_frame_errors++;
goto goon;
}
if (pegasus->chip == 0x8513) {
@@ -535,8 +535,8 @@ static void read_bulk_callback(struct urb *urb)
skb_put(pegasus->rx_skb, pkt_len);
pegasus->rx_skb->protocol = eth_type_trans(pegasus->rx_skb, net);
netif_rx(pegasus->rx_skb);
- pegasus->stats.rx_packets++;
- pegasus->stats.rx_bytes += pkt_len;
+ net->stats.rx_packets++;
+ net->stats.rx_bytes += pkt_len;
if (pegasus->flags & PEGASUS_UNPLUG)
return;
@@ -670,13 +670,13 @@ static void intr_callback(struct urb *urb)
/* byte 0 == tx_status1, reg 2B */
if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL
|LATE_COL|JABBER_TIMEOUT)) {
- pegasus->stats.tx_errors++;
+ net->stats.tx_errors++;
if (d[0] & TX_UNDERRUN)
- pegasus->stats.tx_fifo_errors++;
+ net->stats.tx_fifo_errors++;
if (d[0] & (EXCESSIVE_COL | JABBER_TIMEOUT))
- pegasus->stats.tx_aborted_errors++;
+ net->stats.tx_aborted_errors++;
if (d[0] & LATE_COL)
- pegasus->stats.tx_window_errors++;
+ net->stats.tx_window_errors++;
}
/* d[5].LINK_STATUS lies on some adapters.
@@ -685,7 +685,7 @@ static void intr_callback(struct urb *urb)
*/
/* bytes 3-4 == rx_lostpkt, reg 2E/2F */
- pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
+ net->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
}
res = usb_submit_urb(urb, GFP_ATOMIC);
@@ -701,7 +701,7 @@ static void pegasus_tx_timeout(struct net_device *net)
pegasus_t *pegasus = netdev_priv(net);
netif_warn(pegasus, timer, net, "tx timeout\n");
usb_unlink_urb(pegasus->tx_urb);
- pegasus->stats.tx_errors++;
+ net->stats.tx_errors++;
}
static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
@@ -731,23 +731,18 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
netif_device_detach(pegasus->net);
break;
default:
- pegasus->stats.tx_errors++;
+ net->stats.tx_errors++;
netif_start_queue(net);
}
} else {
- pegasus->stats.tx_packets++;
- pegasus->stats.tx_bytes += skb->len;
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += skb->len;
}
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
-static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
-{
- return &((pegasus_t *) netdev_priv(dev))->stats;
-}
-
static inline void disable_net_traffic(pegasus_t *pegasus)
{
__le16 tmp = cpu_to_le16(0);
@@ -953,20 +948,22 @@ static inline void pegasus_reset_wol(struct net_device *dev)
}
static int
-pegasus_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+pegasus_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
{
pegasus_t *pegasus;
pegasus = netdev_priv(dev);
- mii_ethtool_gset(&pegasus->mii, ecmd);
+ mii_ethtool_get_link_ksettings(&pegasus->mii, ecmd);
return 0;
}
static int
-pegasus_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+pegasus_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
{
pegasus_t *pegasus = netdev_priv(dev);
- return mii_ethtool_sset(&pegasus->mii, ecmd);
+ return mii_ethtool_set_link_ksettings(&pegasus->mii, ecmd);
}
static int pegasus_nway_reset(struct net_device *dev)
@@ -995,14 +992,14 @@ static void pegasus_set_msglevel(struct net_device *dev, u32 v)
static const struct ethtool_ops ops = {
.get_drvinfo = pegasus_get_drvinfo,
- .get_settings = pegasus_get_settings,
- .set_settings = pegasus_set_settings,
.nway_reset = pegasus_nway_reset,
.get_link = pegasus_get_link,
.get_msglevel = pegasus_get_msglevel,
.set_msglevel = pegasus_set_msglevel,
.get_wol = pegasus_get_wol,
.set_wol = pegasus_set_wol,
+ .get_link_ksettings = pegasus_get_link_ksettings,
+ .set_link_ksettings = pegasus_set_link_ksettings,
};
static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
@@ -1292,7 +1289,6 @@ static const struct net_device_ops pegasus_netdev_ops = {
.ndo_do_ioctl = pegasus_ioctl,
.ndo_start_xmit = pegasus_start_xmit,
.ndo_set_rx_mode = pegasus_set_multicast,
- .ndo_get_stats = pegasus_netdev_stats,
.ndo_tx_timeout = pegasus_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index d15646244fdf..9b7ea9c9167d 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -83,7 +83,6 @@ typedef struct pegasus {
struct usb_device *usb;
struct usb_interface *intf;
struct net_device *net;
- struct net_device_stats stats;
struct mii_if_info mii;
unsigned flags;
unsigned features;
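The pegasus changes above fold the driver's private byte/packet counters into net_device->stats, which lets the custom .ndo_get_stats hook go away: the core falls back to dev->stats when no hook is installed. A minimal userspace sketch of that fallback, with illustrative names only (not the kernel API):

#include <stdio.h>

struct net_device_stats { unsigned long rx_packets, tx_packets; };

struct net_device {
	struct net_device_stats stats;
	struct net_device_stats *(*ndo_get_stats)(struct net_device *);
};

/* mirrors the core's behavior: use the hook if present, else dev->stats */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	if (dev->ndo_get_stats)
		return dev->ndo_get_stats(dev);
	return &dev->stats;
}

int main(void)
{
	/* no hook set: the default path returns the embedded counters */
	struct net_device dev = { .stats = { .rx_packets = 3 } };

	printf("rx_packets = %lu\n", get_stats(&dev)->rx_packets);
	return 0;
}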
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 2474618404f5..a3ed8115747c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -58,12 +58,198 @@ struct qmi_wwan_state {
enum qmi_wwan_flags {
QMI_WWAN_FLAG_RAWIP = 1 << 0,
+ QMI_WWAN_FLAG_MUX = 1 << 1,
};
enum qmi_wwan_quirks {
QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
};
+struct qmimux_hdr {
+ u8 pad;
+ u8 mux_id;
+ __be16 pkt_len;
+};
+
+struct qmimux_priv {
+ struct net_device *real_dev;
+ u8 mux_id;
+};
+
+static int qmimux_open(struct net_device *dev)
+{
+ struct qmimux_priv *priv = netdev_priv(dev);
+ struct net_device *real_dev = priv->real_dev;
+
+ if (!(priv->real_dev->flags & IFF_UP))
+ return -ENETDOWN;
+
+ if (netif_carrier_ok(real_dev))
+ netif_carrier_on(dev);
+ return 0;
+}
+
+static int qmimux_stop(struct net_device *dev)
+{
+ netif_carrier_off(dev);
+ return 0;
+}
+
+static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct qmimux_priv *priv = netdev_priv(dev);
+ unsigned int len = skb->len;
+ struct qmimux_hdr *hdr;
+
+ hdr = (struct qmimux_hdr *)skb_push(skb, sizeof(struct qmimux_hdr));
+ hdr->pad = 0;
+ hdr->mux_id = priv->mux_id;
+ hdr->pkt_len = cpu_to_be16(len);
+ skb->dev = priv->real_dev;
+ return dev_queue_xmit(skb);
+}
+
+static const struct net_device_ops qmimux_netdev_ops = {
+ .ndo_open = qmimux_open,
+ .ndo_stop = qmimux_stop,
+ .ndo_start_xmit = qmimux_start_xmit,
+};
+
+static void qmimux_setup(struct net_device *dev)
+{
+ dev->header_ops = NULL; /* No header */
+ dev->type = ARPHRD_NONE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->netdev_ops = &qmimux_netdev_ops;
+ dev->destructor = free_netdev;
+}
+
+static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id)
+{
+ struct qmimux_priv *priv;
+ struct list_head *iter;
+ struct net_device *ldev;
+
+ rcu_read_lock();
+ netdev_for_each_upper_dev_rcu(dev->net, ldev, iter) {
+ priv = netdev_priv(ldev);
+ if (priv->mux_id == mux_id) {
+ rcu_read_unlock();
+ return ldev;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+static bool qmimux_has_slaves(struct usbnet *dev)
+{
+ return !list_empty(&dev->net->adj_list.upper);
+}
+
+static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ unsigned int len, offset = sizeof(struct qmimux_hdr);
+ struct qmimux_hdr *hdr;
+ struct net_device *net;
+ struct sk_buff *skbn;
+
+ while (offset < skb->len) {
+ hdr = (struct qmimux_hdr *)skb->data;
+ len = be16_to_cpu(hdr->pkt_len);
+
+ /* drop the packet, bogus length */
+ if (offset + len > skb->len)
+ return 0;
+
+ /* control packet, we do not know what to do */
+ if (hdr->pad & 0x80)
+ goto skip;
+
+ net = qmimux_find_dev(dev, hdr->mux_id);
+ if (!net)
+ goto skip;
+ skbn = netdev_alloc_skb(net, len);
+ if (!skbn)
+ return 0;
+ skbn->dev = net;
+
+ switch (skb->data[offset] & 0xf0) {
+ case 0x40:
+ skbn->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skbn->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ /* not IP - we do not know what to do */
+ goto skip;
+ }
+
+ memcpy(skb_put(skbn, len), skb->data + offset, len);
+ if (netif_rx(skbn) != NET_RX_SUCCESS)
+ return 0;
+
+skip:
+ offset += len + sizeof(struct qmimux_hdr);
+ }
+ return 1;
+}
+
+static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
+{
+ struct net_device *new_dev;
+ struct qmimux_priv *priv;
+ int err;
+
+ new_dev = alloc_netdev(sizeof(struct qmimux_priv),
+ "qmimux%d", NET_NAME_UNKNOWN, qmimux_setup);
+ if (!new_dev)
+ return -ENOBUFS;
+
+ dev_net_set(new_dev, dev_net(real_dev));
+ priv = netdev_priv(new_dev);
+ priv->mux_id = mux_id;
+ priv->real_dev = real_dev;
+
+ err = register_netdevice(new_dev);
+ if (err < 0)
+ goto out_free_newdev;
+
+ /* Account for reference in struct qmimux_priv */
+ dev_hold(real_dev);
+
+ err = netdev_upper_dev_link(real_dev, new_dev);
+ if (err)
+ goto out_unregister_netdev;
+
+ netif_stacked_transfer_operstate(real_dev, new_dev);
+
+ return 0;
+
+out_unregister_netdev:
+ unregister_netdevice(new_dev);
+ dev_put(real_dev);
+
+out_free_newdev:
+ free_netdev(new_dev);
+ return err;
+}
+
+static void qmimux_unregister_device(struct net_device *dev)
+{
+ struct qmimux_priv *priv = netdev_priv(dev);
+ struct net_device *real_dev = priv->real_dev;
+
+ netdev_upper_dev_unlink(real_dev, dev);
+ unregister_netdevice(dev);
+
+ /* Get rid of the reference to real_dev */
+ dev_put(real_dev);
+}
+
static void qmi_wwan_netdev_setup(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@ -137,10 +323,114 @@ err:
return ret;
}
+static ssize_t add_mux_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct qmimux_priv *priv;
+ struct list_head *iter;
+ struct net_device *ldev;
+ ssize_t count = 0;
+
+ rcu_read_lock();
+ netdev_for_each_upper_dev_rcu(dev, ldev, iter) {
+ priv = netdev_priv(ldev);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "0x%02x\n", priv->mux_id);
+ }
+ rcu_read_unlock();
+ return count;
+}
+
+static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ u8 mux_id;
+ int ret;
+
+ if (kstrtou8(buf, 0, &mux_id))
+ return -EINVAL;
+
+ /* mux_id [1 - 0x7f] range empirically found */
+ if (mux_id < 1 || mux_id > 0x7f)
+ return -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (qmimux_find_dev(dev, mux_id)) {
+ netdev_err(dev->net, "mux_id already present\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* we don't want to modify a running netdev */
+ if (netif_running(dev->net)) {
+ netdev_err(dev->net, "Cannot change a running device\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = qmimux_register_device(dev->net, mux_id);
+ if (!ret) {
+ info->flags |= QMI_WWAN_FLAG_MUX;
+ ret = len;
+ }
+err:
+ rtnl_unlock();
+ return ret;
+}
+
+static ssize_t del_mux_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ return add_mux_show(d, attr, buf);
+}
+
+static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ struct net_device *del_dev;
+ u8 mux_id;
+ int ret = 0;
+
+ if (kstrtou8(buf, 0, &mux_id))
+ return -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ /* we don't want to modify a running netdev */
+ if (netif_running(dev->net)) {
+ netdev_err(dev->net, "Cannot change a running device\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ del_dev = qmimux_find_dev(dev, mux_id);
+ if (!del_dev) {
+ netdev_err(dev->net, "mux_id not present\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ qmimux_unregister_device(del_dev);
+
+ if (!qmimux_has_slaves(dev))
+ info->flags &= ~QMI_WWAN_FLAG_MUX;
+ ret = len;
+err:
+ rtnl_unlock();
+ return ret;
+}
+
static DEVICE_ATTR_RW(raw_ip);
+static DEVICE_ATTR_RW(add_mux);
+static DEVICE_ATTR_RW(del_mux);
static struct attribute *qmi_wwan_sysfs_attrs[] = {
&dev_attr_raw_ip.attr,
+ &dev_attr_add_mux.attr,
+ &dev_attr_del_mux.attr,
NULL,
};
@@ -184,6 +474,9 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (skb->len < dev->net->hard_header_len)
return 0;
+ if (info->flags & QMI_WWAN_FLAG_MUX)
+ return qmimux_rx_fixup(dev, skb);
+
switch (skb->data[0] & 0xf0) {
case 0x40:
proto = htons(ETH_P_IP);
@@ -249,6 +542,7 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = qmi_wwan_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1036,11 +1330,33 @@ static int qmi_wwan_probe(struct usb_interface *intf,
return usbnet_probe(intf, id);
}
+static void qmi_wwan_disconnect(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ struct list_head *iter;
+ struct net_device *ldev;
+
+ if (info->flags & QMI_WWAN_FLAG_MUX) {
+ if (!rtnl_trylock()) {
+ restart_syscall();
+ return;
+ }
+ rcu_read_lock();
+ netdev_for_each_upper_dev_rcu(dev->net, ldev, iter)
+ qmimux_unregister_device(ldev);
+ rcu_read_unlock();
+ rtnl_unlock();
+ info->flags &= ~QMI_WWAN_FLAG_MUX;
+ }
+ usbnet_disconnect(intf);
+}
+
static struct usb_driver qmi_wwan_driver = {
.name = "qmi_wwan",
.id_table = products,
.probe = qmi_wwan_probe,
- .disconnect = usbnet_disconnect,
+ .disconnect = qmi_wwan_disconnect,
.suspend = qmi_wwan_suspend,
.resume = qmi_wwan_resume,
.reset_resume = qmi_wwan_resume,
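The qmimux code above frames each IP packet with a 4-byte QMAP header: a pad/control byte (bit 7 set marks a control packet), a mux ID, and a big-endian payload length. A standalone sketch of building and parsing that header, assuming the layout of struct qmimux_hdr from the patch; the payload and values are illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct qmimux_hdr {
	uint8_t pad;      /* bit 7 set => control packet */
	uint8_t mux_id;   /* 1..0x7f in this driver */
	uint16_t pkt_len; /* big-endian payload length */
};

int main(void)
{
	uint8_t frame[sizeof(struct qmimux_hdr) + 5];
	struct qmimux_hdr hdr = { .pad = 0, .mux_id = 0x01,
				  .pkt_len = htons(5) };

	/* build: what qmimux_start_xmit() prepends before dev_queue_xmit() */
	memcpy(frame, &hdr, sizeof(hdr));
	memcpy(frame + sizeof(hdr), "hello", 5);

	/* parse it back, as qmimux_rx_fixup() walks an aggregated URB */
	struct qmimux_hdr in;
	memcpy(&in, frame, sizeof(in));
	printf("mux_id=0x%02x len=%u control=%d\n",
	       in.mux_id, (unsigned)ntohs(in.pkt_len), !!(in.pad & 0x80));
	return 0;
}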
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 07f788c49d57..ddc62cb69be8 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1766,6 +1766,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
unsigned long flags;
struct list_head *cursor, *next, rx_queue;
int ret = 0, work_done = 0;
+ struct napi_struct *napi = &tp->napi;
if (!skb_queue_empty(&tp->rx_queue)) {
while (work_done < budget) {
@@ -1778,7 +1779,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
break;
pkt_len = skb->len;
- napi_gro_receive(&tp->napi, skb);
+ napi_gro_receive(napi, skb);
work_done++;
stats->rx_packets++;
stats->rx_bytes += pkt_len;
@@ -1828,7 +1829,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
- skb = napi_alloc_skb(&tp->napi, pkt_len);
+ skb = napi_alloc_skb(napi, pkt_len);
if (!skb) {
stats->rx_dropped++;
goto find_next_rx;
@@ -1840,7 +1841,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
skb->protocol = eth_type_trans(skb, netdev);
rtl_rx_vlan_tag(rx_desc, skb);
if (work_done < budget) {
- napi_gro_receive(&tp->napi, skb);
+ napi_gro_receive(napi, skb);
work_done++;
stats->rx_packets++;
stats->rx_bytes += pkt_len;
@@ -3156,6 +3157,7 @@ static bool rtl8153_in_nway(struct r8152 *tp)
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
+ struct napi_struct *napi = &tp->napi;
u8 speed;
speed = rtl8152_get_speed(tp);
@@ -3165,7 +3167,7 @@ static void set_carrier(struct r8152 *tp)
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_stop_queue(netdev);
- napi_disable(&tp->napi);
+ napi_disable(napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
napi_enable(&tp->napi);
@@ -3178,9 +3180,9 @@ static void set_carrier(struct r8152 *tp)
} else {
if (netif_carrier_ok(netdev)) {
netif_carrier_off(netdev);
- napi_disable(&tp->napi);
+ napi_disable(napi);
tp->rtl_ops.disable(tp);
- napi_enable(&tp->napi);
+ napi_enable(napi);
netif_info(tp, link, netdev, "carrier off\n");
}
}
@@ -3642,11 +3644,13 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
tp->rtl_ops.autosuspend_en(tp, true);
if (netif_carrier_ok(netdev)) {
- napi_disable(&tp->napi);
+ struct napi_struct *napi = &tp->napi;
+
+ napi_disable(napi);
rtl_stop_rx(tp);
rxdy_gated_en(tp, false);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
- napi_enable(&tp->napi);
+ napi_enable(napi);
}
}
@@ -3662,12 +3666,14 @@ static int rtl8152_system_suspend(struct r8152 *tp)
netif_device_detach(netdev);
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+ struct napi_struct *napi = &tp->napi;
+
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
- napi_disable(&tp->napi);
+ napi_disable(napi);
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
- napi_enable(&tp->napi);
+ napi_enable(napi);
}
return ret;
@@ -3693,45 +3699,46 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
+ struct net_device *netdev = tp->netdev;
mutex_lock(&tp->control);
if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.init(tp);
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
- netif_device_attach(tp->netdev);
+ netif_device_attach(netdev);
}
- if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
+ if (netif_running(netdev) && netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ struct napi_struct *napi = &tp->napi;
+
tp->rtl_ops.autosuspend_en(tp, false);
- napi_disable(&tp->napi);
+ napi_disable(napi);
set_bit(WORK_ENABLE, &tp->flags);
-
- if (netif_carrier_ok(tp->netdev)) {
+ if (netif_carrier_ok(netdev)) {
if (rtl8152_get_speed(tp) & LINK_STATUS) {
rtl_start_rx(tp);
} else {
- netif_carrier_off(tp->netdev);
+ netif_carrier_off(netdev);
tp->rtl_ops.disable(tp);
- netif_info(tp, link, tp->netdev,
+ netif_info(tp, link, netdev,
"linking down\n");
}
}
-
- napi_enable(&tp->napi);
+ napi_enable(napi);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
smp_mb__after_atomic();
if (!list_empty(&tp->rx_done))
napi_schedule(&tp->napi);
} else {
tp->rtl_ops.up(tp);
- netif_carrier_off(tp->netdev);
+ netif_carrier_off(netdev);
set_bit(WORK_ENABLE, &tp->flags);
}
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- if (tp->netdev->flags & IFF_UP)
+ if (netdev->flags & IFF_UP)
tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
@@ -3819,7 +3826,8 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
}
static
-int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+int rtl8152_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct r8152 *tp = netdev_priv(netdev);
int ret;
@@ -3833,7 +3841,7 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
mutex_lock(&tp->control);
- ret = mii_ethtool_gset(&tp->mii, cmd);
+ ret = mii_ethtool_get_link_ksettings(&tp->mii, cmd);
mutex_unlock(&tp->control);
@@ -3843,7 +3851,8 @@ out:
return ret;
}
-static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rtl8152_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct r8152 *tp = netdev_priv(dev);
int ret;
@@ -3854,11 +3863,12 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
mutex_lock(&tp->control);
- ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed,
+ cmd->base.duplex);
if (!ret) {
- tp->autoneg = cmd->autoneg;
- tp->speed = cmd->speed;
- tp->duplex = cmd->duplex;
+ tp->autoneg = cmd->base.autoneg;
+ tp->speed = cmd->base.speed;
+ tp->duplex = cmd->base.duplex;
}
mutex_unlock(&tp->control);
@@ -4136,8 +4146,6 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
static const struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
- .get_settings = rtl8152_get_settings,
- .set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
.nway_reset = rtl8152_nway_reset,
.get_msglevel = rtl8152_get_msglevel,
@@ -4151,6 +4159,8 @@ static const struct ethtool_ops ops = {
.set_coalesce = rtl8152_set_coalesce,
.get_eee = rtl_ethtool_get_eee,
.set_eee = rtl_ethtool_set_eee,
+ .get_link_ksettings = rtl8152_get_link_ksettings,
+ .set_link_ksettings = rtl8152_set_link_ksettings,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -4249,44 +4259,6 @@ static const struct net_device_ops rtl8152_netdev_ops = {
.ndo_features_check = rtl8152_features_check,
};
-static void r8152b_get_version(struct r8152 *tp)
-{
- u32 ocp_data;
- u16 version;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1);
- version = (u16)(ocp_data & VERSION_MASK);
-
- switch (version) {
- case 0x4c00:
- tp->version = RTL_VER_01;
- break;
- case 0x4c10:
- tp->version = RTL_VER_02;
- break;
- case 0x5c00:
- tp->version = RTL_VER_03;
- tp->mii.supports_gmii = 1;
- break;
- case 0x5c10:
- tp->version = RTL_VER_04;
- tp->mii.supports_gmii = 1;
- break;
- case 0x5c20:
- tp->version = RTL_VER_05;
- tp->mii.supports_gmii = 1;
- break;
- case 0x5c30:
- tp->version = RTL_VER_06;
- tp->mii.supports_gmii = 1;
- break;
- default:
- netif_info(tp, probe, tp->netdev,
- "Unknown version 0x%04x\n", version);
- break;
- }
-}
-
static void rtl8152_unload(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags))
@@ -4351,14 +4323,66 @@ static int rtl_ops_init(struct r8152 *tp)
return ret;
}
+static u8 rtl_get_version(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ u32 ocp_data = 0;
+ __le32 *tmp;
+ u8 version;
+ int ret;
+
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return 0;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
+ if (ret > 0)
+ ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+
+ kfree(tmp);
+
+ switch (ocp_data) {
+ case 0x4c00:
+ version = RTL_VER_01;
+ break;
+ case 0x4c10:
+ version = RTL_VER_02;
+ break;
+ case 0x5c00:
+ version = RTL_VER_03;
+ break;
+ case 0x5c10:
+ version = RTL_VER_04;
+ break;
+ case 0x5c20:
+ version = RTL_VER_05;
+ break;
+ case 0x5c30:
+ version = RTL_VER_06;
+ break;
+ default:
+ version = RTL_VER_UNKNOWN;
+ dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data);
+ break;
+ }
+
+ return version;
+}
+
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
+ u8 version = rtl_get_version(intf);
struct r8152 *tp;
struct net_device *netdev;
int ret;
+ if (version == RTL_VER_UNKNOWN)
+ return -ENODEV;
+
if (udev->actconfig->desc.bConfigurationValue != 1) {
usb_driver_set_configuration(udev, 1);
return -ENODEV;
@@ -4378,8 +4402,18 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->udev = udev;
tp->netdev = netdev;
tp->intf = intf;
+ tp->version = version;
+
+ switch (version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ tp->mii.supports_gmii = 0;
+ break;
+ default:
+ tp->mii.supports_gmii = 1;
+ break;
+ }
- r8152b_get_version(tp);
ret = rtl_ops_init(tp);
if (ret)
goto out;
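rtl_get_version() above reads PLA_TCR0 over a plain control transfer before any netdev is allocated, so unsupported silicon can be rejected with -ENODEV early in probe. A sketch of the decode step, assuming VERSION_MASK's value of 0x7cf0 from elsewhere in r8152.c and a trimmed version enum:

#include <stdint.h>
#include <stdio.h>

#define VERSION_MASK 0x7cf0 /* assumed value from r8152.c */

enum rtl_version { RTL_VER_UNKNOWN, RTL_VER_01, RTL_VER_02, RTL_VER_03 };

/* the register's upper 16 bits carry the version field */
static enum rtl_version decode_version(uint32_t reg)
{
	switch ((reg >> 16) & VERSION_MASK) {
	case 0x4c00: return RTL_VER_01;
	case 0x4c10: return RTL_VER_02;
	case 0x5c00: return RTL_VER_03;
	default:     return RTL_VER_UNKNOWN;
	}
}

int main(void)
{
	printf("%d\n", decode_version(0x4c100000)); /* -> RTL_VER_02 */
	return 0;
}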
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index c5b21138b7eb..e96e2e5673d7 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -291,6 +291,7 @@ static const struct net_device_ops rndis_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index c81c79110cef..daaa88a66f40 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -791,47 +791,52 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
-static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int rtl8150_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
rtl8150_t *dev = netdev_priv(netdev);
short lpa, bmcr;
+ u32 supported;
- ecmd->supported = (SUPPORTED_10baseT_Half |
+ supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP | SUPPORTED_MII);
- ecmd->port = PORT_TP;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->phy_address = dev->phy;
+ ecmd->base.port = PORT_TP;
+ ecmd->base.phy_address = dev->phy;
get_registers(dev, BMCR, 2, &bmcr);
get_registers(dev, ANLP, 2, &lpa);
if (bmcr & BMCR_ANENABLE) {
u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ?
SPEED_100 : SPEED_10);
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->base.speed = speed;
+ ecmd->base.autoneg = AUTONEG_ENABLE;
if (speed == SPEED_100)
- ecmd->duplex = (lpa & LPA_100FULL) ?
+ ecmd->base.duplex = (lpa & LPA_100FULL) ?
DUPLEX_FULL : DUPLEX_HALF;
else
- ecmd->duplex = (lpa & LPA_10FULL) ?
+ ecmd->base.duplex = (lpa & LPA_10FULL) ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- ecmd->autoneg = AUTONEG_DISABLE;
- ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED100) ?
- SPEED_100 : SPEED_10));
- ecmd->duplex = (bmcr & BMCR_FULLDPLX) ?
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ ecmd->base.speed = ((bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10);
+ ecmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+
return 0;
}
static const struct ethtool_ops ops = {
.get_drvinfo = rtl8150_get_drvinfo,
- .get_settings = rtl8150_get_settings,
- .get_link = ethtool_op_get_link
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = rtl8150_get_link_ksettings,
};
static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
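The rtl8150 conversion builds a legacy u32 of SUPPORTED_* bits and hands it to ethtool_convert_legacy_u32_to_link_mode(), which copies those bits into the wider link-mode bitmap. A minimal model of that conversion; the bit count below is an assumption, since the real __ETHTOOL_LINK_MODE_MASK_NBITS grows over time:

#include <stdint.h>
#include <stdio.h>

#define LINK_MODE_BITS 92 /* assumed; only the idea matters here */
#define WORDS ((LINK_MODE_BITS + 31) / 32)

static void legacy_u32_to_link_mode(uint32_t *dst, uint32_t legacy)
{
	for (int i = 0; i < WORDS; i++)
		dst[i] = 0;
	dst[0] = legacy; /* legacy modes occupy the first 32 bits */
}

int main(void)
{
	uint32_t modes[WORDS];

	/* e.g. 10baseT/Half | 100baseT/Half in the legacy numbering */
	legacy_u32_to_link_mode(modes, (1u << 0) | (1u << 2));
	printf("word0 = 0x%08x\n", (unsigned)modes[0]);
	return 0;
}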
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index ac69f28d92d2..2110ab3513f0 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -199,6 +199,7 @@ static const struct net_device_ops sierra_net_device_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -648,9 +649,9 @@ static const struct ethtool_ops sierra_net_ethtool_ops = {
.get_link = sierra_net_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 190de9a90f73..d0a113743195 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -743,13 +743,13 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
.get_drvinfo = usbnet_get_drvinfo,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
.get_eeprom = smsc75xx_ethtool_get_eeprom,
.set_eeprom = smsc75xx_ethtool_set_eeprom,
.get_wol = smsc75xx_ethtool_get_wol,
.set_wol = smsc75xx_ethtool_set_wol,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1381,6 +1381,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_change_mtu = smsc75xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5f19fb0f025d..765400b62168 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -33,7 +33,7 @@
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
-#define SMSC_DRIVER_VERSION "1.0.5"
+#define SMSC_DRIVER_VERSION "1.0.6"
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)
#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
@@ -853,32 +853,32 @@ static void set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
pdata->mdix_ctrl = mdix_ctrl;
}
-static int smsc95xx_get_settings(struct net_device *net,
- struct ethtool_cmd *cmd)
+static int smsc95xx_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
int retval;
- retval = usbnet_get_settings(net, cmd);
+ retval = usbnet_get_link_ksettings(net, cmd);
- cmd->eth_tp_mdix = pdata->mdix_ctrl;
- cmd->eth_tp_mdix_ctrl = pdata->mdix_ctrl;
+ cmd->base.eth_tp_mdix = pdata->mdix_ctrl;
+ cmd->base.eth_tp_mdix_ctrl = pdata->mdix_ctrl;
return retval;
}
-static int smsc95xx_set_settings(struct net_device *net,
- struct ethtool_cmd *cmd)
+static int smsc95xx_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
int retval;
- if (pdata->mdix_ctrl != cmd->eth_tp_mdix_ctrl)
- set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
+ if (pdata->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
+ set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);
- retval = usbnet_set_settings(net, cmd);
+ retval = usbnet_set_link_ksettings(net, cmd);
return retval;
}
@@ -889,8 +889,6 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_drvinfo = usbnet_get_drvinfo,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
- .get_settings = smsc95xx_get_settings,
- .set_settings = smsc95xx_set_settings,
.get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
.get_eeprom = smsc95xx_ethtool_get_eeprom,
.set_eeprom = smsc95xx_ethtool_set_eeprom,
@@ -898,6 +896,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_regs = smsc95xx_ethtool_getregs,
.get_wol = smsc95xx_ethtool_get_wol,
.set_wol = smsc95xx_ethtool_set_wol,
+ .get_link_ksettings = smsc95xx_get_link_ksettings,
+ .set_link_ksettings = smsc95xx_set_link_ksettings,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1248,6 +1248,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = smsc95xx_ioctl,
@@ -1498,7 +1499,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
if (ret < 0)
return ret;
- if (val & 0xFFFF) {
+ if (val & RX_FIFO_INF_USED_) {
netdev_info(dev->net, "rx fifo not empty in autosuspend\n");
return -EBUSY;
}
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 29a4d9efce7c..cfc704f3a460 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -21,218 +21,280 @@
#define _SMSC95XX_H
/* Tx command words */
-#define TX_CMD_A_DATA_OFFSET_ (0x001F0000)
-#define TX_CMD_A_FIRST_SEG_ (0x00002000)
-#define TX_CMD_A_LAST_SEG_ (0x00001000)
-#define TX_CMD_A_BUF_SIZE_ (0x000007FF)
+#define TX_CMD_A_DATA_OFFSET_ (0x001F0000) /* Data Start Offset */
+#define TX_CMD_A_FIRST_SEG_ (0x00002000) /* First Segment */
+#define TX_CMD_A_LAST_SEG_ (0x00001000) /* Last Segment */
+#define TX_CMD_A_BUF_SIZE_ (0x000007FF) /* Buffer Size */
-#define TX_CMD_B_CSUM_ENABLE (0x00004000)
-#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000)
-#define TX_CMD_B_DISABLE_PADDING_ (0x00001000)
-#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF)
+#define TX_CMD_B_CSUM_ENABLE (0x00004000) /* TX Checksum Enable */
+#define TX_CMD_B_ADD_CRC_DIS_ (0x00002000) /* Add CRC Disable */
+#define TX_CMD_B_DIS_PADDING_ (0x00001000) /* Disable Frame Padding */
+#define TX_CMD_B_FRAME_LENGTH_ (0x000007FF) /* Frame Length (bytes) */
/* Rx status word */
-#define RX_STS_FF_ (0x40000000) /* Filter Fail */
-#define RX_STS_FL_ (0x3FFF0000) /* Frame Length */
-#define RX_STS_ES_ (0x00008000) /* Error Summary */
-#define RX_STS_BF_ (0x00002000) /* Broadcast Frame */
-#define RX_STS_LE_ (0x00001000) /* Length Error */
-#define RX_STS_RF_ (0x00000800) /* Runt Frame */
-#define RX_STS_MF_ (0x00000400) /* Multicast Frame */
-#define RX_STS_TL_ (0x00000080) /* Frame too long */
-#define RX_STS_CS_ (0x00000040) /* Collision Seen */
-#define RX_STS_FT_ (0x00000020) /* Frame Type */
-#define RX_STS_RW_ (0x00000010) /* Receive Watchdog */
-#define RX_STS_ME_ (0x00000008) /* Mii Error */
-#define RX_STS_DB_ (0x00000004) /* Dribbling */
-#define RX_STS_CRC_ (0x00000002) /* CRC Error */
-
-/* SCSRs */
-#define ID_REV (0x00)
-#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
-#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
-#define ID_REV_CHIP_ID_9500_ (0x9500)
-#define ID_REV_CHIP_ID_9500A_ (0x9E00)
-#define ID_REV_CHIP_ID_9512_ (0xEC00)
-#define ID_REV_CHIP_ID_9530_ (0x9530)
-#define ID_REV_CHIP_ID_89530_ (0x9E08)
-#define ID_REV_CHIP_ID_9730_ (0x9730)
-
-#define INT_STS (0x08)
-#define INT_STS_TX_STOP_ (0x00020000)
-#define INT_STS_RX_STOP_ (0x00010000)
-#define INT_STS_PHY_INT_ (0x00008000)
-#define INT_STS_TXE_ (0x00004000)
-#define INT_STS_TDFU_ (0x00002000)
-#define INT_STS_TDFO_ (0x00001000)
-#define INT_STS_RXDF_ (0x00000800)
-#define INT_STS_GPIOS_ (0x000007FF)
-#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF)
-
-#define RX_CFG (0x0C)
-#define RX_FIFO_FLUSH_ (0x00000001)
-
-#define TX_CFG (0x10)
-#define TX_CFG_ON_ (0x00000004)
-#define TX_CFG_STOP_ (0x00000002)
-#define TX_CFG_FIFO_FLUSH_ (0x00000001)
-
-#define HW_CFG (0x14)
-#define HW_CFG_BIR_ (0x00001000)
-#define HW_CFG_LEDB_ (0x00000800)
-#define HW_CFG_RXDOFF_ (0x00000600)
-#define HW_CFG_DRP_ (0x00000040)
-#define HW_CFG_MEF_ (0x00000020)
-#define HW_CFG_LRST_ (0x00000008)
-#define HW_CFG_PSEL_ (0x00000004)
-#define HW_CFG_BCE_ (0x00000002)
-#define HW_CFG_SRST_ (0x00000001)
-
-#define RX_FIFO_INF (0x18)
-
-#define PM_CTRL (0x20)
-#define PM_CTL_RES_CLR_WKP_STS (0x00000200)
-#define PM_CTL_DEV_RDY_ (0x00000080)
-#define PM_CTL_SUS_MODE_ (0x00000060)
-#define PM_CTL_SUS_MODE_0 (0x00000000)
-#define PM_CTL_SUS_MODE_1 (0x00000020)
-#define PM_CTL_SUS_MODE_2 (0x00000040)
-#define PM_CTL_SUS_MODE_3 (0x00000060)
-#define PM_CTL_PHY_RST_ (0x00000010)
-#define PM_CTL_WOL_EN_ (0x00000008)
-#define PM_CTL_ED_EN_ (0x00000004)
-#define PM_CTL_WUPS_ (0x00000003)
-#define PM_CTL_WUPS_NO_ (0x00000000)
-#define PM_CTL_WUPS_ED_ (0x00000001)
-#define PM_CTL_WUPS_WOL_ (0x00000002)
-#define PM_CTL_WUPS_MULTI_ (0x00000003)
-
-#define LED_GPIO_CFG (0x24)
-#define LED_GPIO_CFG_SPD_LED (0x01000000)
-#define LED_GPIO_CFG_LNK_LED (0x00100000)
-#define LED_GPIO_CFG_FDX_LED (0x00010000)
-
-#define GPIO_CFG (0x28)
-
-#define AFC_CFG (0x2C)
-
+#define RX_STS_FF_ (0x40000000) /* Filter Fail */
+#define RX_STS_FL_ (0x3FFF0000) /* Frame Length */
+#define RX_STS_ES_ (0x00008000) /* Error Summary */
+#define RX_STS_BF_ (0x00002000) /* Broadcast Frame */
+#define RX_STS_LE_ (0x00001000) /* Length Error */
+#define RX_STS_RF_ (0x00000800) /* Runt Frame */
+#define RX_STS_MF_ (0x00000400) /* Multicast Frame */
+#define RX_STS_TL_ (0x00000080) /* Frame too long */
+#define RX_STS_CS_ (0x00000040) /* Collision Seen */
+#define RX_STS_FT_ (0x00000020) /* Frame Type */
+#define RX_STS_RW_ (0x00000010) /* Receive Watchdog */
+#define RX_STS_ME_ (0x00000008) /* MII Error */
+#define RX_STS_DB_ (0x00000004) /* Dribbling */
+#define RX_STS_CRC_ (0x00000002) /* CRC Error */
+
+/* SCSRs - System Control and Status Registers */
+/* Device ID and Revision Register */
+#define ID_REV (0x00)
+#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
+#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
+#define ID_REV_CHIP_ID_9500_ (0x9500)
+#define ID_REV_CHIP_ID_9500A_ (0x9E00)
+#define ID_REV_CHIP_ID_9512_ (0xEC00)
+#define ID_REV_CHIP_ID_9530_ (0x9530)
+#define ID_REV_CHIP_ID_89530_ (0x9E08)
+#define ID_REV_CHIP_ID_9730_ (0x9730)
+
+/* Interrupt Status Register */
+#define INT_STS (0x08)
+#define INT_STS_MAC_RTO_ (0x00040000) /* MAC Reset Time Out */
+#define INT_STS_TX_STOP_ (0x00020000) /* TX Stopped */
+#define INT_STS_RX_STOP_ (0x00010000) /* RX Stopped */
+#define INT_STS_PHY_INT_ (0x00008000) /* PHY Interrupt */
+#define INT_STS_TXE_ (0x00004000) /* Transmitter Error */
+#define INT_STS_TDFU_ (0x00002000) /* TX Data FIFO Underrun */
+#define INT_STS_TDFO_ (0x00001000) /* TX Data FIFO Overrun */
+#define INT_STS_RXDF_ (0x00000800) /* RX Dropped Frame */
+#define INT_STS_GPIOS_ (0x000007FF) /* GPIOs Interrupts */
+#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF)
+
+/* Receive Configuration Register */
+#define RX_CFG (0x0C)
+#define RX_FIFO_FLUSH_ (0x00000001) /* Receive FIFO Flush */
+
+/* Transmit Configuration Register */
+#define TX_CFG (0x10)
+#define TX_CFG_ON_ (0x00000004) /* Transmitter Enable */
+#define TX_CFG_STOP_ (0x00000002) /* Stop Transmitter */
+#define TX_CFG_FIFO_FLUSH_ (0x00000001) /* Transmit FIFO Flush */
+
+/* Hardware Configuration Register */
+#define HW_CFG (0x14)
+#define HW_CFG_BIR_ (0x00001000) /* Bulk In Empty Response */
+#define HW_CFG_LEDB_ (0x00000800) /* Activity LED 80ms Bypass */
+#define HW_CFG_RXDOFF_ (0x00000600) /* RX Data Offset */
+#define HW_CFG_SBP_ (0x00000100) /* Stall Bulk Out Pipe Dis. */
+#define HW_CFG_IME_ (0x00000080) /* Internal MII Visi. Enable */
+#define HW_CFG_DRP_ (0x00000040) /* Discard Errored RX Frame */
+#define HW_CFG_MEF_ (0x00000020) /* Mult. ETH Frames/USB pkt */
+#define HW_CFG_ETC_ (0x00000010) /* EEPROM Timeout Control */
+#define HW_CFG_LRST_ (0x00000008) /* Soft Lite Reset */
+#define HW_CFG_PSEL_ (0x00000004) /* External PHY Select */
+#define HW_CFG_BCE_ (0x00000002) /* Burst Cap Enable */
+#define HW_CFG_SRST_ (0x00000001) /* Soft Reset */
+
+/* Receive FIFO Information Register */
+#define RX_FIFO_INF (0x18)
+#define RX_FIFO_INF_USED_ (0x0000FFFF) /* RX Data FIFO Used Space */
+
+/* Transmit FIFO Information Register */
+#define TX_FIFO_INF (0x1C)
+#define TX_FIFO_INF_FREE_ (0x0000FFFF) /* TX Data FIFO Free Space */
+
+/* Power Management Control Register */
+#define PM_CTRL (0x20)
+#define PM_CTL_RES_CLR_WKP_STS (0x00000200) /* Resume Clears Wakeup STS */
+#define PM_CTL_RES_CLR_WKP_EN (0x00000100) /* Resume Clears Wkp Enables */
+#define PM_CTL_DEV_RDY_ (0x00000080) /* Device Ready */
+#define PM_CTL_SUS_MODE_ (0x00000060) /* Suspend Mode */
+#define PM_CTL_SUS_MODE_0 (0x00000000)
+#define PM_CTL_SUS_MODE_1 (0x00000020)
+#define PM_CTL_SUS_MODE_2 (0x00000040)
+#define PM_CTL_SUS_MODE_3 (0x00000060)
+#define PM_CTL_PHY_RST_ (0x00000010) /* PHY Reset */
+#define PM_CTL_WOL_EN_ (0x00000008) /* Wake On Lan Enable */
+#define PM_CTL_ED_EN_ (0x00000004) /* Energy Detect Enable */
+#define PM_CTL_WUPS_ (0x00000003) /* Wake Up Status */
+#define PM_CTL_WUPS_NO_ (0x00000000) /* No Wake Up Event Detected */
+#define PM_CTL_WUPS_ED_ (0x00000001) /* Energy Detect */
+#define PM_CTL_WUPS_WOL_ (0x00000002) /* Wake On Lan */
+#define PM_CTL_WUPS_MULTI_ (0x00000003) /* Multiple Events Occurred */
+
+/* LED General Purpose IO Configuration Register */
+#define LED_GPIO_CFG (0x24)
+#define LED_GPIO_CFG_SPD_LED (0x01000000) /* GPIOz as Speed LED */
+#define LED_GPIO_CFG_LNK_LED (0x00100000) /* GPIOy as Link LED */
+#define LED_GPIO_CFG_FDX_LED (0x00010000) /* GPIOx as Full Duplex LED */
+
+/* General Purpose IO Configuration Register */
+#define GPIO_CFG (0x28)
+
+/* Automatic Flow Control Configuration Register */
+#define AFC_CFG (0x2C)
+#define AFC_CFG_HI_ (0x00FF0000) /* Auto Flow Ctrl High Level */
+#define AFC_CFG_LO_ (0x0000FF00) /* Auto Flow Ctrl Low Level */
+#define AFC_CFG_BACK_DUR_ (0x000000F0) /* Back Pressure Duration */
+#define AFC_CFG_FC_MULT_ (0x00000008) /* Flow Ctrl on Mcast Frame */
+#define AFC_CFG_FC_BRD_ (0x00000004) /* Flow Ctrl on Bcast Frame */
+#define AFC_CFG_FC_ADD_ (0x00000002) /* Flow Ctrl on Addr. Decode */
+#define AFC_CFG_FC_ANY_ (0x00000001) /* Flow Ctrl on Any Frame */
/* Hi watermark = 15.5Kb (~10 mtu pkts) */
/* low watermark = 3k (~2 mtu pkts) */
/* backpressure duration = ~ 350us */
/* Apply FC on any frame. */
-#define AFC_CFG_DEFAULT (0x00F830A1)
-
-#define E2P_CMD (0x30)
-#define E2P_CMD_BUSY_ (0x80000000)
-#define E2P_CMD_MASK_ (0x70000000)
-#define E2P_CMD_READ_ (0x00000000)
-#define E2P_CMD_EWDS_ (0x10000000)
-#define E2P_CMD_EWEN_ (0x20000000)
-#define E2P_CMD_WRITE_ (0x30000000)
-#define E2P_CMD_WRAL_ (0x40000000)
-#define E2P_CMD_ERASE_ (0x50000000)
-#define E2P_CMD_ERAL_ (0x60000000)
-#define E2P_CMD_RELOAD_ (0x70000000)
-#define E2P_CMD_TIMEOUT_ (0x00000400)
-#define E2P_CMD_LOADED_ (0x00000200)
-#define E2P_CMD_ADDR_ (0x000001FF)
-
-#define MAX_EEPROM_SIZE (512)
-
-#define E2P_DATA (0x34)
-#define E2P_DATA_MASK_ (0x000000FF)
-
-#define BURST_CAP (0x38)
-
+#define AFC_CFG_DEFAULT (0x00F830A1)
+
+/* EEPROM Command Register */
+#define E2P_CMD (0x30)
+#define E2P_CMD_BUSY_ (0x80000000) /* E2P Controller Busy */
+#define E2P_CMD_MASK_ (0x70000000) /* Command Mask (see below) */
+#define E2P_CMD_READ_ (0x00000000) /* Read Location */
+#define E2P_CMD_EWDS_ (0x10000000) /* Erase/Write Disable */
+#define E2P_CMD_EWEN_ (0x20000000) /* Erase/Write Enable */
+#define E2P_CMD_WRITE_ (0x30000000) /* Write Location */
+#define E2P_CMD_WRAL_ (0x40000000) /* Write All */
+#define E2P_CMD_ERASE_ (0x50000000) /* Erase Location */
+#define E2P_CMD_ERAL_ (0x60000000) /* Erase All */
+#define E2P_CMD_RELOAD_ (0x70000000) /* Data Reload */
+#define E2P_CMD_TIMEOUT_ (0x00000400) /* Set if no resp within 30ms */
+#define E2P_CMD_LOADED_ (0x00000200) /* Valid EEPROM found */
+#define E2P_CMD_ADDR_ (0x000001FF) /* Byte aligned address */
+
+#define MAX_EEPROM_SIZE (512)
+
+/* EEPROM Data Register */
+#define E2P_DATA (0x34)
+#define E2P_DATA_MASK_ (0x000000FF) /* EEPROM Data Mask */
+
+/* Burst Cap Register */
+#define BURST_CAP (0x38)
+#define BURST_CAP_MASK_ (0x000000FF) /* Max burst sent by the UTX */
+
+/* Configuration Straps Status Register */
#define STRAP_STATUS (0x3C)
-#define STRAP_STATUS_PWR_SEL_ (0x00000020)
-#define STRAP_STATUS_AMDIX_EN_ (0x00000010)
-#define STRAP_STATUS_PORT_SWAP_ (0x00000008)
-#define STRAP_STATUS_EEP_SIZE_ (0x00000004)
-#define STRAP_STATUS_RMT_WKP_ (0x00000002)
-#define STRAP_STATUS_EEP_DISABLE_ (0x00000001)
-
-#define GPIO_WAKE (0x64)
-
-#define INT_EP_CTL (0x68)
-#define INT_EP_CTL_INTEP_ (0x80000000)
-#define INT_EP_CTL_MACRTO_ (0x00080000)
-#define INT_EP_CTL_TX_STOP_ (0x00020000)
-#define INT_EP_CTL_RX_STOP_ (0x00010000)
-#define INT_EP_CTL_PHY_INT_ (0x00008000)
-#define INT_EP_CTL_TXE_ (0x00004000)
-#define INT_EP_CTL_TDFU_ (0x00002000)
-#define INT_EP_CTL_TDFO_ (0x00001000)
-#define INT_EP_CTL_RXDF_ (0x00000800)
-#define INT_EP_CTL_GPIOS_ (0x000007FF)
-
-#define BULK_IN_DLY (0x6C)
-
-/* MAC CSRs */
-#define MAC_CR (0x100)
-#define MAC_CR_RXALL_ (0x80000000)
-#define MAC_CR_RCVOWN_ (0x00800000)
-#define MAC_CR_LOOPBK_ (0x00200000)
-#define MAC_CR_FDPX_ (0x00100000)
-#define MAC_CR_MCPAS_ (0x00080000)
-#define MAC_CR_PRMS_ (0x00040000)
-#define MAC_CR_INVFILT_ (0x00020000)
-#define MAC_CR_PASSBAD_ (0x00010000)
-#define MAC_CR_HFILT_ (0x00008000)
-#define MAC_CR_HPFILT_ (0x00002000)
-#define MAC_CR_LCOLL_ (0x00001000)
-#define MAC_CR_BCAST_ (0x00000800)
-#define MAC_CR_DISRTY_ (0x00000400)
-#define MAC_CR_PADSTR_ (0x00000100)
-#define MAC_CR_BOLMT_MASK (0x000000C0)
-#define MAC_CR_DFCHK_ (0x00000020)
-#define MAC_CR_TXEN_ (0x00000008)
-#define MAC_CR_RXEN_ (0x00000004)
-
-#define ADDRH (0x104)
-
-#define ADDRL (0x108)
-
-#define HASHH (0x10C)
-
-#define HASHL (0x110)
-
-#define MII_ADDR (0x114)
-#define MII_WRITE_ (0x02)
-#define MII_BUSY_ (0x01)
-#define MII_READ_ (0x00) /* ~of MII Write bit */
-
-#define MII_DATA (0x118)
-
-#define FLOW (0x11C)
-#define FLOW_FCPT_ (0xFFFF0000)
-#define FLOW_FCPASS_ (0x00000004)
-#define FLOW_FCEN_ (0x00000002)
-#define FLOW_FCBSY_ (0x00000001)
-
-#define VLAN1 (0x120)
-
-#define VLAN2 (0x124)
-
-#define WUFF (0x128)
-#define LAN9500_WUFF_NUM (4)
-#define LAN9500A_WUFF_NUM (8)
-
-#define WUCSR (0x12C)
-#define WUCSR_WFF_PTR_RST_ (0x80000000)
-#define WUCSR_GUE_ (0x00000200)
-#define WUCSR_WUFR_ (0x00000040)
-#define WUCSR_MPR_ (0x00000020)
-#define WUCSR_WAKE_EN_ (0x00000004)
-#define WUCSR_MPEN_ (0x00000002)
-
-#define COE_CR (0x130)
-#define Tx_COE_EN_ (0x00010000)
-#define Rx_COE_MODE_ (0x00000002)
-#define Rx_COE_EN_ (0x00000001)
-
-/* Vendor-specific PHY Definitions */
-
+#define STRAP_STATUS_PWR_SEL_ (0x00000020) /* Device self-powered */
+#define STRAP_STATUS_AMDIX_EN_ (0x00000010) /* Auto-MDIX Enabled */
+#define STRAP_STATUS_PORT_SWAP_ (0x00000008) /* USBD+/USBD- Swapped */
+#define STRAP_STATUS_EEP_SIZE_ (0x00000004) /* EEPROM Size */
+#define STRAP_STATUS_RMT_WKP_ (0x00000002) /* Remote Wkp supported */
+#define STRAP_STATUS_EEP_DISABLE_ (0x00000001) /* EEPROM Disabled */
+
+/* Data Port Select Register */
+#define DP_SEL (0x40)
+
+/* Data Port Command Register */
+#define DP_CMD (0x44)
+
+/* Data Port Address Register */
+#define DP_ADDR (0x48)
+
+/* Data Port Data 0 Register */
+#define DP_DATA0 (0x4C)
+
+/* Data Port Data 1 Register */
+#define DP_DATA1 (0x50)
+
+/* General Purpose IO Wake Enable and Polarity Register */
+#define GPIO_WAKE (0x64)
+
+/* Interrupt Endpoint Control Register */
+#define INT_EP_CTL (0x68)
+#define INT_EP_CTL_INTEP_ (0x80000000) /* Always TX Interrupt PKT */
+#define INT_EP_CTL_MAC_RTO_ (0x00080000) /* MAC Reset Time Out */
+#define INT_EP_CTL_RX_FIFO_ (0x00040000) /* RX FIFO Has Frame */
+#define INT_EP_CTL_TX_STOP_ (0x00020000) /* TX Stopped */
+#define INT_EP_CTL_RX_STOP_ (0x00010000) /* RX Stopped */
+#define INT_EP_CTL_PHY_INT_ (0x00008000) /* PHY Interrupt */
+#define INT_EP_CTL_TXE_ (0x00004000) /* TX Error */
+#define INT_EP_CTL_TDFU_ (0x00002000) /* TX Data FIFO Underrun */
+#define INT_EP_CTL_TDFO_ (0x00001000) /* TX Data FIFO Overrun */
+#define INT_EP_CTL_RXDF_ (0x00000800) /* RX Dropped Frame */
+#define INT_EP_CTL_GPIOS_ (0x000007FF) /* GPIOs Interrupt Enable */
+
+/* Bulk In Delay Register (units of 16.667 ns, up to ~1092 µs) */
+#define BULK_IN_DLY (0x6C)
+
+/* MAC CSRs - MAC Control and Status Registers */
+/* MAC Control Register */
+#define MAC_CR (0x100)
+#define MAC_CR_RXALL_ (0x80000000) /* Receive All Mode */
+#define MAC_CR_RCVOWN_ (0x00800000) /* Disable Receive Own */
+#define MAC_CR_LOOPBK_ (0x00200000) /* Loopback Operation Mode */
+#define MAC_CR_FDPX_ (0x00100000) /* Full Duplex Mode */
+#define MAC_CR_MCPAS_ (0x00080000) /* Pass All Multicast */
+#define MAC_CR_PRMS_ (0x00040000) /* Promiscuous Mode */
+#define MAC_CR_INVFILT_ (0x00020000) /* Inverse Filtering */
+#define MAC_CR_PASSBAD_ (0x00010000) /* Pass Bad Frames */
+#define MAC_CR_HFILT_ (0x00008000) /* Hash Only Filtering Mode */
+#define MAC_CR_HPFILT_ (0x00002000) /* Hash/Perfect Filt. Mode */
+#define MAC_CR_LCOLL_ (0x00001000) /* Late Collision Control */
+#define MAC_CR_BCAST_ (0x00000800) /* Disable Broadcast Frames */
+#define MAC_CR_DISRTY_ (0x00000400) /* Disable Retry */
+#define MAC_CR_PADSTR_ (0x00000100) /* Automatic Pad Stripping */
+#define MAC_CR_BOLMT_MASK (0x000000C0) /* BackOff Limit */
+#define MAC_CR_DFCHK_ (0x00000020) /* Deferral Check */
+#define MAC_CR_TXEN_ (0x00000008) /* Transmitter Enable */
+#define MAC_CR_RXEN_ (0x00000004) /* Receiver Enable */
+
+/* MAC Address High Register */
+#define ADDRH (0x104)
+
+/* MAC Address Low Register */
+#define ADDRL (0x108)
+
+/* Multicast Hash Table High Register */
+#define HASHH (0x10C)
+
+/* Multicast Hash Table Low Register */
+#define HASHL (0x110)
+
+/* MII Access Register */
+#define MII_ADDR (0x114)
+#define MII_WRITE_ (0x02)
+#define MII_BUSY_ (0x01)
+#define MII_READ_ (0x00) /* complement of MII Write bit */
+
+/* MII Data Register */
+#define MII_DATA (0x118)
+
+/* Flow Control Register */
+#define FLOW (0x11C)
+#define FLOW_FCPT_ (0xFFFF0000) /* Pause Time */
+#define FLOW_FCPASS_ (0x00000004) /* Pass Control Frames */
+#define FLOW_FCEN_ (0x00000002) /* Flow Control Enable */
+#define FLOW_FCBSY_ (0x00000001) /* Flow Control Busy */
+
+/* VLAN1 Tag Register */
+#define VLAN1 (0x120)
+
+/* VLAN2 Tag Register */
+#define VLAN2 (0x124)
+
+/* Wake Up Frame Filter Register */
+#define WUFF (0x128)
+#define LAN9500_WUFF_NUM (4)
+#define LAN9500A_WUFF_NUM (8)
+
+/* Wake Up Control and Status Register */
+#define WUCSR (0x12C)
+#define WUCSR_WFF_PTR_RST_ (0x80000000) /* WFrame Filter Pointer Rst */
+#define WUCSR_GUE_ (0x00000200) /* Global Unicast Enable */
+#define WUCSR_WUFR_ (0x00000040) /* Wakeup Frame Received */
+#define WUCSR_MPR_ (0x00000020) /* Magic Packet Received */
+#define WUCSR_WAKE_EN_ (0x00000004) /* Wakeup Frame Enable */
+#define WUCSR_MPEN_ (0x00000002) /* Magic Packet Enable */
+
+/* Checksum Offload Engine Control Register */
+#define COE_CR (0x130)
+#define Tx_COE_EN_ (0x00010000) /* TX Csum Offload Enable */
+#define Rx_COE_MODE_ (0x00000002) /* RX Csum Offload Mode */
+#define Rx_COE_EN_ (0x00000001) /* RX Csum Offload Enable */
+
+/* Vendor-specific PHY Definitions (via MII access) */
/* EDPD NLP / crossover time configuration (LAN9500A only) */
#define PHY_EDPD_CONFIG (16)
#define PHY_EDPD_CONFIG_TX_NLP_EN_ ((u16)0x8000)
@@ -255,17 +317,20 @@
#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000)
#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002)
+/* Control/Status Indication Register */
#define SPECIAL_CTRL_STS (27)
#define SPECIAL_CTRL_STS_OVRRD_AMDIX_ ((u16)0x8000)
#define SPECIAL_CTRL_STS_AMDIX_ENABLE_ ((u16)0x4000)
#define SPECIAL_CTRL_STS_AMDIX_STATE_ ((u16)0x2000)
+/* Interrupt Source Register */
#define PHY_INT_SRC (29)
#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080)
#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040)
#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020)
#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010)
+/* Interrupt Mask Register */
#define PHY_INT_MASK (30)
#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080)
#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040)
@@ -273,7 +338,7 @@
#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010)
#define PHY_INT_MASK_DEFAULT_ (PHY_INT_MASK_ANEG_COMP_ | \
PHY_INT_MASK_LINK_DOWN_)
-
+/* PHY Special Control/Status Register */
#define PHY_SPECIAL (31)
#define PHY_SPECIAL_SPD_ ((u16)0x001C)
#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004)
@@ -287,12 +352,13 @@
#define USB_VENDOR_REQUEST_GET_STATS 0xA2
/* Interrupt Endpoint status word bitfields */
-#define INT_ENP_TX_STOP_ ((u32)BIT(17))
-#define INT_ENP_RX_STOP_ ((u32)BIT(16))
-#define INT_ENP_PHY_INT_ ((u32)BIT(15))
-#define INT_ENP_TXE_ ((u32)BIT(14))
-#define INT_ENP_TDFU_ ((u32)BIT(13))
-#define INT_ENP_TDFO_ ((u32)BIT(12))
-#define INT_ENP_RXDF_ ((u32)BIT(11))
+#define INT_ENP_MAC_RTO_ ((u32)BIT(18)) /* MAC Reset Time Out */
+#define INT_ENP_TX_STOP_ ((u32)BIT(17)) /* TX Stopped */
+#define INT_ENP_RX_STOP_ ((u32)BIT(16)) /* RX Stopped */
+#define INT_ENP_PHY_INT_ ((u32)BIT(15)) /* PHY Interrupt */
+#define INT_ENP_TXE_ ((u32)BIT(14)) /* TX Error */
+#define INT_ENP_TDFU_ ((u32)BIT(13)) /* TX FIFO Underrun */
+#define INT_ENP_TDFO_ ((u32)BIT(12)) /* TX FIFO Overrun */
+#define INT_ENP_RXDF_ ((u32)BIT(11)) /* RX Dropped Frame */
#endif /* _SMSC95XX_H */
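With RX_FIFO_INF_USED_ now named, the autosuspend check in smsc95xx.c reads as a field extraction rather than a bare 0xFFFF. A quick illustrative use of the mask; the register value below is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define RX_FIFO_INF_USED_ 0x0000FFFFu /* RX Data FIFO Used Space */

int main(void)
{
	uint32_t rx_fifo_inf = 0x00010042; /* hypothetical register read */
	uint32_t used = rx_fifo_inf & RX_FIFO_INF_USED_;

	/* suspend3 is refused while data is still queued in the FIFO */
	printf("rx fifo %s (%u bytes used)\n",
	       used ? "not empty" : "empty", used);
	return 0;
}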
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index aadfe1d1c37e..2d316c1b851b 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -249,9 +249,9 @@ static const struct ethtool_ops sr9700_ethtool_ops = {
.set_msglevel = usbnet_set_msglevel,
.get_eeprom_len = sr9700_get_eeprom_len,
.get_eeprom = sr9700_get_eeprom,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static void sr9700_set_multicast(struct net_device *netdev)
@@ -308,6 +308,7 @@ static const struct net_device_ops sr9700_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = sr9700_ioctl,
.ndo_set_rx_mode = sr9700_set_multicast,
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index a50df0d8fb9a..9277a0f228df 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -524,9 +524,9 @@ static const struct ethtool_ops sr9800_ethtool_ops = {
.set_wol = sr_set_wol,
.get_eeprom_len = sr_get_eeprom_len,
.get_eeprom = sr_get_eeprom,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
static int sr9800_link_reset(struct usbnet *dev)
@@ -679,6 +679,7 @@ static const struct net_device_ops sr9800_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = sr_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = sr_ioctl,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 453244805c52..79048e72c1bd 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -82,8 +82,6 @@
// randomly generated ethernet address
static u8 node_id [ETH_ALEN];
-static const char driver_name [] = "usbnet";
-
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
@@ -316,6 +314,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
*/
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
+ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
int status;
if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
@@ -327,8 +326,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
if (skb->protocol == 0)
skb->protocol = eth_type_trans (skb, dev->net);
- dev->net->stats.rx_packets++;
- dev->net->stats.rx_bytes += skb->len;
+ u64_stats_update_begin(&stats64->syncp);
+ stats64->rx_packets++;
+ stats64->rx_bytes += skb->len;
+ u64_stats_update_end(&stats64->syncp);
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof (struct ethhdr), skb->protocol);
@@ -947,18 +948,20 @@ EXPORT_SYMBOL_GPL(usbnet_open);
* they'll probably want to use this base set.
*/
-int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
+int usbnet_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
if (!dev->mii.mdio_read)
return -EOPNOTSUPP;
- return mii_ethtool_gset(&dev->mii, cmd);
+ return mii_ethtool_get_link_ksettings(&dev->mii, cmd);
}
-EXPORT_SYMBOL_GPL(usbnet_get_settings);
+EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings);
-int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
+int usbnet_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *cmd)
{
struct usbnet *dev = netdev_priv(net);
int retval;
@@ -966,7 +969,7 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
if (!dev->mii.mdio_write)
return -EOPNOTSUPP;
- retval = mii_ethtool_sset(&dev->mii, cmd);
+ retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd);
/* link speed/duplex might have changed */
if (dev->driver_info->link_reset)
@@ -976,9 +979,39 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
usbnet_update_max_qlen(dev);
return retval;
+}
+EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
+void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
+{
+ struct usbnet *dev = netdev_priv(net);
+ unsigned int start;
+ int cpu;
+
+ netdev_stats_to_stats64(stats, &net->stats);
+
+ for_each_possible_cpu(cpu) {
+ struct pcpu_sw_netstats *stats64;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+
+ stats64 = per_cpu_ptr(dev->stats64, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats64->syncp);
+ rx_packets = stats64->rx_packets;
+ rx_bytes = stats64->rx_bytes;
+ tx_packets = stats64->tx_packets;
+ tx_bytes = stats64->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
}
-EXPORT_SYMBOL_GPL(usbnet_set_settings);
+EXPORT_SYMBOL_GPL(usbnet_get_stats64);
u32 usbnet_get_link (struct net_device *net)
{
@@ -1038,14 +1071,14 @@ EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
/* drivers may override default ethtool_ops in their bind() routine */
static const struct ethtool_ops usbnet_ethtool_ops = {
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
.get_drvinfo = usbnet_get_drvinfo,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = usbnet_get_link_ksettings,
+ .set_link_ksettings = usbnet_set_link_ksettings,
};
/*-------------------------------------------------------------------------*/
@@ -1211,8 +1244,12 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- dev->net->stats.tx_packets += entry->packets;
- dev->net->stats.tx_bytes += entry->length;
+ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+
+ u64_stats_update_begin(&stats64->syncp);
+ stats64->tx_packets += entry->packets;
+ stats64->tx_bytes += entry->length;
+ u64_stats_update_end(&stats64->syncp);
} else {
dev->net->stats.tx_errors++;
@@ -1569,6 +1606,7 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
+ free_percpu(dev->stats64);
free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1580,6 +1618,7 @@ static const struct net_device_ops usbnet_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
.ndo_change_mtu = usbnet_change_mtu,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1641,6 +1680,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->intf = udev;
dev->driver_info = info;
dev->driver_name = name;
+
+ dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->stats64)
+ goto out0;
+
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
init_waitqueue_head(&dev->wait);
@@ -1780,6 +1824,8 @@ out1:
*/
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
+ free_percpu(dev->stats64);
+out0:
free_netdev(net);
out:
return status;
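usbnet_get_stats64() above sums per-CPU counters that writers bump inside u64_stats_update_begin/end, while readers loop until they see a stable sequence. A single-threaded userspace sketch of that sequence-counter pattern; the kernel primitives add the memory barriers this toy omits:

#include <stdint.h>
#include <stdio.h>

struct pcpu_stats {
	unsigned seq;
	uint64_t rx_packets, rx_bytes;
};

static void stats_update(struct pcpu_stats *s, uint64_t pkts, uint64_t bytes)
{
	s->seq++;            /* odd: update in progress */
	s->rx_packets += pkts;
	s->rx_bytes += bytes;
	s->seq++;            /* even: consistent again */
}

static void stats_fetch(const struct pcpu_stats *s,
			uint64_t *pkts, uint64_t *bytes)
{
	unsigned start;

	do {
		start = s->seq; /* retry while odd or changed underneath us */
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while ((start & 1) || start != s->seq);
}

int main(void)
{
	struct pcpu_stats cpu[2] = { 0 };
	uint64_t pkts, bytes, tp = 0, tb = 0;

	stats_update(&cpu[0], 3, 1500);
	stats_update(&cpu[1], 1, 60);

	/* sum across "CPUs", as usbnet_get_stats64() does per possible CPU */
	for (int i = 0; i < 2; i++) {
		stats_fetch(&cpu[i], &pkts, &bytes);
		tp += pkts;
		tb += bytes;
	}
	printf("rx_packets=%llu rx_bytes=%llu\n",
	       (unsigned long long)tp, (unsigned long long)tb);
	return 0;
}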
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8c39d6d690e5..38f0f03a29c8 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -45,18 +45,13 @@ static struct {
{ "peer_ifindex" },
};
-static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int veth_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->supported = 0;
- cmd->advertising = 0;
- ethtool_cmd_speed_set(cmd, SPEED_10000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_TP;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_TP;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -95,12 +90,12 @@ static void veth_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops veth_ethtool_ops = {
- .get_settings = veth_get_settings,
.get_drvinfo = veth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = veth_get_strings,
.get_sset_count = veth_get_sset_count,
.get_ethtool_stats = veth_get_ethtool_stats,
+ .get_link_ksettings = veth_get_link_ksettings,
};
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -373,7 +368,8 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
ifmp = nla_data(nla_peer);
err = rtnl_nla_parse_ifla(peer_tb,
nla_data(nla_peer) + sizeof(struct ifinfomsg),
- nla_len(nla_peer) - sizeof(struct ifinfomsg));
+ nla_len(nla_peer) - sizeof(struct ifinfomsg),
+ NULL);
if (err < 0)
return err;
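
Background for the ksettings conversions in this patch: the legacy
struct ethtool_cmd carried link speed split across two 16-bit fields,
whereas ethtool_link_ksettings keeps a plain u32 in cmd->base.speed,
which is why helpers such as ethtool_cmd_speed_set() drop out. A sketch
of the legacy accessor being retired (mirroring the old uapi layout):

	static inline u32 legacy_ethtool_cmd_speed(const struct ethtool_cmd *ep)
	{
		return (ep->speed_hi << 16) | ep->speed;
	}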
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f36584616e7d..3d0bc484b3d7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -33,9 +33,10 @@
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
-static bool csum = true, gso = true;
+static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
+module_param(napi_tx, bool, 0644);
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
@@ -86,6 +87,8 @@ struct send_queue {
/* Name of the send queue: output.$index */
char name[40];
+
+ struct napi_struct napi;
};
/* Internal representation of a receive virtqueue */
@@ -239,15 +242,39 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void virtqueue_napi_schedule(struct napi_struct *napi,
+ struct virtqueue *vq)
+{
+ if (napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(vq);
+ __napi_schedule(napi);
+ }
+}
+
+static void virtqueue_napi_complete(struct napi_struct *napi,
+ struct virtqueue *vq, int processed)
+{
+ int opaque;
+
+ opaque = virtqueue_enable_cb_prepare(vq);
+ if (napi_complete_done(napi, processed) &&
+ unlikely(virtqueue_poll(vq, opaque)))
+ virtqueue_napi_schedule(napi, vq);
+}
+
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
+ struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
/* Suppress further interrupts. */
virtqueue_disable_cb(vq);
- /* We were probably waiting for more output buffers. */
- netif_wake_subqueue(vi->dev, vq2txq(vq));
+ if (napi->weight)
+ virtqueue_napi_schedule(napi, vq);
+ else
+ /* We were probably waiting for more output buffers. */
+ netif_wake_subqueue(vi->dev, vq2txq(vq));
}
static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
@@ -936,27 +963,44 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
- /* Schedule NAPI, Suppress further interrupts if successful. */
- if (napi_schedule_prep(&rq->napi)) {
- virtqueue_disable_cb(rvq);
- __napi_schedule(&rq->napi);
- }
+ virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct receive_queue *rq)
+static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
- napi_enable(&rq->napi);
+ napi_enable(napi);
/* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&rq->napi)) {
- virtqueue_disable_cb(rq->vq);
- local_bh_disable();
- __napi_schedule(&rq->napi);
- local_bh_enable();
+ * won't get another interrupt, so process any outstanding packets now.
+ * Call local_bh_enable afterwards to trigger softIRQ processing.
+ */
+ local_bh_disable();
+ virtqueue_napi_schedule(napi, vq);
+ local_bh_enable();
+}
+
+static void virtnet_napi_tx_enable(struct virtnet_info *vi,
+ struct virtqueue *vq,
+ struct napi_struct *napi)
+{
+ if (!napi->weight)
+ return;
+
+ /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
+ * enable the feature if this is likely affine with the transmit path.
+ */
+ if (!vi->affinity_hint_set) {
+ napi->weight = 0;
+ return;
}
+
+ return virtnet_napi_enable(vq, napi);
+}
+
+static void virtnet_napi_tx_disable(struct napi_struct *napi)
+{
+ if (napi->weight)
+ napi_disable(napi);
}
static void refill_work(struct work_struct *work)
@@ -971,7 +1015,7 @@ static void refill_work(struct work_struct *work)
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq);
+ virtnet_napi_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -1007,25 +1051,68 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
return received;
}
+static void free_old_xmit_skbs(struct send_queue *sq)
+{
+ struct sk_buff *skb;
+ unsigned int len;
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
+ unsigned int packets = 0;
+ unsigned int bytes = 0;
+
+ while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ pr_debug("Sent skb %p\n", skb);
+
+ bytes += skb->len;
+ packets++;
+
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Avoid overhead when no packets have been processed;
+ * this happens when called speculatively from start_xmit.
+ */
+ if (!packets)
+ return;
+
+ u64_stats_update_begin(&stats->tx_syncp);
+ stats->tx_bytes += bytes;
+ stats->tx_packets += packets;
+ u64_stats_update_end(&stats->tx_syncp);
+}
+
+static void virtnet_poll_cleantx(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ unsigned int index = vq2rxq(rq->vq);
+ struct send_queue *sq = &vi->sq[index];
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+ if (!sq->napi.weight)
+ return;
+
+ if (__netif_tx_trylock(txq)) {
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
+ }
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
+}
+
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
- unsigned int r, received;
+ unsigned int received;
+
+ virtnet_poll_cleantx(rq);
received = virtnet_receive(rq, budget);
/* Out of packets? */
- if (received < budget) {
- r = virtqueue_enable_cb_prepare(rq->vq);
- if (napi_complete_done(napi, received)) {
- if (unlikely(virtqueue_poll(rq->vq, r)) &&
- napi_schedule_prep(napi)) {
- virtqueue_disable_cb(rq->vq);
- __napi_schedule(napi);
- }
- }
- }
+ if (received < budget)
+ virtqueue_napi_complete(napi, rq->vq, received);
return received;
}
@@ -1040,40 +1127,29 @@ static int virtnet_open(struct net_device *dev)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
}
return 0;
}
-static void free_old_xmit_skbs(struct send_queue *sq)
+static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
- struct sk_buff *skb;
- unsigned int len;
+ struct send_queue *sq = container_of(napi, struct send_queue, napi);
struct virtnet_info *vi = sq->vq->vdev->priv;
- struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
- unsigned int packets = 0;
- unsigned int bytes = 0;
-
- while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- pr_debug("Sent skb %p\n", skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
- bytes += skb->len;
- packets++;
+ __netif_tx_lock(txq, raw_smp_processor_id());
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
- dev_kfree_skb_any(skb);
- }
+ virtqueue_napi_complete(napi, sq->vq, 0);
- /* Avoid overhead when no packets have been processed
- * happens when called speculatively from start_xmit.
- */
- if (!packets)
- return;
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
- u64_stats_update_begin(&stats->tx_syncp);
- stats->tx_bytes += bytes;
- stats->tx_packets += packets;
- u64_stats_update_end(&stats->tx_syncp);
+ return 0;
}
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
@@ -1125,10 +1201,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
bool kick = !skb->xmit_more;
+ bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
+ if (use_napi && kick)
+ virtqueue_enable_cb_delayed(sq->vq);
+
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -1147,8 +1227,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Don't wait up for transmitted skbs to be freed. */
- skb_orphan(skb);
- nf_reset(skb);
+ if (!use_napi) {
+ skb_orphan(skb);
+ nf_reset(skb);
+ }
/* If running out of space, stop queue to avoid getting packets that we
* are then unable to transmit.
@@ -1162,7 +1244,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
- if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ if (!use_napi &&
+ unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit_skbs(sq);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
@@ -1366,8 +1449,10 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ virtnet_napi_tx_disable(&vi->sq[i].napi);
+ }
return 0;
}
@@ -1636,47 +1721,57 @@ static void virtnet_get_channels(struct net_device *dev,
}
/* Check if the user is trying to change anything besides speed/duplex */
-static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
+static bool
+virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
{
- struct ethtool_cmd diff1 = *cmd;
- struct ethtool_cmd diff2 = {};
+ struct ethtool_link_ksettings diff1 = *cmd;
+ struct ethtool_link_ksettings diff2 = {};
/* cmd is always set so we need to clear it, validate the port type
* and also without autonegotiation we can ignore advertising
*/
- ethtool_cmd_speed_set(&diff1, 0);
- diff2.port = PORT_OTHER;
- diff1.advertising = 0;
- diff1.duplex = 0;
- diff1.cmd = 0;
+ diff1.base.speed = 0;
+ diff2.base.port = PORT_OTHER;
+ ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
+ diff1.base.duplex = 0;
+ diff1.base.cmd = 0;
+ diff1.base.link_mode_masks_nwords = 0;
- return !memcmp(&diff1, &diff2, sizeof(diff1));
+ return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
+ bitmap_empty(diff1.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) &&
+ bitmap_empty(diff1.link_modes.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) &&
+ bitmap_empty(diff1.link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int virtnet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct virtnet_info *vi = netdev_priv(dev);
u32 speed;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
/* don't allow custom speed and duplex */
if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->duplex) ||
+ !ethtool_validate_duplex(cmd->base.duplex) ||
!virtnet_validate_ethtool_cmd(cmd))
return -EINVAL;
vi->speed = speed;
- vi->duplex = cmd->duplex;
+ vi->duplex = cmd->base.duplex;
return 0;
}
-static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int virtnet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct virtnet_info *vi = netdev_priv(dev);
- ethtool_cmd_speed_set(cmd, vi->speed);
- cmd->duplex = vi->duplex;
- cmd->port = PORT_OTHER;
+ cmd->base.speed = vi->speed;
+ cmd->base.duplex = vi->duplex;
+ cmd->base.port = PORT_OTHER;
return 0;
}
@@ -1696,8 +1791,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_channels = virtnet_set_channels,
.get_channels = virtnet_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_settings = virtnet_get_settings,
- .set_settings = virtnet_set_settings,
+ .get_link_ksettings = virtnet_get_link_ksettings,
+ .set_link_ksettings = virtnet_set_link_ksettings,
};
static void virtnet_freeze_down(struct virtio_device *vdev)
@@ -1712,8 +1807,10 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ virtnet_napi_tx_disable(&vi->sq[i].napi);
+ }
}
}
@@ -1736,8 +1833,11 @@ static int virtnet_restore_up(struct virtio_device *vdev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- for (i = 0; i < vi->max_queue_pairs; i++)
- virtnet_napi_enable(&vi->rq[i]);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+ &vi->sq[i].napi);
+ }
}
netif_device_attach(vi->dev);
@@ -1778,7 +1878,8 @@ err:
return ret;
}
-static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
struct virtnet_info *vi = netdev_priv(dev);
@@ -1790,16 +1891,17 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
- netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
+ NL_SET_ERR_MSG(extack, "can't set XDP while host is implementing LRO, disable LRO first");
return -EOPNOTSUPP;
}
if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
- netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
+ NL_SET_ERR_MSG(extack, "XDP expects header/data in single page, any_header_sg required");
return -EINVAL;
}
if (dev->mtu > max_sz) {
+ NL_SET_ERR_MSG(extack, "MTU too large to enable XDP");
netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
return -EINVAL;
}
@@ -1810,6 +1912,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
/* XDP requires extra queues for XDP_TX */
if (curr_qp + xdp_qp > vi->max_queue_pairs) {
+ NL_SET_ERR_MSG(extack, "Too few free TX rings available");
netdev_warn(dev, "request %i queues but max is %i\n",
curr_qp + xdp_qp, vi->max_queue_pairs);
return -ENOMEM;
@@ -1871,7 +1974,7 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return virtnet_xdp_set(dev, xdp->prog);
+ return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
case XDP_QUERY_PROG:
xdp->prog_attached = virtnet_xdp_query(dev);
return 0;
@@ -1942,6 +2045,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
napi_hash_del(&vi->rq[i].napi);
netif_napi_del(&vi->rq[i].napi);
+ netif_napi_del(&vi->sq[i].napi);
}
/* We called napi_hash_del() before netif_napi_del(),
@@ -2127,6 +2231,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
vi->rq[i].pages = NULL;
netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
napi_weight);
+ netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
+ napi_tx ? napi_weight : 0);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
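
One subtlety in the tx napi path above: with use_napi set, completion
interrupts are only re-armed at kick time, and via
virtqueue_enable_cb_delayed() rather than virtqueue_enable_cb(), so the
host coalesces tx completions (it notifies roughly once a large share
of the pending buffers has been consumed) instead of interrupting per
packet. A minimal sketch of that tail of start_xmit(), assuming the
kick/use_napi flags computed there:

	static void sketch_xmit_tail(struct send_queue *sq,
				     struct netdev_queue *txq,
				     bool kick, bool use_napi)
	{
		if (use_napi && kick)
			/* arm a batched (delayed) tx completion interrupt */
			virtqueue_enable_cb_delayed(sq->vq);

		if (kick || netif_xmit_stopped(txq))
			virtqueue_kick(sq->vq);	/* notify the host */
	}

Note also that napi_tx is 0644, so it can be flipped at runtime through
/sys/module/virtio_net/parameters/napi_tx, but each queue's napi weight
is latched at netif_tx_napi_add() time, so in practice the toggle only
affects queues allocated afterwards.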
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index f88ffafebfbf..2ff27314e047 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -471,22 +471,25 @@ vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
static int
-vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+vmxnet3_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
- SUPPORTED_TP;
- ecmd->advertising = ADVERTISED_TP;
- ecmd->port = PORT_TP;
- ecmd->transceiver = XCVR_INTERNAL;
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
+ ecmd->base.port = PORT_TP;
if (adapter->link_speed) {
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
- ecmd->duplex = DUPLEX_FULL;
+ ecmd->base.speed = adapter->link_speed;
+ ecmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
}
@@ -880,7 +883,6 @@ done:
}
static const struct ethtool_ops vmxnet3_ethtool_ops = {
- .get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
.get_regs = vmxnet3_get_regs,
@@ -900,6 +902,7 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_rxfh = vmxnet3_get_rss,
.set_rxfh = vmxnet3_set_rss,
#endif
+ .get_link_ksettings = vmxnet3_get_link_ksettings,
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
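
The link-mode helpers used here are thin wrappers over bitmap
operations on the supported/advertising masks; roughly (sketch of the
macro expansions):

	/* ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full) */
	__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		  ecmd->link_modes.supported);

	/* ethtool_link_ksettings_zero_link_mode(ecmd, advertising) */
	bitmap_zero(ecmd->link_modes.advertising,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);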
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7d909c8183e9..ceda5861da78 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -104,6 +104,23 @@ static void vrf_get_stats64(struct net_device *dev,
}
}
+/* by default VRF devices do not have a qdisc and are expected
+ * to be created with only a single queue.
+ */
+static bool qdisc_tx_is_default(const struct net_device *dev)
+{
+ struct netdev_queue *txq;
+ struct Qdisc *qdisc;
+
+ if (dev->num_tx_queues > 1)
+ return false;
+
+ txq = netdev_get_tx_queue(dev, 0);
+ qdisc = rcu_access_pointer(txq->qdisc);
+
+ return !qdisc->enqueue;
+}
+
/* Local traffic destined to local address. Reinsert the packet to rx
* path, similar to loopback handling.
*/
@@ -357,6 +374,29 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
+static int vrf_finish_direct(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct net_device *vrf_dev = skb->dev;
+
+ if (!list_empty(&vrf_dev->ptype_all) &&
+ likely(skb_headroom(skb) >= ETH_HLEN)) {
+ struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+
+ ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
+ eth_zero_addr(eth->h_dest);
+ eth->h_proto = skb->protocol;
+
+ rcu_read_lock_bh();
+ dev_queue_xmit_nit(skb, vrf_dev);
+ rcu_read_unlock_bh();
+
+ skb_pull(skb, ETH_HLEN);
+ }
+
+ return 1;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
@@ -405,18 +445,13 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
* packet to go through device based features such as qdisc, netfilter
* hooks and packet sockets with skb->dev set to vrf device.
*/
-static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
- struct sock *sk,
- struct sk_buff *skb)
+static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
+ struct sk_buff *skb)
{
struct net_vrf *vrf = netdev_priv(vrf_dev);
struct dst_entry *dst = NULL;
struct rt6_info *rt6;
- /* don't divert link scope packets */
- if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
- return skb;
-
rcu_read_lock();
rt6 = rcu_dereference(vrf->rt6);
@@ -438,6 +473,55 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output6_direct(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ skb->protocol = htons(ETH_P_IPV6);
+
+ return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+ net, sk, skb, NULL, skb->dev,
+ vrf_finish_direct,
+ !(IPCB(skb)->flags & IPSKB_REROUTED));
+}
+
+static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct net *net = dev_net(vrf_dev);
+ int err;
+
+ skb->dev = vrf_dev;
+
+ err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
+ skb, NULL, vrf_dev, vrf_output6_direct);
+
+ if (likely(err == 1))
+ err = vrf_output6_direct(net, sk, skb);
+
+ /* reset skb device */
+ if (likely(err == 1))
+ nf_reset(skb);
+ else
+ skb = NULL;
+
+ return skb;
+}
+
+static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ /* don't divert link scope packets */
+ if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
+ return skb;
+
+ if (qdisc_tx_is_default(vrf_dev))
+ return vrf_ip6_out_direct(vrf_dev, sk, skb);
+
+ return vrf_ip6_out_redirect(vrf_dev, skb);
+}
+
/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
@@ -609,18 +693,13 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
* packet to go through device based features such as qdisc, netfilter
* hooks and packet sockets with skb->dev set to vrf device.
*/
-static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
- struct sock *sk,
- struct sk_buff *skb)
+static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
+ struct sk_buff *skb)
{
struct net_vrf *vrf = netdev_priv(vrf_dev);
struct dst_entry *dst = NULL;
struct rtable *rth;
- /* don't divert multicast */
- if (ipv4_is_multicast(ip_hdr(skb)->daddr))
- return skb;
-
rcu_read_lock();
rth = rcu_dereference(vrf->rth);
@@ -642,6 +721,55 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output_direct(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ skb->protocol = htons(ETH_P_IP);
+
+ return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+ net, sk, skb, NULL, skb->dev,
+ vrf_finish_direct,
+ !(IPCB(skb)->flags & IPSKB_REROUTED));
+}
+
+static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct net *net = dev_net(vrf_dev);
+ int err;
+
+ skb->dev = vrf_dev;
+
+ err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
+ skb, NULL, vrf_dev, vrf_output_direct);
+
+ if (likely(err == 1))
+ err = vrf_output_direct(net, sk, skb);
+
+ /* reset skb device */
+ if (likely(err == 1))
+ nf_reset(skb);
+ else
+ skb = NULL;
+
+ return skb;
+}
+
+static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ /* don't divert multicast */
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ return skb;
+
+ if (qdisc_tx_is_default(vrf_dev))
+ return vrf_ip_out_direct(vrf_dev, sk, skb);
+
+ return vrf_ip_out_redirect(vrf_dev, skb);
+}
+
/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
struct sock *sk,
@@ -749,14 +877,24 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
int ret;
+ /* Do not allow the loopback device to be enslaved to a VRF.
+ * The vrf device acts as the loopback for the vrf.
+ */
+ if (port_dev == dev_net(dev)->loopback_dev)
+ return -EOPNOTSUPP;
+
+ port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
if (ret < 0)
- return ret;
+ goto err;
- port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
cycle_netdev(port_dev);
return 0;
+
+err:
+ port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
+ return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
@@ -978,9 +1116,11 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
- skb_push(skb, skb->mac_len);
- dev_queue_xmit_nit(skb, vrf_dev);
- skb_pull(skb, skb->mac_len);
+ if (!list_empty(&vrf_dev->ptype_all)) {
+ skb_push(skb, skb->mac_len);
+ dev_queue_xmit_nit(skb, vrf_dev);
+ skb_pull(skb, skb->mac_len);
+ }
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
}
@@ -1021,9 +1161,11 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
vrf_rx_stats(vrf_dev, skb->len);
- skb_push(skb, skb->mac_len);
- dev_queue_xmit_nit(skb, vrf_dev);
- skb_pull(skb, skb->mac_len);
+ if (!list_empty(&vrf_dev->ptype_all)) {
+ skb_push(skb, skb->mac_len);
+ dev_queue_xmit_nit(skb, vrf_dev);
+ skb_pull(skb, skb->mac_len);
+ }
skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
@@ -1146,11 +1288,11 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
/* fib_nl_{new,del}rule handling looks for net from skb->sk */
skb->sk = dev_net(dev)->rtnl;
if (add_it) {
- err = fib_nl_newrule(skb, nlh);
+ err = fib_nl_newrule(skb, nlh, NULL);
if (err == -EEXIST)
err = 0;
} else {
- err = fib_nl_delrule(skb, nlh);
+ err = fib_nl_delrule(skb, nlh, NULL);
if (err == -ENOENT)
err = 0;
}
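
The qdisc_tx_is_default() test added above keys off the fact that the
noqueue qdisc, which unconfigured single-queue virtual devices get by
default, deliberately leaves its enqueue hook NULL; the core transmit
path branches on the same pointer. A simplified sketch of the inverse
check, as the core would see it:

	static bool sketch_has_real_qdisc(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
		struct Qdisc *q = rcu_access_pointer(txq->qdisc);

		/* noqueue leaves ->enqueue NULL; anything else queues */
		return q->enqueue != NULL;
	}

When the default holds, vrf can skip the qdisc/redirect detour and call
the nf_hook()+output path directly, which is what vrf_ip_out_direct()
and vrf_ip6_out_direct() implement.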
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
new file mode 100644
index 000000000000..7f0136f2dd9d
--- /dev/null
+++ b/drivers/net/vsockmon.c
@@ -0,0 +1,170 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+#include <net/af_vsock.h>
+#include <uapi/linux/vsockmon.h>
+#include <linux/virtio_vsock.h>
+
+/* Virtio transport max packet size plus header */
+#define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \
+ sizeof(struct af_vsockmon_hdr))
+
+struct pcpu_lstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+static int vsockmon_dev_init(struct net_device *dev)
+{
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+ if (!dev->lstats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void vsockmon_dev_uninit(struct net_device *dev)
+{
+ free_percpu(dev->lstats);
+}
+
+struct vsockmon {
+ struct vsock_tap vt;
+};
+
+static int vsockmon_open(struct net_device *dev)
+{
+ struct vsockmon *vsockmon = netdev_priv(dev);
+
+ vsockmon->vt.dev = dev;
+ vsockmon->vt.module = THIS_MODULE;
+ return vsock_add_tap(&vsockmon->vt);
+}
+
+static int vsockmon_close(struct net_device *dev)
+{
+ struct vsockmon *vsockmon = netdev_priv(dev);
+
+ return vsock_remove_tap(&vsockmon->vt);
+}
+
+static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int len = skb->len;
+ struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_bytes += len;
+ stats->rx_packets++;
+ u64_stats_update_end(&stats->syncp);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void
+vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ int i;
+ u64 bytes = 0, packets = 0;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_lstats *vstats;
+ u64 tbytes, tpackets;
+ unsigned int start;
+
+ vstats = per_cpu_ptr(dev->lstats, i);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&vstats->syncp);
+ tbytes = vstats->rx_bytes;
+ tpackets = vstats->rx_packets;
+ } while (u64_stats_fetch_retry_irq(&vstats->syncp, start));
+
+ packets += tpackets;
+ bytes += tbytes;
+ }
+
+ stats->rx_packets = packets;
+ stats->tx_packets = 0;
+
+ stats->rx_bytes = bytes;
+ stats->tx_bytes = 0;
+}
+
+static int vsockmon_is_valid_mtu(int new_mtu)
+{
+ return new_mtu >= (int)sizeof(struct af_vsockmon_hdr);
+}
+
+static int vsockmon_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (!vsockmon_is_valid_mtu(new_mtu))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static const struct net_device_ops vsockmon_ops = {
+ .ndo_init = vsockmon_dev_init,
+ .ndo_uninit = vsockmon_dev_uninit,
+ .ndo_open = vsockmon_open,
+ .ndo_stop = vsockmon_close,
+ .ndo_start_xmit = vsockmon_xmit,
+ .ndo_get_stats64 = vsockmon_get_stats64,
+ .ndo_change_mtu = vsockmon_change_mtu,
+};
+
+static u32 always_on(struct net_device *dev)
+{
+ return 1;
+}
+
+static const struct ethtool_ops vsockmon_ethtool_ops = {
+ .get_link = always_on,
+};
+
+static void vsockmon_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_VSOCKMON;
+ dev->priv_flags |= IFF_NO_QUEUE;
+
+ dev->netdev_ops = &vsockmon_ops;
+ dev->ethtool_ops = &vsockmon_ethtool_ops;
+ dev->destructor = free_netdev;
+
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_HIGHDMA | NETIF_F_LLTX;
+
+ dev->flags = IFF_NOARP;
+
+ dev->mtu = DEFAULT_MTU;
+}
+
+static struct rtnl_link_ops vsockmon_link_ops __read_mostly = {
+ .kind = "vsockmon",
+ .priv_size = sizeof(struct vsockmon),
+ .setup = vsockmon_setup,
+};
+
+static __init int vsockmon_register(void)
+{
+ return rtnl_link_register(&vsockmon_link_ops);
+}
+
+static __exit void vsockmon_unregister(void)
+{
+ rtnl_link_unregister(&vsockmon_link_ops);
+}
+
+module_init(vsockmon_register);
+module_exit(vsockmon_unregister);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gerard Garcia <ggarcia@deic.uab.cat>");
+MODULE_DESCRIPTION("Vsock monitoring device. Based on nlmon device.");
+MODULE_ALIAS_RTNL_LINK("vsockmon");
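
Like nlmon, instances of this device are created over rtnetlink (for
example "ip link add type vsockmon" and then capturing on the new
interface). Every delivered frame starts with the uapi header; a
hypothetical userspace consumer sketch, with field names taken from
<uapi/linux/vsockmon.h>:

	#include <stdio.h>
	#include <endian.h>
	#include <linux/vsockmon.h>

	static void sketch_print_vsock_hdr(const void *frame, size_t len)
	{
		const struct af_vsockmon_hdr *hdr = frame;

		if (len < sizeof(*hdr))
			return;	/* runt frame */

		printf("vsock %llu:%u -> %llu:%u op %u\n",
		       (unsigned long long)le64toh(hdr->src_cid),
		       (unsigned)le32toh(hdr->src_port),
		       (unsigned long long)le64toh(hdr->dst_cid),
		       (unsigned)le32toh(hdr->dst_port),
		       (unsigned)le16toh(hdr->op));
	}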
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bdb6ae16d4a8..328b4712683c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -276,9 +276,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
send_eth = send_ip = true;
if (type == RTM_GETNEIGH) {
- ndm->ndm_family = AF_INET;
send_ip = !vxlan_addr_any(&rdst->remote_ip);
send_eth = !is_zero_ether_addr(fdb->eth_addr);
+ ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
} else
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
@@ -1515,7 +1515,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
int ns_olen;
int i, len;
- if (dev == NULL)
+ if (dev == NULL || !pskb_may_pull(request, request->len))
return NULL;
len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
@@ -1530,10 +1530,11 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
skb_push(reply, sizeof(struct ethhdr));
skb_reset_mac_header(reply);
- ns = (struct nd_msg *)skb_transport_header(request);
+ ns = (struct nd_msg *)(ipv6_hdr(request) + 1);
daddr = eth_hdr(request)->h_source;
- ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+ ns_olen = request->len - skb_network_offset(request) -
+ sizeof(struct ipv6hdr) - sizeof(*ns);
for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
@@ -1604,10 +1605,13 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
if (!in6_dev)
goto out;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
+ goto out;
+
iphdr = ipv6_hdr(skb);
daddr = &iphdr->daddr;
- msg = (struct nd_msg *)skb_transport_header(skb);
+ msg = (struct nd_msg *)(iphdr + 1);
if (msg->icmph.icmp6_code != 0 ||
msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
goto out;
@@ -2242,16 +2246,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
- else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
- pskb_may_pull(skb, sizeof(struct ipv6hdr)
- + sizeof(struct nd_msg)) &&
- ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
- struct nd_msg *msg;
-
- msg = (struct nd_msg *)skb_transport_header(skb);
- if (msg->icmph.icmp6_code == 0 &&
- msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
- return neigh_reduce(dev, skb, vni);
+ else if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+ struct ipv6hdr *hdr, _hdr;
+ if ((hdr = skb_header_pointer(skb,
+ skb_network_offset(skb),
+ sizeof(_hdr), &_hdr)) &&
+ hdr->nexthdr == IPPROTO_ICMPV6)
+ return neigh_reduce(dev, skb, vni);
}
#endif
}
@@ -2322,6 +2323,9 @@ static void vxlan_cleanup(unsigned long arg)
if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
+ if (f->flags & NTF_EXT_LEARNED)
+ continue;
+
timeout = f->used + vxlan->cfg.age_interval * HZ;
if (time_before_eq(timeout, jiffies)) {
netdev_dbg(vxlan->dev,
@@ -2754,8 +2758,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
sock = vxlan_create_sock(net, ipv6, port, flags);
if (IS_ERR(sock)) {
- pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
- PTR_ERR(sock));
kfree(vs);
return ERR_CAST(sock);
}
@@ -2818,17 +2820,21 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
- bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
+ bool ipv6 = vxlan->flags & VXLAN_F_IPV6 || metadata;
+ bool ipv4 = !ipv6 || metadata;
int ret = 0;
RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
- if (ipv6 || metadata)
+ if (ipv6) {
ret = __vxlan_sock_add(vxlan, true);
+ if (ret < 0 && ret != -EAFNOSUPPORT)
+ ipv4 = false;
+ }
#endif
- if (!ret && (!ipv6 || metadata))
+ if (ipv4)
ret = __vxlan_sock_add(vxlan, false);
if (ret < 0)
vxlan_sock_release(vxlan);
@@ -2923,6 +2929,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
+ if (lowerdev) {
+ dev->gso_max_size = lowerdev->gso_max_size;
+ dev->gso_max_segs = lowerdev->gso_max_segs;
+ }
+
if (conf->mtu) {
int max_mtu = ETH_MAX_MTU;
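
The neighbour-suppression fixes above replace blind
skb_transport_header() dereferences with bounded accesses.
skb_header_pointer() is the canonical pattern: it returns a pointer
into the skb when the requested bytes are linear and otherwise copies
them into the caller-supplied buffer, so it can never read past the
data actually present. The same pattern in isolation:

	static bool sketch_is_icmpv6(struct sk_buff *skb)
	{
		struct ipv6hdr *hdr, _hdr;

		hdr = skb_header_pointer(skb, skb_network_offset(skb),
					 sizeof(_hdr), &_hdr);
		return hdr && hdr->nexthdr == IPPROTO_ICMPV6;
	}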
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index e1dd1ec18d64..b9b934b7713c 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -346,6 +346,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
card->rambase == NULL) {
pr_err("ioremap() failed\n");
pc300_pci_remove_one(pdev);
+ return -ENOMEM;
}
/* PLX PCI 9050 workaround for local configuration register read bug */
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 098c814e22c8..ed626f568b58 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1917,6 +1917,8 @@ static int adm8211_probe(struct pci_dev *pdev,
dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
+ wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
err = ieee80211_register_hw(dev);
if (err) {
printk(KERN_ERR "%s (adm8211): Cannot register device\n",
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 7a60d2e652da..f2f4ccfdf8da 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1689,6 +1689,8 @@ static int ar5523_probe(struct usb_interface *intf,
if (error)
goto out_cancel_rx_cmd;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
usb_set_intfdata(intf, hw);
error = ieee80211_register_hw(hw);
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 45226dbee5ce..da770af83036 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -640,6 +640,7 @@ static int ath10k_ahb_hif_start(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
+ napi_enable(&ar->napi);
ath10k_ce_enable_interrupts(ar);
ath10k_pci_enable_legacy_irq(ar);
@@ -692,7 +693,6 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce_deinit;
}
- napi_enable(&ar->napi);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 2872d347ea78..abeee200310b 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -19,12 +19,21 @@
#include "hif.h"
#include "debug.h"
#include "htc.h"
+#include "hw.h"
void ath10k_bmi_start(struct ath10k *ar)
{
+ int ret;
+
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
+
+ /* Enable hardware clock to speed up firmware download */
+ if (ar->hw_params.hw_ops->enable_pll_clk) {
+ ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
+ }
}
int ath10k_bmi_done(struct ath10k *ar)
@@ -129,6 +138,69 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
return 0;
}
+int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi write soc register 0x%08x val 0x%08x\n",
+ address, reg_val);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi write soc register command in progress\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
+ cmd.write_soc_reg.addr = __cpu_to_le32(address);
+ cmd.write_soc_reg.value = __cpu_to_le32(reg_val);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "Unable to write soc register to device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
+ u32 resplen = sizeof(resp.read_soc_reg);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
+ address);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi read soc register command in progress\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
+ cmd.read_soc_reg.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "Unable to read soc register from device: %d\n",
+ ret);
+ return ret;
+ }
+
+ *reg_val = __le32_to_cpu(resp.read_soc_reg.value);
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
+ *reg_val);
+
+ return 0;
+}
+
int ath10k_bmi_write_memory(struct ath10k *ar,
u32 address, const void *buffer, u32 length)
{
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 7d3231acfb24..cc45b63ade15 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -176,7 +176,8 @@ union bmi_resp {
} rompatch_uninstall;
struct {
/* 0 = nothing executed
- * otherwise = NVRAM segment return value */
+ * otherwise = NVRAM segment return value
+ */
__le32 result;
} nvram_process;
u8 payload[BMI_MAX_CMDBUF_SIZE];
@@ -232,4 +233,6 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
+int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
+int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val);
#endif /* _BMI_H_ */
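
The new BMI register accessors give pre-boot code a read-modify-write
primitive. A hedged sketch of the shape an hw_ops->enable_pll_clk()
implementation would take on top of them (the address and mask are
hypothetical):

	static int sketch_bmi_set_bits(struct ath10k *ar, u32 addr, u32 mask)
	{
		u32 val;
		int ret;

		ret = ath10k_bmi_read_soc_reg(ar, addr, &val);
		if (ret)
			return ret;

		return ath10k_bmi_write_soc_reg(ar, addr, val | mask);
	}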
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 4045657e0a6e..ee1090ca2eac 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -261,8 +261,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
}
/*
- * Guts of ath10k_ce_send, used by both ath10k_ce_send and
- * ath10k_ce_sendlist_send.
+ * Guts of ath10k_ce_send.
* The caller takes responsibility for any needed locking.
*/
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
@@ -1052,7 +1051,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
*/
BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 0a8e29e9a0eb..5a0638915874 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -71,6 +71,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -91,6 +92,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -110,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -129,6 +132,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -148,6 +152,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -166,8 +171,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
- .hw_ops = &qca988x_ops,
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
.decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -193,6 +201,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
+ .spectral_bin_discard = 4,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -219,6 +228,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
+ .spectral_bin_discard = 12,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -244,6 +254,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
+ .spectral_bin_discard = 12,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -263,6 +274,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -280,8 +292,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
- .hw_ops = &qca988x_ops,
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
.decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -308,6 +323,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
+ .spectral_bin_discard = 4,
},
};
@@ -1623,6 +1639,13 @@ static void ath10k_core_restart(struct work_struct *work)
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
+ /* TODO: We could cancel coverage_class_work in a single place by
+ * moving it to ath10k_halt(), so that both stop() and restart() would
+ * call it. But ath10k_halt() takes conf_mutex, and calling
+ * cancel_work_sync() while holding conf_mutex would deadlock.
+ */
+ cancel_work_sync(&ar->set_coverage_class_work);
+
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@@ -1634,7 +1657,8 @@ static void ath10k_core_restart(struct work_struct *work)
break;
case ATH10K_STATE_OFF:
/* this can happen if driver is being unloaded
- * or if the crash happens during FW probing */
+ * or if the crash happens during FW probing
+ */
ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
@@ -2162,7 +2186,8 @@ EXPORT_SYMBOL(ath10k_core_stop);
/* mac80211 manages fw/hw initialization through start/stop hooks. However in
* order to know what hw capabilities should be advertised to mac80211 it is
* necessary to load the firmware (and tear it down immediately since start
- * hook will try to init it again) before registering */
+ * hook will try to init it again) before registering
+ */
static int ath10k_core_probe_fw(struct ath10k *ar)
{
struct bmi_target_info target_info;
@@ -2356,7 +2381,8 @@ void ath10k_core_unregister(struct ath10k *ar)
/* We must unregister from mac80211 before we stop HTC and HIF.
* Otherwise we will fail to submit commands to FW and mac80211 will be
- * unhappy about callback failures. */
+ * unhappy about callback failures.
+ */
ath10k_mac_unregister(ar);
ath10k_testmode_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 88d14be7fcce..bf091514ecc6 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -501,14 +501,16 @@ enum ath10k_state {
* stopped in ath10k_core_restart() work holding conf_mutex. The state
* RESTARTED means that the device is up and mac80211 has started hw
* reconfiguration. Once mac80211 is done with the reconfiguration we
- * set the state to STATE_ON in reconfig_complete(). */
+ * set the state to STATE_ON in reconfig_complete().
+ */
ATH10K_STATE_RESTARTING,
ATH10K_STATE_RESTARTED,
/* The device has crashed while restarting hw. This state is like ON
* but commands are blocked in HTC and -ECOMM response is given. This
* prevents completion timeouts and makes the driver more responsive to
- * userspace commands. This is also prevents recursive recovery. */
+ * userspace commands. This also prevents recursive recovery.
+ */
ATH10K_STATE_WEDGED,
/* factory tests */
@@ -775,6 +777,8 @@ struct ath10k {
u32 num_rf_chains;
u32 max_spatial_stream;
/* protected by conf_mutex */
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
bool ani_enabled;
bool p2p;
@@ -918,7 +922,8 @@ struct ath10k {
struct work_struct restart_work;
/* cycle count is reported twice for each visited channel during scan.
- * access protected by data_lock */
+ * access protected by data_lock
+ */
u32 survey_last_rx_clear_count;
u32 survey_last_cycle_count;
struct survey_info survey[ATH10K_NUM_CHANS];
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index fb0ade3adb07..4cd2a0fd49d6 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -249,9 +249,6 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
mutex_lock(&ar->conf_mutex);
- if (len > buf_len)
- len = buf_len;
-
spin_lock_bh(&ar->data_lock);
for (i = 0; i < WMI_SERVICE_MAX; i++) {
enabled = test_bit(i, ar->wmi.svc_map);
@@ -1819,7 +1816,7 @@ static void ath10k_tpc_stats_fill(struct ath10k *ar,
tpc_stats->num_tx_chain,
tpc_stats->rate_max);
- for (j = 0; j < tpc_stats->num_tx_chain ; j++) {
+ for (j = 0; j < WMI_TPC_FLAG; j++) {
switch (j) {
case WMI_TPC_TABLE_TYPE_CDD:
if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
@@ -1985,7 +1982,8 @@ void ath10k_debug_stop(struct ath10k *ar)
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
- * warning from del_timer(). */
+ * warning from del_timer().
+ */
if (ar->debug.htt_stats_mask != 0)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
@@ -1997,6 +1995,15 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
+ struct ath10k_vif *arvif;
+
+ /* Just check the first vif alone, as all the vifs will be
+ * sharing the same channel and if the channel is disabled, all the
+ * vifs will share the same 'is_started' state.
+ */
+ arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+ if (!arvif->is_started)
+ return -EINVAL;
ieee80211_radar_detected(ar->hw);
@@ -2437,86 +2444,82 @@ int ath10k_debug_register(struct ath10k *ar)
init_completion(&ar->debug.tpc_complete);
init_completion(&ar->debug.fw_stats_complete);
- debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
+ debugfs_create_file("fw_stats", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_stats);
- debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
- ar, &fops_fw_reset_stats);
+ debugfs_create_file("fw_reset_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_reset_stats);
- debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
+ debugfs_create_file("wmi_services", 0400, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
- debugfs_create_file("simulate_fw_crash", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash);
+ debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
+ &fops_simulate_fw_crash);
- debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
- ar, &fops_fw_crash_dump);
+ debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_crash_dump);
- debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_reg_addr);
+ debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
+ &fops_reg_addr);
- debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_reg_value);
+ debugfs_create_file("reg_value", 0600, ar->debug.debugfs_phy, ar,
+ &fops_reg_value);
- debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_mem_value);
+ debugfs_create_file("mem_value", 0600, ar->debug.debugfs_phy, ar,
+ &fops_mem_value);
- debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
- ar, &fops_chip_id);
+ debugfs_create_file("chip_id", 0400, ar->debug.debugfs_phy, ar,
+ &fops_chip_id);
- debugfs_create_file("htt_stats_mask", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_htt_stats_mask);
+ debugfs_create_file("htt_stats_mask", 0600, ar->debug.debugfs_phy, ar,
+ &fops_htt_stats_mask);
- debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar,
+ debugfs_create_file("htt_max_amsdu_ampdu", 0600, ar->debug.debugfs_phy, ar,
&fops_htt_max_amsdu_ampdu);
- debugfs_create_file("fw_dbglog", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_fw_dbglog);
+ debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar,
+ &fops_fw_dbglog);
- debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
- ar, &fops_cal_data);
+ debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
+ &fops_cal_data);
- debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_ani_enable);
+ debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
+ &fops_ani_enable);
- debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
+ debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
+ &fops_nf_cal_period);
if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
- debugfs_create_file("dfs_simulate_radar", S_IWUSR,
- ar->debug.debugfs_phy, ar,
- &fops_simulate_radar);
+ debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
+ ar, &fops_simulate_radar);
- debugfs_create_bool("dfs_block_radar_events", S_IWUSR,
+ debugfs_create_bool("dfs_block_radar_events", 0200,
ar->debug.debugfs_phy,
&ar->dfs_block_radar_events);
- debugfs_create_file("dfs_stats", S_IRUSR,
- ar->debug.debugfs_phy, ar,
+ debugfs_create_file("dfs_stats", 0400, ar->debug.debugfs_phy, ar,
&fops_dfs_stats);
}
- debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
+ debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
+ &fops_pktlog_filter);
- debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_quiet_period);
+ debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+ &fops_quiet_period);
- debugfs_create_file("tpc_stats", S_IRUSR,
- ar->debug.debugfs_phy, ar, &fops_tpc_stats);
+ debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_tpc_stats);
if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
- debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
- ar->debug.debugfs_phy, ar, &fops_btcoex);
+ debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar,
+ &fops_btcoex);
if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
- debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
- ar->debug.debugfs_phy, ar,
+ debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar,
&fops_peer_stats);
- debugfs_create_file("fw_checksums", S_IRUSR,
- ar->debug.debugfs_phy, ar, &fops_fw_checksums);
+ debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_checksums);
return 0;
}
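
Key to the mode conversion above (checkpatch now prefers plain octal
over the symbolic S_I* macros); the equivalences are purely numeric:

	S_IRUSR            -> 0400  (owner read)
	S_IWUSR            -> 0200  (owner write)
	S_IRUSR | S_IWUSR  -> 0600  (owner read/write)
	S_IRUGO | S_IWUSR  -> 0644  (world read, owner write)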
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 7353e7ea88f1..d59ac6b83340 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -372,11 +372,10 @@ static const struct file_operations fops_peer_debug_trigger = {
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
- debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
- &fops_aggr_mode);
- debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
- debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
- debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
+ debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
+ debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
&fops_peer_debug_trigger);
}
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index b2566b06e1e1..6679dd9cfd12 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -54,7 +54,8 @@ struct ath10k_hif_ops {
int (*start)(struct ath10k *ar);
/* Clean up what start() did. This does not revert to BMI phase. If
- * desired so, call power_down() and power_up() */
+ * so desired, call power_down() and power_up()
+ */
void (*stop)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
@@ -82,7 +83,8 @@ struct ath10k_hif_ops {
int (*power_up)(struct ath10k *ar);
/* Power down the device and free up resources. stop() must be called
- * before this if start() was called earlier */
+ * before this if start() was called earlier
+ */
void (*power_down)(struct ath10k *ar);
int (*suspend)(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 9f6a915f91bf..b7669b2e94aa 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -119,6 +119,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d\n",
+ eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
@@ -419,7 +422,8 @@ static void ath10k_htc_control_rx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
/* This is unexpected. FW is not supposed to send regular rx on this
- * endpoint. */
+ * endpoint.
+ */
ath10k_warn(ar, "unexpected htc rx\n");
kfree_skb(skb);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 90c2f72666b8..6305308422c4 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -51,7 +51,8 @@ enum htt_h2t_msg_type { /* host-to-target */
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
/* This command is used for sending management frames in HTT < 3.0.
- * HTT >= 3.0 uses TX_FRM for everything. */
+ * HTT >= 3.0 uses TX_FRM for everything.
+ */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11,
@@ -910,7 +911,8 @@ struct htt_rx_test {
/* payload consists of 2 lists:
* a) num_ints * sizeof(__le32)
- * b) num_chars * sizeof(u8) aligned to 4bytes */
+ * b) num_chars * sizeof(u8) aligned to 4bytes
+ */
u8 payload[0];
} __packed;
@@ -1307,7 +1309,8 @@ struct htt_frag_desc_bank_id {
} __packed;
/* real is 16 but it wouldn't fit in the max htt message size
- * so we use a conservatively safe value for now */
+ * so we use a conservatively safe value for now
+ */
#define HTT_FRAG_DESC_BANK_MAX 4
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
@@ -1684,12 +1687,14 @@ struct ath10k_htt {
DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
/* set if host-fw communication goes haywire
- * used to avoid further failures */
+ * used to avoid further failures
+ */
bool rx_confused;
atomic_t num_mpdus_ready;
/* This is used to group tx/rx completions separately and process them
- * in batches to reduce cache stalls */
+ * in batches to reduce cache stalls
+ */
struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
struct sk_buff_head tx_fetch_ind_q;
@@ -1725,11 +1730,13 @@ struct ath10k_htt {
/* This structure layout is programmed via rx ring setup
* so that FW knows how to transfer the rx descriptor to the host.
- * Buffers like this are placed on the rx ring. */
+ * Buffers like this are placed on the rx ring.
+ */
struct htt_rx_desc {
union {
/* This field is filled on the host using the msdu buffer
- * from htt_rx_indication */
+ * from htt_rx_indication
+ */
struct fw_rx_desc_base fw_desc;
u32 pad;
} __packed;
@@ -1760,7 +1767,8 @@ struct htt_rx_desc {
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
- * aggregated traffic more nicely. */
+ * aggregated traffic more nicely.
+ */
#define ATH10K_HTT_MAX_NUM_REFILL 100
/*
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 02a3fc81fbe3..84b6067ff6e7 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -177,7 +177,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
* automatically balances load wrt to CPU power.
*
* This probably comes at a cost of lower maximum throughput but
- * improves the average and stability. */
+ * improves the average and stability.
+ */
spin_lock_bh(&htt->rx_ring.lock);
num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
@@ -304,7 +305,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
rx_desc = (struct htt_rx_desc *)msdu->data;
/* FIXME: we must report msdu payload since this is what caller
- * expects now */
+ * expects now
+ */
skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
@@ -630,16 +632,17 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
sgi = (info3 >> 7) & 1;
status->rate_idx = mcs;
- status->flag |= RX_FLAG_HT;
+ status->encoding = RX_ENC_HT;
if (sgi)
- status->flag |= RX_FLAG_SHORT_GI;
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (bw)
- status->flag |= RX_FLAG_40MHZ;
+ status->bw = RATE_INFO_BW_40;
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
- TODO check this */
+ * TODO check this
+ */
bw = info2 & 3;
sgi = info3 & 1;
group_id = (info2 >> 4) & 0x3F;
@@ -686,10 +689,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
}
status->rate_idx = mcs;
- status->vht_nss = nss;
+ status->nss = nss;
if (sgi)
- status->flag |= RX_FLAG_SHORT_GI;
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
switch (bw) {
/* 20MHZ */
@@ -697,18 +700,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
break;
/* 40MHZ */
case 1:
- status->flag |= RX_FLAG_40MHZ;
+ status->bw = RATE_INFO_BW_40;
break;
/* 80MHZ */
case 2:
- status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ status->bw = RATE_INFO_BW_80;
break;
case 3:
- status->vht_flag |= RX_VHT_FLAG_160MHZ;
+ status->bw = RATE_INFO_BW_160;
break;
}
- status->flag |= RX_FLAG_VHT;
+ status->encoding = RX_ENC_VHT;
break;
default:
break;
@@ -871,13 +874,10 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
/* New PPDU starts so clear out the old per-PPDU status. */
status->freq = 0;
status->rate_idx = 0;
- status->vht_nss = 0;
- status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
- status->flag &= ~(RX_FLAG_HT |
- RX_FLAG_VHT |
- RX_FLAG_SHORT_GI |
- RX_FLAG_40MHZ |
- RX_FLAG_MACTIME_END);
+ status->nss = 0;
+ status->encoding = RX_ENC_LEGACY;
+ status->bw = RATE_INFO_BW_20;
+ status->flag &= ~RX_FLAG_MACTIME_END;
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_signal(ar, status, rxd);
@@ -930,7 +930,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -938,16 +938,15 @@ static void ath10k_process_rx(struct ath10k *ar,
is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
"mcast" : "ucast",
(__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
- (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
- "legacy" : "",
- status->flag & RX_FLAG_HT ? "ht" : "",
- status->flag & RX_FLAG_VHT ? "vht" : "",
- status->flag & RX_FLAG_40MHZ ? "40" : "",
- status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
- status->vht_flag & RX_VHT_FLAG_160MHZ ? "160" : "",
- status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
- status->vht_nss,
+ status->nss,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
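The rx-status conversions in this hunk all follow one mechanical mapping from the old packed flag bits to the split mac80211 fields; a summary sketch derived from the changes above:

    /* Old packed rx-status bits            New split fields (this series)
     * RX_FLAG_HT / RX_FLAG_VHT          -> status->encoding = RX_ENC_HT / RX_ENC_VHT
     * RX_FLAG_40MHZ / _80MHZ / _160MHZ  -> status->bw = RATE_INFO_BW_40 / _80 / _160
     * RX_FLAG_SHORT_GI                  -> status->enc_flags |= RX_ENC_FLAG_SHORT_GI
     * status->vht_nss                   -> status->nss
     */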
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 86b427f5e2bc..685faac1368f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -526,7 +526,8 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
memset(req, 0, sizeof(*req));
/* currently we support only max 8 bit masks so no need to worry
- * about endian support */
+ * about endian support
+ */
req->upload_types[0] = mask;
req->reset_types[0] = mask;
req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
@@ -1008,7 +1009,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
* There is simply no point in pushing HTT TX_FRM through HTC tx path
* as it's a waste of resources. By bypassing HTC it is possible to
* avoid extra memory allocations, compress data structures and thus
- * improve performance. */
+ * improve performance.
+ */
txbuf->htc_hdr.eid = htt->eid;
txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index d9f37ee4bfdd..c866ab524571 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -19,6 +19,7 @@
#include "hw.h"
#include "hif.h"
#include "wmi-ops.h"
+#include "bmi.h"
const struct ath10k_hw_regs qca988x_regs = {
.rtc_soc_base_address = 0x00004000,
@@ -72,6 +73,9 @@ const struct ath10k_hw_regs qca6174_regs = {
.pcie_intr_fw_mask = 0x00000400,
.pcie_intr_ce_mask_all = 0x0007f800,
.pcie_intr_clr_address = 0x00000014,
+ .cpu_pll_init_address = 0x00404020,
+ .cpu_speed_address = 0x00404024,
+ .core_clk_div_address = 0x00404028,
};
const struct ath10k_hw_regs qca99x0_regs = {
@@ -187,6 +191,73 @@ const struct ath10k_hw_values qca4019_values = {
.ce_desc_meta_data_lsb = 4,
};
+const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
+ {
+ .refclk = 48000000,
+ .div = 0xe,
+ .rnfrac = 0x2aaa8,
+ .settle_time = 2400,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 19200000,
+ .div = 0x24,
+ .rnfrac = 0x2aaa8,
+ .settle_time = 960,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 24000000,
+ .div = 0x1d,
+ .rnfrac = 0x15551,
+ .settle_time = 1200,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 26000000,
+ .div = 0x1b,
+ .rnfrac = 0x4ec4,
+ .settle_time = 1300,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 37400000,
+ .div = 0x12,
+ .rnfrac = 0x34b49,
+ .settle_time = 1870,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 38400000,
+ .div = 0x12,
+ .rnfrac = 0x15551,
+ .settle_time = 1920,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 40000000,
+ .div = 0x12,
+ .rnfrac = 0x26665,
+ .settle_time = 2000,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 52000000,
+ .div = 0x1b,
+ .rnfrac = 0x4ec4,
+ .settle_time = 2600,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+};
+
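Each row above pairs, in order, with the ATH10K_HW_REFCLK_* enum added in hw.h later in this patch; a brief orientation sketch of the lookup, matching the function that follows:

    /* The efuse XTAL_SEL field indexes this table, e.g. index 5
     * (ATH10K_HW_REFCLK_38_4_MHZ) selects the 38.4 MHz row:
     *   hw_clk = &qca6174_clk[MS(efuse, EFUSE_XTAL_SEL)];
     */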
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
@@ -361,6 +432,195 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
+/**
+ * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock
+ * @ar: the ath10k blob
+ *
+ * This function is very hardware specific; the clock initialization
+ * steps are sensitive, and getting them wrong can lead to hard-to-debug
+ * crashes, so they must be performed in sequence.
+ *
+ * *** Be careful if you plan to refactor them. ***
+ *
+ * Return: 0 if the pll was enabled successfully, otherwise -EINVAL
+ */
+static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
+{
+ int ret, wait_limit;
+ u32 clk_div_addr, pll_init_addr, speed_addr;
+ u32 addr, reg_val, mem_val;
+ struct ath10k_hw_params *hw;
+ const struct ath10k_hw_clk_params *hw_clk;
+
+ hw = &ar->hw_params;
+
+ if (ar->regs->core_clk_div_address == 0 ||
+ ar->regs->cpu_pll_init_address == 0 ||
+ ar->regs->cpu_speed_address == 0)
+ return -EINVAL;
+
+ clk_div_addr = ar->regs->core_clk_div_address;
+ pll_init_addr = ar->regs->cpu_pll_init_address;
+ speed_addr = ar->regs->cpu_speed_address;
+
+ /* Read efuse register to find out the right hw clock configuration */
+ addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* sanitize if the hw refclk index is out of bounds */
+ if (MS(reg_val, EFUSE_XTAL_SEL) >= ATH10K_HW_REFCLK_COUNT)
+ return -EINVAL;
+
+ hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)];
+
+ /* Set the rnfrac and outdiv params to bb_pll register */
+ addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
+ reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) |
+ SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV));
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the correct settle time value to pll_settle register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
+ reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the clock_ctrl div to core_clk_ctrl register */
+ addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
+ reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the clock_div register */
+ mem_val = 1;
+ ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val,
+ sizeof(mem_val));
+ if (ret)
+ return -EINVAL;
+
+ /* Configure the pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) |
+ SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) |
+ SM(1, WLAN_PLL_CONTROL_NOPWD));
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* busy wait (max 1s) until the rtc_sync status register indicates ready */
+ wait_limit = 100000;
+ addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
+ do {
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ break;
+
+ wait_limit--;
+ udelay(10);
+
+ } while (wait_limit > 0);
+
+ if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ return -EINVAL;
+
+ /* Unset the pll_bypass in pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
+ reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* busy wait (max 1s) until the rtc_sync status register indicates ready */
+ wait_limit = 100000;
+ addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
+ do {
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ break;
+
+ wait_limit--;
+ udelay(10);
+
+ } while (wait_limit > 0);
+
+ if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ return -EINVAL;
+
+ /* Enable the hardware cpu clock register */
+ addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
+ reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* unset the nopwd from pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* kick off pll initialization via the pll_init register */
+ mem_val = 1;
+ ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val,
+ sizeof(mem_val));
+ if (ret)
+ return -EINVAL;
+
+ /* write the target clock frequency to the speed register */
+ ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq,
+ sizeof(hw->target_cpu_freq));
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
const struct ath10k_hw_ops qca988x_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};
@@ -374,3 +634,8 @@ static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
const struct ath10k_hw_ops qca99x0_ops = {
.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
};
+
+const struct ath10k_hw_ops qca6174_ops = {
+ .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
+ .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
+};
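For context, a minimal sketch of how a boot-path caller would dispatch the new op through the hw_ops table; the helper name here is hypothetical, not part of the patch:

    static int ath10k_enable_pll_if_supported(struct ath10k *ar)
    {
            const struct ath10k_hw_ops *ops = ar->hw_params.hw_ops;

            /* chips without a PLL hook (e.g. qca988x_ops) simply skip this */
            if (!ops || !ops->enable_pll_clk)
                    return 0;

            return ops->enable_pll_clk(ar);
    }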
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index f0fda0f2b3b4..5b1e90bb2a4d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -129,7 +129,7 @@ enum qca9377_chip_id_rev {
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_FILE_BASE "firmware"
-#define ATH10K_FW_API_MAX 5
+#define ATH10K_FW_API_MAX 6
#define ATH10K_FW_API_MIN 2
#define ATH10K_FW_API2_FILE "firmware-2.bin"
@@ -141,6 +141,9 @@ enum qca9377_chip_id_rev {
/* HTT id conflict fix for management frames over HTT */
#define ATH10K_FW_API5_FILE "firmware-5.bin"
+/* the firmware-6.bin blob */
+#define ATH10K_FW_API6_FILE "firmware-6.bin"
+
#define ATH10K_FW_UTF_FILE "utf.bin"
#define ATH10K_FW_UTF_API2_FILE "utf-2.bin"
@@ -255,6 +258,9 @@ struct ath10k_hw_regs {
u32 pcie_intr_fw_mask;
u32 pcie_intr_ce_mask_all;
u32 pcie_intr_clr_address;
+ u32 cpu_pll_init_address;
+ u32 cpu_speed_address;
+ u32 core_clk_div_address;
};
extern const struct ath10k_hw_regs qca988x_regs;
@@ -293,7 +299,8 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
- * param, llc/snap) are aligned to 4byte boundaries each */
+ * param, llc/snap) are aligned to 4-byte boundaries each
+ */
enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
@@ -363,6 +370,30 @@ enum ath10k_hw_cc_wraparound_type {
ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
};
+enum ath10k_hw_refclk_speed {
+ ATH10K_HW_REFCLK_UNKNOWN = -1,
+ ATH10K_HW_REFCLK_48_MHZ = 0,
+ ATH10K_HW_REFCLK_19_2_MHZ = 1,
+ ATH10K_HW_REFCLK_24_MHZ = 2,
+ ATH10K_HW_REFCLK_26_MHZ = 3,
+ ATH10K_HW_REFCLK_37_4_MHZ = 4,
+ ATH10K_HW_REFCLK_38_4_MHZ = 5,
+ ATH10K_HW_REFCLK_40_MHZ = 6,
+ ATH10K_HW_REFCLK_52_MHZ = 7,
+
+ /* must be the last one */
+ ATH10K_HW_REFCLK_COUNT,
+};
+
+struct ath10k_hw_clk_params {
+ u32 refclk;
+ u32 div;
+ u32 rnfrac;
+ u32 settle_time;
+ u32 refdiv;
+ u32 outdiv;
+};
+
struct ath10k_hw_params {
u32 id;
u16 dev_id;
@@ -416,6 +447,13 @@ struct ath10k_hw_params {
/* Number of bytes used for alignment in rx_hdr_status of rx desc. */
int decap_align_bytes;
+
+ /* hw specific clock control parameters */
+ const struct ath10k_hw_clk_params *hw_clk;
+ int target_cpu_freq;
+
+ /* Number of bytes to be discarded for each FFT sample */
+ int spectral_bin_discard;
};
struct htt_rx_desc;
@@ -424,10 +462,14 @@ struct htt_rx_desc;
struct ath10k_hw_ops {
int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
void (*set_coverage_class)(struct ath10k *ar, s16 value);
+ int (*enable_pll_clk)(struct ath10k *ar);
};
extern const struct ath10k_hw_ops qca988x_ops;
extern const struct ath10k_hw_ops qca99x0_ops;
+extern const struct ath10k_hw_ops qca6174_ops;
+
+extern const struct ath10k_hw_clk_params qca6174_clk[];
static inline int
ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
@@ -847,4 +889,38 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define WAVE1_PHYCLK_USEC_MASK 0x0000007F
#define WAVE1_PHYCLK_USEC_LSB 0
+/* qca6174 PLL offset/mask */
+#define SOC_CORE_CLK_CTRL_OFFSET 0x00000114
+#define SOC_CORE_CLK_CTRL_DIV_LSB 0
+#define SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007
+
+#define EFUSE_OFFSET 0x0000032c
+#define EFUSE_XTAL_SEL_LSB 8
+#define EFUSE_XTAL_SEL_MASK 0x00000700
+
+#define BB_PLL_CONFIG_OFFSET 0x000002f4
+#define BB_PLL_CONFIG_FRAC_LSB 0
+#define BB_PLL_CONFIG_FRAC_MASK 0x0003ffff
+#define BB_PLL_CONFIG_OUTDIV_LSB 18
+#define BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000
+
+#define WLAN_PLL_SETTLE_OFFSET 0x0018
+#define WLAN_PLL_SETTLE_TIME_LSB 0
+#define WLAN_PLL_SETTLE_TIME_MASK 0x000007ff
+
+#define WLAN_PLL_CONTROL_OFFSET 0x0014
+#define WLAN_PLL_CONTROL_DIV_LSB 0
+#define WLAN_PLL_CONTROL_DIV_MASK 0x000003ff
+#define WLAN_PLL_CONTROL_REFDIV_LSB 10
+#define WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00
+#define WLAN_PLL_CONTROL_BYPASS_LSB 16
+#define WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000
+#define WLAN_PLL_CONTROL_NOPWD_LSB 18
+#define WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000
+
+#define RTC_SYNC_STATUS_OFFSET 0x0244
+#define RTC_SYNC_STATUS_PLL_CHANGING_LSB 5
+#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
+/* qca6174 PLL offset/mask end */
+
#endif /* _HW_H_ */
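The MS()/SM() helpers used with the LSB/MASK pairs above extract and place bitfields; a minimal sketch of their semantics (the actual definitions live in the ath10k headers):

    #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)  /* mask-and-shift a field out */
    #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)  /* shift-and-mask a field in  */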
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3029f257a19a..4674ff33d320 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -457,7 +457,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
for (;;) {
/* since ath10k_install_key we can't hold data_lock all the
- * time, so we try to remove the keys incrementally */
+ * time, so we try to remove the keys incrementally
+ */
spin_lock_bh(&ar->data_lock);
i = 0;
list_for_each_entry(peer, &ar->peers, list) {
@@ -609,7 +610,8 @@ static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
case 2:
case 3:
/* Our lower layer calculations limit our precision to
- 1 microsecond */
+ * 1 microsecond
+ */
return 1;
case 4:
return 2;
@@ -978,7 +980,8 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
arg.channel.band_center_freq2 = chandef->center_freq2;
/* TODO setup this dynamically, what in case we
- don't have any vifs? */
+ * don't have any vifs?
+ */
arg.channel.mode = chan_to_phymode(chandef);
arg.channel.chan_radar =
!!(channel->flags & IEEE80211_CHAN_RADAR);
@@ -2373,9 +2376,10 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
}
/* TODO setup this based on STA listen interval and
- beacon interval. Currently we don't know
- sta->listen_interval - mac80211 patch required.
- Currently use 10 seconds */
+ * beacon interval. Currently we don't know
+ * sta->listen_interval - mac80211 patch required.
+ * Currently use 10 seconds
+ */
ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
10);
@@ -2451,6 +2455,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
enum nl80211_band band;
const u16 *vht_mcs_mask;
u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i;
if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
return;
@@ -2478,7 +2484,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
* zero in VHT IE. Using it would result in degraded throughput.
* arg->peer_max_mpdu at this point contains HT max_mpdu so keep
- * it if VHT max_mpdu is smaller. */
+ * it if VHT max_mpdu is smaller.
+ */
arg->peer_max_mpdu = max(arg->peer_max_mpdu,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
@@ -2489,6 +2496,18 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->peer_flags |= ar->wmi.peer_flags->bw160;
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
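    /* Example with a hypothetical map: rx_mcs_map == 0xfffa packs 2 bits
     * per stream, LSB first, so streams 1-2 advertise MCS 0-9 (value 2)
     * while streams 3-8 are "not supported" (value 3); max_nss then
     * resolves to 2, assuming vht_mcs_mask permits those streams. */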
arg->peer_vht_rates.rx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->peer_vht_rates.rx_mcs_set =
@@ -2779,7 +2798,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
}
/* ap_sta must be accessed only within rcu section which must be left
- * before calling ath10k_setup_peer_smps() which might sleep. */
+ * before calling ath10k_setup_peer_smps() which might sleep.
+ */
ht_cap = ap_sta->ht_cap;
vht_cap = ap_sta->vht_cap;
@@ -3050,7 +3070,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
/* FIXME: why use only legacy modes, why not any
* HT/VHT modes? Would that even make any
- * difference? */
+ * difference?
+ */
if (channel->band == NL80211_BAND_2GHZ)
ch->mode = MODE_11G;
else
@@ -3114,7 +3135,8 @@ static void ath10k_regd_update(struct ath10k *ar)
}
/* Target allows setting up per-band regdomain but ath_common provides
- * a combined one only */
+ * a combined one only
+ */
ret = ath10k_wmi_pdev_set_regdomain(ar,
regpair->reg_domain,
regpair->reg_domain, /* 2ghz */
@@ -3126,6 +3148,21 @@ static void ath10k_regd_update(struct ath10k *ar)
ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
}
+static void ath10k_mac_update_channel_list(struct ath10k *ar,
+ struct ieee80211_supported_band *band)
+{
+ int i;
+
+ if (ar->low_5ghz_chan && ar->high_5ghz_chan) {
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < ar->low_5ghz_chan ||
+ band->channels[i].center_freq > ar->high_5ghz_chan)
+ band->channels[i].flags |=
+ IEEE80211_CHAN_DISABLED;
+ }
+ }
+}
+
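    /* Example with assumed firmware limits: low_5ghz_chan = 5170 and
     * high_5ghz_chan = 5330 keep channels 36-64 usable while channels
     * 100 and above get flagged IEEE80211_CHAN_DISABLED. */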
static void ath10k_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
@@ -3149,6 +3186,10 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
if (ar->state == ATH10K_STATE_ON)
ath10k_regd_update(ar);
mutex_unlock(&ar->conf_mutex);
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ ath10k_mac_update_channel_list(ar,
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ]);
}
/***************/
@@ -3644,7 +3685,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
* never transmitted. We delete the peer upon tx completion.
* It is unlikely that a peer for offchannel tx will already be
* present. However it may be in some rare cases so account for that.
- * Otherwise we might remove a legitimate peer and break stuff. */
+ * Otherwise we might remove a legitimate peer and break stuff.
+ */
for (;;) {
skb = skb_dequeue(&ar->offchan_tx_queue);
@@ -4684,6 +4726,7 @@ static void ath10k_stop(struct ieee80211_hw *hw)
}
mutex_unlock(&ar->conf_mutex);
+ cancel_work_sync(&ar->set_coverage_class_work);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->restart_work);
}
@@ -5683,7 +5726,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
/* the peer should not disappear in mid-way (unless FW goes awry) since
- * we already hold conf_mutex. we just make sure its there now. */
+ * we already hold conf_mutex. we just make sure its there now.
+ */
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
spin_unlock_bh(&ar->data_lock);
@@ -5695,8 +5739,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EOPNOTSUPP;
goto exit;
} else {
- /* if the peer doesn't exist there is no key to disable
- * anymore */
+ /* if the peer doesn't exist there is no key to disable anymore */
goto exit;
}
}
@@ -6555,7 +6598,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not
- * we'll collect those frames either way if we stop/delete vdevs */
+ * we'll collect those frames either way if we stop/delete vdevs
+ */
if (drop)
return;
@@ -6606,7 +6650,8 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
/* If device failed to restart it will be in a different state, e.g.
- * ATH10K_STATE_WEDGED */
+ * ATH10K_STATE_WEDGED
+ */
if (ar->state == ATH10K_STATE_RESTARTED) {
ath10k_info(ar, "device successfully recovered\n");
ar->state = ATH10K_STATE_ON;
@@ -7129,7 +7174,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
lockdep_assert_held(&ar->data_lock);
WARN_ON(ctx && vifs);
- WARN_ON(vifs && n_vifs != 1);
+ WARN_ON(vifs && !n_vifs);
/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
* on a linked list now. Doing a lookup peer -> vif -> chanctx for each
@@ -8248,6 +8293,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 6094372307aa..1e9806f57ee4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -720,14 +720,16 @@ void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
/* IMPORTANT: INTR_CLR register has to be set after
* INTR_ENABLE is set to 0, otherwise interrupt can not be
- * really cleared. */
+ * really cleared.
+ */
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
0);
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
/* IMPORTANT: this extra read transaction is required to
- * flush the posted write buffer. */
+ * flush the posted write buffer.
+ */
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS);
}
@@ -739,7 +741,8 @@ void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
/* IMPORTANT: this extra read transaction is required to
- * flush the posted write buffer. */
+ * flush the posted write buffer.
+ */
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS);
}
@@ -970,12 +973,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
remaining_bytes -= nbytes;
-
- if (ret) {
- ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
- address, ret);
- break;
- }
memcpy(data, data_buf, nbytes);
address += nbytes;
@@ -2914,7 +2911,8 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* host won't know when target writes BAR to CORE_CTRL.
* This write might get lost if target has NOT written BAR.
* For now, fix the race by repeating the write in below
- * synchronization checking. */
+ * synchronization checking.
+ */
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
@@ -3430,6 +3428,7 @@ MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 034e7a54c5b2..c1022a1cf855 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -439,19 +439,22 @@ struct rx_mpdu_end {
 * c) A-MSDU subframe header (14 bytes) if applicable
* d) LLC/SNAP (RFC1042, 8 bytes)
*
- * In case of A-MSDU only first frame in sequence contains (a) and (b). */
+ * In case of A-MSDU only first frame in sequence contains (a) and (b).
+ */
enum rx_msdu_decap_format {
RX_MSDU_DECAP_RAW = 0,
/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
- * htt_rx_desc contains the original decapped 802.11 header. */
+ * htt_rx_desc contains the original decapped 802.11 header.
+ */
RX_MSDU_DECAP_NATIVE_WIFI = 1,
/* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
/* Payload contains two 48-bit addresses and 2-byte length (14 bytes
- * total), followed by an RFC1042 header (8 bytes). */
+ * total), followed by an RFC1042 header (8 bytes).
+ */
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
@@ -867,7 +870,7 @@ struct rx_ppdu_start {
*
* reserved_9
* Reserved: HW should fill with 0, FW should ignore.
-*/
+ */
#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0)
#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1)
@@ -1207,7 +1210,7 @@ struct rx_ppdu_end {
* Every time HW sets this bit in memory FW/SW must clear this
* bit in memory. FW will initialize all the ppdu_done dword
* to 0.
-*/
+ */
#define FW_RX_DESC_INFO0_DISCARD (1 << 0)
#define FW_RX_DESC_INFO0_FORWARD (1 << 1)
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index c061d6958bd1..dd9cc0939ea8 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -56,6 +56,21 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
return max_exp;
}
+static inline size_t ath10k_spectral_fix_bin_size(struct ath10k *ar,
+ size_t bin_len)
+{
+ /* some chipsets report bin size as 2^n bytes + 'm' bytes in
+ * report mode 2. The first 2^n bytes carry inband tones and the
+ * last 'm' bytes carry band edge detection data, mainly used for
+ * radar detection. Strip the last 'm' bytes to make the bin size
+ * valid again. 'm' can take the values 4 or 12.
+ */
+ if (!is_power_of_2(bin_len))
+ bin_len -= ar->hw_params.spectral_bin_discard;
+
+ return bin_len;
+}
+
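    /* Worked example with the qca99x0 numbers from the comment removed
     * below: a 68-byte report (64 + 4) is not a power of two, so with
     * spectral_bin_discard = 4 the length is trimmed back to 64. */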
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_phyerr_ev_arg *phyerr,
const struct phyerr_fft_report *fftr,
@@ -70,18 +85,11 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample = (struct fft_sample_ath10k *)&buf;
+ bin_len = ath10k_spectral_fix_bin_size(ar, bin_len);
+
if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
- /* qca99x0 reports bin size as 68 bytes (64 bytes + 4 bytes) in
- * report mode 2. First 64 bytes carries inband tones (-32 to +31)
- * and last 4 byte carries band edge detection data (+32) mainly
- * used in radar detection purpose. Strip last 4 byte to make bin
- * size is valid one.
- */
- if (bin_len == 68)
- bin_len -= 4;
-
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
@@ -536,15 +544,15 @@ int ath10k_spectral_create(struct ath10k *ar)
1140, 2500,
&rfs_spec_scan_cb, NULL);
debugfs_create_file("spectral_scan_ctl",
- S_IRUSR | S_IWUSR,
+ 0600,
ar->debug.debugfs_phy, ar,
&fops_spec_scan_ctl);
debugfs_create_file("spectral_count",
- S_IRUSR | S_IWUSR,
+ 0600,
ar->debug.debugfs_phy, ar,
&fops_spectral_count);
debugfs_create_file("spectral_bins",
- S_IRUSR | S_IWUSR,
+ 0600,
ar->debug.debugfs_phy, ar,
&fops_spectral_bins);
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index a47cab44d9c8..cbac9e4252d6 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -268,13 +268,13 @@ struct host_interest {
#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
/*
-Fw Mode/SubMode Mask
-|-----------------------------------------------------------------------------|
-| SUB | SUB | SUB | SUB | | | | |
-|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
-| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |
-|-----------------------------------------------------------------------------|
-*/
+ * Fw Mode/SubMode Mask
+ *-----------------------------------------------------------------------------
+ * SUB | SUB | SUB | SUB | | | |
+ *MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]
+ * (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
+ *-----------------------------------------------------------------------------
+ */
#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_MASK 0x3
#define HI_OPTION_FW_MODE_SHIFT 0xC
@@ -428,8 +428,9 @@ Fw Mode/SubMode Mask
#define HI_PWR_SAVE_LPL_ENABLED 0x1
/*b1-b3 reserved*/
/*b4-b5 : dev0 LPL type : 0 - none
- 1- Reduce Pwr Search
- 2- Reduce Pwr Listen*/
+ * 1- Reduce Pwr Search
+ * 2- Reduce Pwr Listen
+ */
/*b6-b7 : dev1 LPL type and so on for Max 8 devices*/
#define HI_PWR_SAVE_LPL_DEV0_LSB 4
#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 8bb36c18a749..d8564624415c 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -420,8 +420,8 @@ int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
int ret;
- ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len,
- ath10k_tm_policy);
+ ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, ath10k_tm_policy,
+ NULL);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 0a47269be289..87948aff1bd5 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -124,7 +124,7 @@ void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
complete(&ar->thermal.wmi_sync);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp,
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath10k_thermal_show_temp,
NULL, 0);
static struct attribute *ath10k_hwmon_attrs[] = {
@@ -191,7 +191,8 @@ int ath10k_thermal_register(struct ath10k *ar)
return 0;
/* Avoid linking error on devm_hwmon_device_register_with_groups, I
- * guess linux/hwmon.h is missing proper stubs. */
+ * guess linux/hwmon.h is missing proper stubs.
+ */
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 9852c5d51139..d4986f626c35 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -34,7 +34,8 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
/* If the original wait_for_completion() timed out before
* {data,mgmt}_tx_completed() was called then we could complete
* offchan_tx_completed for a different skb. Prevent this by using
- * offchan_tx_skb. */
+ * offchan_tx_skb.
+ */
spin_lock_bh(&ar->data_lock);
if (ar->offchan_tx_skb != skb) {
ath10k_warn(ar, "completed old offchannel frame\n");
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index c7956e181f80..2fc3f24ff1ca 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -390,7 +390,8 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
return ret;
/* FIXME There's no ACK event for Management Tx. This probably
- * shouldn't be called here either. */
+ * shouldn't be called here either.
+ */
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(ar->hw, msdu);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2f1743e60fa1..6afc8d27f0d5 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3210,7 +3210,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
/* if next SWBA has no tim_changed the tim_bitmap is garbage.
- * we must copy the bitmap upon change and reuse it later */
+ * we must copy the bitmap upon change and reuse it later
+ */
if (__le32_to_cpu(tim_info->tim_changed)) {
int i;
@@ -3529,7 +3530,8 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* before telling mac80211 to decrement CSA counter
*
* Once CSA counter is completed stop sending beacons until
- * actual channel switch is done */
+ * actual channel switch is done
+ */
if (arvif->vif->csa_active &&
ieee80211_csa_is_complete(arvif->vif)) {
ieee80211_csa_finish(arvif->vif);
@@ -3643,6 +3645,11 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
spin_lock_bh(&ar->data_lock);
ch = ar->rx_channel;
+
+ /* fetch target operating channel during channel change */
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+
spin_unlock_bh(&ar->data_lock);
if (!ch) {
@@ -3686,7 +3693,8 @@ radar_detected:
ATH10K_DFS_STAT_INC(ar, radar_detected);
/* Control radar events reporting in debugfs file
- dfs_block_radar_events */
+ * dfs_block_radar_events
+ */
if (ar->dfs_block_radar_events) {
ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
return;
@@ -4593,6 +4601,8 @@ ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
+ arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = ev->wmi_service_bitmap;
arg->service_map_len = sizeof(ev->wmi_service_bitmap);
@@ -4629,6 +4639,8 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
+ arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = ev->wmi_service_bitmap;
arg->service_map_len = sizeof(ev->wmi_service_bitmap);
@@ -4682,6 +4694,8 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->phy_capability = __le32_to_cpu(arg.phy_capab);
ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
+ ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
+ ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
@@ -4758,9 +4772,10 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
num_units = ar->max_num_peers + 1;
} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
/* number of units to allocate is number of
- * peers, 1 extra for self peer on target */
- /* this needs to be tied, host and target
- * can get out of sync */
+ * peers, 1 extra for self peer on target.
+ * These counts need to stay tied together, or
+ * host and target can get out of sync.
+ */
num_units = ar->max_num_peers + 1;
} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
num_units = ar->max_num_vdevs + 1;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 386aa51435f1..1b4865a55595 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -1038,7 +1038,8 @@ enum wmi_cmd_id {
WMI_STA_UAPSD_AUTO_TRIG_CMDID,
/* STA Keep alive parameter configuration,
- Requires WMI_SERVICE_STA_KEEP_ALIVE */
+ * Requires WMI_SERVICE_STA_KEEP_ALIVE
+ */
WMI_STA_KEEPALIVE_CMD,
/* misc command group */
@@ -1774,7 +1775,8 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
break;
/* no default handler to allow compiler to check that the
- * enum is fully handled */
+ * enum is fully handled
+ */
};
return "<unknown>";
@@ -2974,7 +2976,8 @@ struct wmi_start_scan_arg {
/* When set, DFS channels will not be scanned */
#define WMI_SCAN_BYPASS_DFS_CHN 0x40
/* Different FW scan engine may choose to bail out on errors.
- * Allow the driver to have influence over that. */
+ * Allow the driver to have influence over that.
+ */
#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
@@ -3182,7 +3185,7 @@ struct wmi_10_4_phyerr_event {
struct phyerr_radar_report {
__le32 reg0; /* RADAR_REPORT_REG0_* */
- __le32 reg1; /* REDAR_REPORT_REG1_* */
+ __le32 reg1; /* RADAR_REPORT_REG1_* */
} __packed;
#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000
@@ -4447,14 +4450,16 @@ enum wmi_vdev_subtype_10_4 {
/* values for vdev_start_request flags */
/*
* Indicates that AP VDEV uses hidden ssid. only valid for
- * AP/GO */
+ * AP/GO
+ */
#define WMI_VDEV_START_HIDDEN_SSID (1 << 0)
/*
* Indicates if robust management frame/management frame
* protection is enabled. For GO/AP vdevs, it indicates that
* it may support station/client associations with RMF enabled.
* For STA/client vdevs, it indicates that sta will
- * associate with AP with RMF enabled. */
+ * associate with AP with RMF enabled.
+ */
#define WMI_VDEV_START_PMF_ENABLED (1 << 1)
struct wmi_p2p_noa_descriptor {
@@ -4814,7 +4819,8 @@ enum wmi_vdev_param {
* An associated STA is considered unresponsive if there is no recent
* TX/RX activity and downlink frames are buffered for it. Once a STA
* exceeds the maximum unresponsive time, the AP will send a
- * WMI_STA_KICKOUT event to the host so the STA can be deleted. */
+ * WMI_STA_KICKOUT event to the host so the STA can be deleted.
+ */
WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
/* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
@@ -4941,7 +4947,8 @@ enum wmi_10x_vdev_param {
* An associated STA is considered unresponsive if there is no recent
* TX/RX activity and downlink frames are buffered for it. Once a STA
* exceeds the maximum unresponsive time, the AP will send a
- * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
+ * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted.
+ */
WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
/* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
@@ -5605,12 +5612,14 @@ struct wmi_tim_info_arg {
struct wmi_p2p_noa_info {
/* Bit 0 - Flag to indicate an update in NOA schedule
- Bits 7-1 - Reserved */
+ * Bits 7-1 - Reserved
+ */
u8 changed;
/* NOA index */
u8 index;
/* Bit 0 - Opp PS state of the AP
- Bits 1-7 - Ctwindow in TUs */
+ * Bits 1-7 - Ctwindow in TUs
+ */
u8 ctwindow_oppps;
/* Number of NOA descriptors */
u8 num_descriptors;
@@ -6000,7 +6009,8 @@ struct wmi_main_peer_assoc_complete_cmd {
struct wmi_common_peer_assoc_complete_cmd cmd;
/* HT Operation Element of the peer. Five bytes packed in 2
- * INT32 array and filled from lsb to msb. */
+ * INT32 array and filled from lsb to msb.
+ */
__le32 peer_ht_info[2];
} __packed;
@@ -6335,6 +6345,8 @@ struct wmi_svc_rdy_ev_arg {
__le32 num_rf_chains;
__le32 eeprom_rd;
__le32 num_mem_reqs;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
const __le32 *service_map;
size_t service_map_len;
const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index d98fd421c7ec..527afcf39246 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1414,10 +1414,10 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
switch (ah->ah_bwmode) {
case AR5K_BWMODE_5MHZ:
- rxs->flag |= RX_FLAG_5MHZ;
+ rxs->bw = RATE_INFO_BW_5;
break;
case AR5K_BWMODE_10MHZ:
- rxs->flag |= RX_FLAG_10MHZ;
+ rxs->bw = RATE_INFO_BW_10;
break;
default:
break;
@@ -1425,7 +1425,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
if (rs->rs_rate ==
ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
- rxs->flag |= RX_FLAG_SHORTPRE;
+ rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE;
trace_ath5k_rx(ah, skb);
@@ -2564,6 +2564,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
hw->extra_tx_headroom = 2;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
/*
* Mark the device as detached to avoid processing
* interrupts until setup is complete.
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 363b30a549c2..414b5b596efc 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -102,10 +102,8 @@ static struct ieee80211_channel ath6kl_2ghz_channels[] = {
};
static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
- CHAN5G(34, 0), CHAN5G(36, 0),
- CHAN5G(38, 0), CHAN5G(40, 0),
- CHAN5G(42, 0), CHAN5G(44, 0),
- CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(36, 0), CHAN5G(40, 0),
+ CHAN5G(44, 0), CHAN5G(48, 0),
CHAN5G(52, 0), CHAN5G(56, 0),
CHAN5G(60, 0), CHAN5G(64, 0),
CHAN5G(100, 0), CHAN5G(104, 0),
@@ -171,7 +169,7 @@ static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif)
if (!stopped)
return;
- cfg80211_sched_scan_stopped(ar->wiphy);
+ cfg80211_sched_scan_stopped(ar->wiphy, 0);
}
static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
@@ -808,9 +806,15 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
cfg80211_put_bss(ar->wiphy, bss);
} else if (vif->sme_state == SME_CONNECTED) {
+ struct cfg80211_roam_info roam_info = {
+ .bss = bss,
+ .req_ie = assoc_req_ie,
+ .req_ie_len = assoc_req_len,
+ .resp_ie = assoc_resp_ie,
+ .resp_ie_len = assoc_resp_len,
+ };
/* inform roam event to cfg80211 */
- cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
- assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+ cfg80211_roamed(vif->ndev, &roam_info, GFP_KERNEL);
}
}
@@ -1505,7 +1509,6 @@ static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params)
{
struct ath6kl *ar = wiphy_priv(wiphy);
@@ -1552,7 +1555,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct ath6kl_vif *vif = netdev_priv(ndev);
@@ -3355,7 +3358,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
}
static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
- struct net_device *dev)
+ struct net_device *dev, u64 reqid)
{
struct ath6kl_vif *vif = netdev_priv(dev);
bool stopped;
@@ -3976,7 +3979,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities))
- ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ ar->wiphy->max_sched_scan_reqs = 1;
if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
ar->fw_capabilities))
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 0614393dd7ae..94297572914f 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -63,6 +63,7 @@ int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif);
#ifdef CONFIG_ATH6KL_DEBUG
+__printf(2, 3)
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...);
void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
const char *msg, const char *prefix,
@@ -83,6 +84,7 @@ int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
+__printf(2, 3)
static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
const char *fmt, ...)
{
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index ca1a18c86c0d..d127a08d60df 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -995,7 +995,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
if (netlen < (payload_len + HTC_HDR_LENGTH)) {
ath6kl_dbg(ATH6KL_DBG_HTC,
- "HTC Rx: insufficient length, got:%d expected =%u\n",
+ "HTC Rx: insufficient length, got:%d expected =%zu\n",
netlen, payload_len + HTC_HDR_LENGTH);
status = -EINVAL;
goto free_skb;
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index d67170ea1038..d8dcacda9add 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -74,8 +74,8 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
int err, buf_len;
void *buf;
- err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len,
- ath6kl_tm_policy);
+ err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, ath6kl_tm_policy,
+ NULL);
if (err)
return err;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 84a6d12c3f8a..bfc20b45b806 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1082,7 +1082,7 @@ void ath6kl_wmi_sscan_timer(unsigned long ptr)
{
struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr;
- cfg80211_sched_scan_results(vif->ar->wiphy);
+ cfg80211_sched_scan_results(vif->ar->wiphy, 0);
}
static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
@@ -1596,7 +1596,7 @@ static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
rate = le32_to_cpu(ev->rate);
pkts = le32_to_cpu(ev->pkts);
- ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d% pkts %d intvl %ds\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d%% pkts %d intvl %ds\n",
vif->bssid, rate, pkts, vif->txe_intvl);
cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index cc5bb0a76baf..68fcbe03bce2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -494,7 +494,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status = 0;
rxs->rs_flags = 0;
- rxs->flag = 0;
+ rxs->enc_flags = 0;
+ rxs->bw = RATE_INFO_BW_20;
rxs->rs_datalen = rxsp->status2 & AR_DataLen;
rxs->rs_tstamp = rxsp->status3;
@@ -520,8 +521,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
- rxs->flag |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
- rxs->flag |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0;
+ rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
+ rxs->enc_flags |= (rxsp->status4 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0;
rxs->evm0 = rxsp->status6;
rxs->evm1 = rxsp->status7;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 0f71146b781d..13ab6bc46775 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -254,7 +254,9 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
continue;
- if (h)
+ if (ah->nf_override)
+ nfval = ah->nf_override;
+ else if (h)
nfval = h[i].privNF;
else
nfval = default_nf;
@@ -348,6 +350,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
return 0;
}
+EXPORT_SYMBOL(ath9k_hw_loadnf);
static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 0ffa23a61568..5e77fe1f5b0d 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -742,6 +742,9 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
return;
}
+ if (!spec_priv->spec_config.enabled)
+ return;
+
ath_ps_ops(common)->wakeup(common);
rxfilter = ath9k_hw_getrxfilter(ah);
ath9k_hw_setrxfilter(ah, rxfilter |
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index b80e08b13b74..c67d0e08bd4c 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -181,14 +181,15 @@ int ath9k_cmn_process_rate(struct ath_common *common,
sband = hw->wiphy->bands[band];
if (IS_CHAN_QUARTER_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_5MHZ;
+ rxs->bw = RATE_INFO_BW_5;
else if (IS_CHAN_HALF_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_10MHZ;
+ rxs->bw = RATE_INFO_BW_10;
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
- rxs->flag |= RX_FLAG_HT;
- rxs->flag |= rx_stats->flag;
+ rxs->encoding = RX_ENC_HT;
+ rxs->enc_flags |= rx_stats->enc_flags;
+ rxs->bw = rx_stats->bw;
rxs->rate_idx = rx_stats->rs_rate & 0x7f;
return 0;
}
@@ -199,7 +200,7 @@ int ath9k_cmn_process_rate(struct ath_common *common,
return 0;
}
if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
- rxs->flag |= RX_FLAG_SHORTPRE;
+ rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE;
rxs->rate_idx = i;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 43930c336987..2e64977a8ab6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1191,6 +1191,65 @@ static const struct file_operations fops_tpc = {
.llseek = default_llseek,
};
+static ssize_t read_file_nf_override(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ char buf[32];
+ unsigned int len;
+
+ if (ah->nf_override == 0)
+ len = sprintf(buf, "off\n");
+ else
+ len = sprintf(buf, "%d\n", ah->nf_override);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_nf_override(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (strncmp("off", buf, 3) == 0)
+ val = 0;
+ else if (kstrtol(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 0)
+ return -EINVAL;
+
+ if (val < -120)
+ return -EINVAL;
+
+ ah->nf_override = val;
+
+ if (ah->curchan)
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ return count;
+}
+
+static const struct file_operations fops_nf_override = {
+ .read = read_file_nf_override,
+ .write = write_file_nf_override,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
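A brief usage note, assuming the usual ath9k debugfs location: writing "off" or an integer in the range [-120, 0] takes effect immediately via ath9k_hw_loadnf() on the current channel, e.g. echo -95 > /sys/kernel/debug/ieee80211/phy0/ath9k/nf_override.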
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1402,5 +1461,8 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_u16("airtime_flags", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->airtime_flags);
+ debugfs_create_file("nf_override", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_nf_override);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index 524cbf13ca9c..efc692ee67d4 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -116,12 +116,12 @@ void ath_debug_rate_stats(struct ath_softc *sc,
if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats))
goto exit;
- if (rxs->flag & RX_FLAG_40MHZ)
+ if (rxs->bw == RATE_INFO_BW_40)
rstats->ht_stats[rxs->rate_idx].ht40_cnt++;
else
rstats->ht_stats[rxs->rate_idx].ht20_cnt++;
- if (rxs->flag & RX_FLAG_SHORT_GI)
+ if (rxs->enc_flags & RX_ENC_FLAG_SHORT_GI)
rstats->ht_stats[rxs->rate_idx].sgi_cnt++;
else
rstats->ht_stats[rxs->rate_idx].lgi_cnt++;
@@ -130,7 +130,7 @@ void ath_debug_rate_stats(struct ath_softc *sc,
}
if (IS_CCK_RATE(rs->rs_rate)) {
- if (rxs->flag & RX_FLAG_SHORTPRE)
+ if (rxs->enc_flags & RX_ENC_FLAG_SHORTPRE)
rstats->cck_stats[rxs->rate_idx].cck_sp_cnt++;
else
rstats->cck_stats[rxs->rate_idx].cck_lp_cnt++;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index fb80ec86e53d..6ccf24814514 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -112,7 +112,7 @@ void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size,
off_t offset, u16 *data)
{
- if (offset > blob_size)
+ if (offset >= blob_size)
return false;
*data = blob[offset];
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 30bf722e33ed..31390af6c33e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -106,7 +106,7 @@
#define AR9285_RDEXT_DEFAULT 0x1F
#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
-#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FREQ2FBIN(x, y) (u8)((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
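
The FREQ2FBIN change only adds an explicit u8 cast: the computed frequency bin is stored in byte-sized EEPROM fields, and the cast makes the narrowing intent explicit. A worked example of the mapping (sketch; the u8 typedef stands in for the kernel type):

typedef unsigned char u8; /* kernel type, spelled out for the sketch */
#define FREQ2FBIN(x, y) (u8)((y) ? ((x) - 2300) : (((x) - 4800) / 5))

/* FREQ2FBIN(2412, 1) == 112  (2.4 GHz channel 1)
 * FREQ2FBIN(5180, 0) == 76   (5 GHz channel 36)
 * FBIN2FREQ inverts this: 2300 + 112 == 2412, 4800 + 5 * 76 == 5180
 */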
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index de2d212f39ec..12aa8abbcba4 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -37,6 +37,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
{ USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
+ { USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */
{ USB_DEVICE(0x0cf3, 0x7015),
.driver_info = AR9287_USB }, /* Atheros */
@@ -1219,6 +1220,9 @@ static int send_eject_command(struct usb_interface *interface)
u8 bulk_out_ep;
int r;
+ if (iface_desc->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
/* Find bulk out endpoint */
for (r = 1; r >= 0; r--) {
endpoint = &iface_desc->endpoint[r].desc;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index b65c1b661ade..defacc6c9c99 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -780,6 +780,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
}
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
}
static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index f333ef1e3e7b..b38a586ea59a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -929,11 +929,12 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
struct ath_htc_rx_status *rxstatus)
{
- rx_stats->flag = 0;
+ rx_stats->enc_flags = 0;
+ rx_stats->bw = RATE_INFO_BW_20;
if (rxstatus->rs_flags & ATH9K_RX_2040)
- rx_stats->flag |= RX_FLAG_40MHZ;
+ rx_stats->bw = RATE_INFO_BW_40;
if (rxstatus->rs_flags & ATH9K_RX_GI)
- rx_stats->flag |= RX_FLAG_SHORT_GI;
+ rx_stats->enc_flags |= RX_ENC_FLAG_SHORT_GI;
}
static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9cbca1229bac..4ac70827d142 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -803,6 +803,7 @@ struct ath_hw {
u32 rfkill_gpio;
u32 rfkill_polarity;
u32 ah_flags;
+ s16 nf_override;
bool reset_power_on;
bool htc_reset_init;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index fa4b3cc1ba22..fd9a61834c17 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -955,6 +955,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_cmn_reload_chainmask(ah);
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
}
int ath9k_init_device(u16 devid, struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index d937c39b3a0b..6128c2bb23d8 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -535,7 +535,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_status = 0;
rs->rs_flags = 0;
- rs->flag = 0;
+ rs->enc_flags = 0;
+ rs->bw = RATE_INFO_BW_20;
rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
rs->rs_tstamp = ads.AR_RcvTimestamp;
@@ -577,15 +578,15 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
/* directly mapped flags for ieee80211_rx_status */
- rs->flag |=
- (ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
- rs->flag |=
- (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0;
+ rs->enc_flags |=
+ (ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
+ rs->enc_flags |=
+ (ads.ds_rxstatus3 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0;
if (AR_SREV_9280_20_OR_LATER(ah))
- rs->flag |=
+ rs->enc_flags |=
(ads.ds_rxstatus3 & AR_STBC) ?
/* we can only Nss=1 STBC */
- (1 << RX_FLAG_STBC_SHIFT) : 0;
+ (1 << RX_ENC_FLAG_STBC_SHIFT) : 0;
if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 770fc11b41d1..fd6aa49adadf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -16,6 +16,7 @@
#ifndef MAC_H
#define MAC_H
+#include <net/cfg80211.h>
#define set11nTries(_series, _index) \
(SM((_series)[_index].Tries, AR_XmitDataTries##_index))
@@ -143,7 +144,8 @@ struct ath_rx_status {
u32 evm2;
u32 evm3;
u32 evm4;
- u32 flag; /* see enum mac80211_rx_flags */
+ u16 enc_flags;
+ enum rate_info_bw bw;
};
struct ath_htc_rx_status {
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index aff473dfa10d..7b7627f85d3a 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -383,6 +383,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
0x10CF, /* Fujitsu */
0x1783),
.driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_DELL,
+ 0x020B),
+ .driver_data = ATH9K_PCI_WOW },
/* Killer Wireless (2x2) */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index d79837fe333f..2197aee2bb72 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1037,11 +1037,11 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
rxs = IEEE80211_SKB_RXCB(skb);
- is_sgi = !!(rxs->flag & RX_FLAG_SHORT_GI);
- is_40 = !!(rxs->flag & RX_FLAG_40MHZ);
- is_sp = !!(rxs->flag & RX_FLAG_SHORTPRE);
+ is_sgi = !!(rxs->enc_flags & RX_ENC_FLAG_SHORT_GI);
+ is_40 = (rxs->bw == RATE_INFO_BW_40);
+ is_sp = !!(rxs->enc_flags & RX_ENC_FLAG_SHORTPRE);
- if (!!(rxs->flag & RX_FLAG_HT)) {
+ if (rxs->encoding == RX_ENC_HT) {
/* MCS rates */
airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index ffb22a04beeb..988c8857d78c 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1874,6 +1874,8 @@ void *carl9170_alloc(size_t priv_size)
for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
return ar;
err_nomem:
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 0c34c8729dc6..b2166726b05d 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -358,7 +358,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
switch (mac->status & AR9170_RX_STATUS_MODULATION) {
case AR9170_RX_STATUS_MODULATION_CCK:
if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
- status->flag |= RX_FLAG_SHORTPRE;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
switch (head->plcp[0]) {
case AR9170_RX_PHY_RATE_CCK_1M:
status->rate_idx = 0;
@@ -423,12 +423,12 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
case AR9170_RX_STATUS_MODULATION_HT:
if (head->plcp[3] & 0x80)
- status->flag |= RX_FLAG_40MHZ;
+ status->bw = RATE_INFO_BW_40;
if (head->plcp[6] & 0x80)
- status->flag |= RX_FLAG_SHORT_GI;
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
status->rate_idx = clamp(0, 75, head->plcp[3] & 0x7f);
- status->flag |= RX_FLAG_HT;
+ status->encoding = RX_ENC_HT;
break;
default:
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 43afa83a9f0c..e25bfdf78c2e 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain)
EXPORT_SYMBOL(ath_is_49ghz_allowed);
/* Frequency is one where radar detection is required */
-static bool ath_is_radar_freq(u16 center_freq)
+static bool ath_is_radar_freq(u16 center_freq,
+ struct ath_regulatory *reg)
{
+ if (reg->country_code == CTRY_INDIA)
+ return (center_freq >= 5500 && center_freq <= 5700);
return (center_freq >= 5260 && center_freq <= 5700);
}
@@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
struct ieee80211_channel *ch)
{
- if (ath_is_radar_freq(ch->center_freq) ||
+ if (ath_is_radar_freq(ch->center_freq, reg) ||
(ch->flags & IEEE80211_CHAN_RADAR))
return;
@@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
}
}
-/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
-static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
+/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz (5500 MHz - 5700 MHz for India) */
+static void ath_reg_apply_radar_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
@@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
- if (!ath_is_radar_freq(ch->center_freq))
+ if (!ath_is_radar_freq(ch->center_freq, reg))
continue;
/* We always enable radar detection/DFS on this
* frequency range. Additionally we also apply on
@@ -506,7 +511,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
/* We always apply this */
- ath_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_radar_flags(wiphy, reg);
/*
* This would happen when we have sent a custom regulatory request
@@ -654,7 +659,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
}
wiphy_apply_custom_regulatory(wiphy, regd);
- ath_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_radar_flags(wiphy, reg);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
return 0;
}
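
With this change radar detection is mandated on 5260-5700 MHz in general, but only on 5500-5700 MHz when the country code is India. A self-contained sketch of the predicate with spot checks (CTRY_INDIA's numeric value is an assumption based on the ISO 3166 code the ath regd tables use):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CTRY_INDIA 356 /* assumed to match ath/regd_common.h */

struct ath_regulatory_sketch { uint16_t country_code; };

static bool is_radar_freq(uint16_t center_freq,
			  const struct ath_regulatory_sketch *reg)
{
	if (reg->country_code == CTRY_INDIA)
		return center_freq >= 5500 && center_freq <= 5700;
	return center_freq >= 5260 && center_freq <= 5700;
}

int main(void)
{
	struct ath_regulatory_sketch in = { CTRY_INDIA }, other = { 0 };

	assert(!is_radar_freq(5280, &in));   /* ch 56: no DFS in India */
	assert(is_radar_freq(5280, &other)); /* ch 56: DFS elsewhere */
	assert(is_radar_freq(5600, &in));    /* 5500-5700: DFS everywhere */
	return 0;
}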
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
index 4b83e87f0b94..20bf967a70b9 100644
--- a/drivers/net/wireless/ath/wcn36xx/Kconfig
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -2,7 +2,7 @@ config WCN36XX
tristate "Qualcomm Atheros WCN3660/3680 support"
depends on MAC80211 && HAS_DMA
depends on QCOM_WCNSS_CTRL || QCOM_WCNSS_CTRL=n
- depends on QCOM_SMD || QCOM_SMD=n
+ depends on RPMSG || RPMSG=n
---help---
This module adds support for wireless adapters based on
Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 7a0c2e7da7f6..d5e993dc9b23 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -22,7 +22,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/soc/qcom/wcnss_ctrl.h>
#include "wcn36xx.h"
@@ -337,10 +337,10 @@ out_smd_stop:
wcn36xx_smd_stop(wcn);
out_free_smd_buf:
kfree(wcn->hal_buf);
-out_free_dxe_pool:
- wcn36xx_dxe_free_mem_pools(wcn);
out_free_dxe_ctl:
wcn36xx_dxe_free_ctl_blks(wcn);
+out_free_dxe_pool:
+ wcn36xx_dxe_free_mem_pools(wcn);
out_smd_close:
wcn36xx_smd_close(wcn);
out_err:
@@ -1112,6 +1112,9 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+ wiphy_ext_feature_set(wcn->hw->wiphy,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
return ret;
}
@@ -1218,15 +1221,13 @@ static int wcn36xx_probe(struct platform_device *pdev)
INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker);
- wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process);
+ wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw);
if (IS_ERR(wcn->smd_channel)) {
wcn36xx_err("failed to open WLAN_CTRL channel\n");
ret = PTR_ERR(wcn->smd_channel);
goto out_wq;
}
- qcom_smd_set_drvdata(wcn->smd_channel, hw);
-
addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret);
if (addr && ret != ETH_ALEN) {
wcn36xx_err("invalid local-mac-address\n");
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 1c2966f7db7a..9c6590d5348a 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -19,7 +19,7 @@
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/bitops.h>
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
#include "smd.h"
struct wcn36xx_cfg_val {
@@ -254,7 +254,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
init_completion(&wcn->hal_rsp_compl);
start = jiffies;
- ret = qcom_smd_send(wcn->smd_channel, wcn->hal_buf, len);
+ ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
goto out;
@@ -2205,11 +2205,11 @@ out:
return ret;
}
-int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel,
- const void *buf, size_t len)
+int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
+ void *buf, int len, void *priv, u32 addr)
{
const struct wcn36xx_hal_msg_header *msg_header = buf;
- struct ieee80211_hw *hw = qcom_smd_get_drvdata(channel);
+ struct ieee80211_hw *hw = priv;
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_hal_ind_msg *msg_ind;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 8892ccd67b14..013fc9546f56 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -51,7 +51,7 @@ struct wcn36xx_hal_ind_msg {
};
struct wcn36xx;
-struct qcom_smd_channel;
+struct rpmsg_device;
int wcn36xx_smd_open(struct wcn36xx *wcn);
void wcn36xx_smd_close(struct wcn36xx *wcn);
@@ -129,8 +129,8 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
-int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel,
- const void *buf, size_t len);
+int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
+ void *buf, int len, void *priv, u32 addr);
int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 8c387a0a3c09..22304edc5948 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -68,7 +68,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 7423998ddeb4..b52b4da9a967 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -195,7 +195,7 @@ struct wcn36xx {
void __iomem *ccu_base;
void __iomem *dxe_base;
- struct qcom_smd_channel *smd_channel;
+ struct rpmsg_endpoint *smd_channel;
struct qcom_smem_state *tx_enable_state;
unsigned tx_enable_state_bit;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 83155b5ddbfb..fdaa99c541ac 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -178,9 +178,8 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
BIT(NL80211_STA_INFO_RX_DROP_MISC) |
BIT(NL80211_STA_INFO_TX_FAILED);
- sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
- sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
sinfo->rxrate.mcs = stats->last_mcs_rx;
sinfo->rx_bytes = stats->rx_bytes;
sinfo->rx_packets = stats->rx_packets;
@@ -256,7 +255,7 @@ static struct wireless_dev *
wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags, struct vif_params *params)
+ struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct net_device *ndev = wil_to_ndev(wil);
@@ -307,7 +306,7 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy,
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
@@ -334,11 +333,8 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
break;
case NL80211_IFTYPE_MONITOR:
- if (flags)
- wil->monitor_flags = *flags;
- else
- wil->monitor_flags = 0;
-
+ if (params->flags)
+ wil->monitor_flags = params->flags;
break;
default:
return -EOPNOTSUPP;
@@ -390,22 +386,23 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
}
mutex_unlock(&wil->p2p_wdev_mutex);
- /* social scan on P2P_DEVICE is handled as p2p search */
- if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
- wil_p2p_is_social_scan(request)) {
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
if (!wil->p2p.p2p_dev_started) {
wil_err(wil, "P2P search requested on stopped P2P device\n");
rc = -EIO;
goto out;
}
- wil->scan_request = request;
- wil->radio_wdev = wdev;
- rc = wil_p2p_search(wil, request);
- if (rc) {
- wil->radio_wdev = wil_to_wdev(wil);
- wil->scan_request = NULL;
+ /* social scan on P2P_DEVICE is handled as p2p search */
+ if (wil_p2p_is_social_scan(request)) {
+ wil->scan_request = request;
+ wil->radio_wdev = wdev;
+ rc = wil_p2p_search(wil, request);
+ if (rc) {
+ wil->radio_wdev = wil_to_wdev(wil);
+ wil->scan_request = NULL;
+ }
+ goto out;
}
- goto out;
}
(void)wil_p2p_stop_discovery(wil);
@@ -415,9 +412,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
for (i = 0; i < request->n_ssids; i++) {
wil_dbg_misc(wil, "SSID[%d]", i);
- print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
- request->ssids[i].ssid,
- request->ssids[i].ssid_len);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ssids[i].ssid,
+ request->ssids[i].ssid_len, true);
}
if (request->n_ssids)
@@ -454,8 +451,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
}
if (request->ie_len)
- print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET,
- request->ie, request->ie_len);
+ wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ie, request->ie_len, true);
else
wil_dbg_misc(wil, "Scan has no IE's\n");
@@ -679,6 +676,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
if (rc == 0) {
netif_carrier_on(ndev);
+ wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
+ wil->bss = bss;
/* Connect can take lots of time */
mod_timer(&wil->connect_timer,
jiffies + msecs_to_jiffies(2000));
@@ -707,6 +706,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
return 0;
}
+ wil->locally_generated_disc = true;
rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
WMI_DISCONNECT_EVENTID, NULL, 0,
WIL6210_DISCONNECT_TO_MS);
@@ -760,7 +760,8 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
*/
wil_dbg_misc(wil, "mgmt_tx\n");
- print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
+ wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
if (!cmd) {
@@ -1093,18 +1094,18 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
{
- print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET,
- b->head, b->head_len);
- print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET,
- b->tail, b->tail_len);
- print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET,
- b->beacon_ies, b->beacon_ies_len);
- print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET,
- b->probe_resp, b->probe_resp_len);
- print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET,
- b->proberesp_ies, b->proberesp_ies_len);
- print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET,
- b->assocresp_ies, b->assocresp_ies_len);
+ wil_hex_dump_misc("head ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->head, b->head_len, true);
+ wil_hex_dump_misc("tail ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->tail, b->tail_len, true);
+ wil_hex_dump_misc("BCON IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->beacon_ies, b->beacon_ies_len, true);
+ wil_hex_dump_misc("PROBE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->probe_resp, b->probe_resp_len, true);
+ wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->proberesp_ies, b->proberesp_ies_len, true);
+ wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->assocresp_ies, b->assocresp_ies_len, true);
}
/* internal functions for device reset and starting AP */
@@ -1198,6 +1199,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
wil->pbss = pbss;
netif_carrier_on(ndev);
+ wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
if (rc)
@@ -1213,6 +1215,7 @@ err_bcast:
wmi_pcp_stop(wil);
err_pcp_start:
netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
out:
mutex_unlock(&wil->mutex);
return rc;
@@ -1298,8 +1301,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
info->dtim_period);
wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
- print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
- info->ssid, info->ssid_len);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ info->ssid, info->ssid_len, true);
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
@@ -1319,6 +1322,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
wil_dbg_misc(wil, "stop_ap\n");
netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
@@ -1555,12 +1559,6 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_ps_profile_type ps_profile;
- int rc;
-
- if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
- wil_err(wil, "set_power_mgmt not supported\n");
- return -EOPNOTSUPP;
- }
wil_dbg_misc(wil, "enabled=%d, timeout=%d\n",
enabled, timeout);
@@ -1570,11 +1568,7 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
else
ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED;
- rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
- if (rc)
- wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
-
- return rc;
+ return wil_ps_update(wil, ps_profile);
}
static const struct cfg80211_ops wil_cfg80211_ops = {
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3e8cdf12feda..5648ebbd0e16 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -524,9 +524,8 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
- (const volatile void __iomem *)
- wil_blob->blob.data + pos, count);
+ wil_memcpy_fromio_32(buf, (const void __iomem *)
+ wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
kfree(buf);
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index f4901587c005..e01acac88825 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -554,5 +554,7 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name)
rc = request_firmware(&fw, name, wil_to_dev(wil));
if (!rc)
release_firmware(fw);
- return rc != -ENOENT;
+ else
+ wil_dbg_fw(wil, "<%s> not available: %d\n", name, rc);
+ return !rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index efb1f59aafd9..32086792dfc3 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -30,8 +30,8 @@ bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
-static bool oob_mode;
-module_param(oob_mode, bool, 0444);
+static u8 oob_mode;
+module_param(oob_mode, byte, 0444);
MODULE_PARM_DESC(oob_mode,
" enable out of the box (OOB) mode in FW, for diagnostics and certification");
@@ -130,17 +130,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
u32 *d = dst;
const volatile u32 __iomem *s = src;
- /* size_t is unsigned, if (count%4 != 0) it will wrap */
- for (count += 4; count > 4; count -= 4)
+ for (; count >= 4; count -= 4)
*d++ = __raw_readl(s++);
-}
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
- const volatile void __iomem *src, size_t count)
-{
- wil_halp_vote(wil);
- wil_memcpy_fromio_32(dst, src, count);
- wil_halp_unvote(wil);
+ if (unlikely(count)) {
+ /* count can be 1..3 */
+ u32 tmp = __raw_readl(s);
+
+ memcpy(d, &tmp, count);
+ }
}
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
@@ -149,17 +147,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
volatile u32 __iomem *d = dst;
const u32 *s = src;
- for (count += 4; count > 4; count -= 4)
+ for (; count >= 4; count -= 4)
__raw_writel(*s++, d++);
-}
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
- volatile void __iomem *dst,
- const void *src, size_t count)
-{
- wil_halp_vote(wil);
- wil_memcpy_toio_32(dst, src, count);
- wil_halp_unvote(wil);
+ if (unlikely(count)) {
+ /* count can be 1..3 */
+ u32 tmp = 0;
+
+ memcpy(&tmp, s, count);
+ __raw_writel(tmp, d);
+ }
}
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
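
The reworked copy helpers drop the old "count += 4" wraparound trick in favor of an explicit word loop plus a bounce word for a 1..3 byte tail. A plain-memory analogue of the same shape (the real helpers go through __raw_readl()/__raw_writel() on __iomem pointers; this sketch only illustrates the tail handling):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void copy32_with_tail(void *dst, const void *src, size_t count)
{
	uint32_t *d = dst;
	const uint32_t *s = src;

	for (; count >= 4; count -= 4)
		*d++ = *s++;

	if (count) {
		/* count is now 1..3; read one extra full word and keep
		 * only the bytes we need, as the driver does
		 */
		uint32_t tmp = *s;

		memcpy(d, &tmp, count);
	}
}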
@@ -274,15 +271,20 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
wil_bcast_fini(wil);
wil_update_net_queues_bh(wil, NULL, true);
netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
if (test_bit(wil_status_fwconnected, wil->status)) {
clear_bit(wil_status_fwconnected, wil->status);
cfg80211_disconnected(ndev, reason_code,
- NULL, 0, false, GFP_KERNEL);
+ NULL, 0,
+ wil->locally_generated_disc,
+ GFP_KERNEL);
+ wil->locally_generated_disc = false;
} else if (test_bit(wil_status_fwconnecting, wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
+ wil->bss = NULL;
}
clear_bit(wil_status_fwconnecting, wil->status);
break;
@@ -304,10 +306,34 @@ static void wil_disconnect_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work,
struct wil6210_priv, disconnect_worker);
+ struct net_device *ndev = wil_to_ndev(wil);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_disconnect_event evt;
+ } __packed reply;
- mutex_lock(&wil->mutex);
- _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false);
- mutex_unlock(&wil->mutex);
+ if (test_bit(wil_status_fwconnected, wil->status))
+ /* connect succeeded after all */
+ return;
+
+ if (!test_bit(wil_status_fwconnecting, wil->status))
+ /* already disconnected */
+ return;
+
+ rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
+ WMI_DISCONNECT_EVENTID, &reply, sizeof(reply),
+ WIL6210_DISCONNECT_TO_MS);
+ if (rc) {
+ wil_err(wil, "disconnect error %d\n", rc);
+ return;
+ }
+
+ wil_update_net_queues_bh(wil, NULL, true);
+ netif_carrier_off(ndev);
+ cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
+ clear_bit(wil_status_fwconnecting, wil->status);
}
static void wil_connect_timer_fn(ulong x)
@@ -547,6 +573,9 @@ int wil_priv_init(struct wil6210_priv *wil)
if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
+
+ wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
+
return 0;
out_wmi_wq:
@@ -555,6 +584,12 @@ out_wmi_wq:
return -EAGAIN;
}
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
+{
+ if (wil->platform_ops.bus_request)
+ wil->platform_ops.bus_request(wil->platform_handle, kbps);
+}
+
/**
* wil6210_disconnect - disconnect one connection
* @wil: driver context
@@ -607,13 +642,25 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
-static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
+static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
{
- wil_info(wil, "enable=%d\n", enable);
- if (enable)
+ wil_info(wil, "oob_mode to %d\n", mode);
+ switch (mode) {
+ case 0:
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE |
+ BIT_USER_OOB_R2_MODE);
+ break;
+ case 1:
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
- else
+ break;
+ case 2:
wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+ wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
+ break;
+ default:
+ wil_err(wil, "invalid oob_mode: %d\n", mode);
+ }
}
static int wil_target_reset(struct wil6210_priv *wil)
@@ -856,6 +903,24 @@ void wil_abort_scan(struct wil6210_priv *wil, bool sync)
}
}
+int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile)
+{
+ int rc;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
+ wil_err(wil, "set_power_mgmt not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = wmi_ps_dev_profile_cfg(wil, ps_profile);
+ if (rc)
+ wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
+ else
+ wil->ps_profile = ps_profile;
+
+ return rc;
+}
+
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@@ -901,15 +966,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Disable device led before reset*/
wmi_led_cfg(wil, false);
+ mutex_lock(&wil->p2p_wdev_mutex);
+ wil_abort_scan(wil, false);
+ mutex_unlock(&wil->p2p_wdev_mutex);
+
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
bitmap_zero(wil->status, wil_status_last);
mutex_unlock(&wil->wmi_mutex);
- mutex_lock(&wil->p2p_wdev_mutex);
- wil_abort_scan(wil, false);
- mutex_unlock(&wil->p2p_wdev_mutex);
-
wil_mask_irq(wil);
wmi_event_flush(wil);
@@ -986,6 +1051,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
+ if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
+ wil_ps_update(wil, wil->ps_profile);
+
wil_collect_fw_info(wil);
if (wil->platform_ops.notify) {
@@ -1066,9 +1134,7 @@ int __wil_up(struct wil6210_priv *wil)
napi_enable(&wil->napi_tx);
set_bit(wil_status_napi_en, wil->status);
- if (wil->platform_ops.bus_request)
- wil->platform_ops.bus_request(wil->platform_handle,
- WIL_MAX_BUS_REQUEST_KBPS);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
return 0;
}
@@ -1092,8 +1158,7 @@ int __wil_down(struct wil6210_priv *wil)
set_bit(wil_status_resetting, wil->status);
- if (wil->platform_ops.bus_request)
- wil->platform_ops.bus_request(wil->platform_handle, 0);
+ wil6210_bus_request(wil, 0);
wil_disable_irq(wil);
if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
@@ -1154,6 +1219,7 @@ void wil_halp_vote(struct wil6210_priv *wil)
wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
+ reinit_completion(&wil->halp.comp);
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 874c787727fe..b38515fc7ce7 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -211,6 +211,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(dev, "wil_if_alloc failed: %d\n", rc);
return rc;
}
+
wil->pdev = pdev;
pci_set_drvdata(pdev, wil);
/* rollback to if_free */
@@ -224,6 +225,21 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* rollback to err_plat */
+ /* device supports 48bit addresses */
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (rc) {
+ dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(dev,
+ "dma_set_mask_and_coherent(32) failed: %d\n",
+ rc);
+ goto err_plat;
+ }
+ } else {
+ wil->use_extended_dma_addr = 1;
+ }
+
rc = pci_enable_device(pdev);
if (rc) {
wil_err(wil,
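
The probe path now tries a 48-bit DMA mask first and falls back to 32-bit, recording the outcome in use_extended_dma_addr so later ring allocations can be constrained (see the pmc.c and txrx.c hunks below). A minimal kernel-context sketch of the pattern, with a hypothetical helper name:

#include <linux/dma-mapping.h>

static int wil_probe_dma_mask(struct device *dev, bool *use_extended)
{
	/* prefer the device's 48-bit addressing capability */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48))) {
		*use_extended = true;
		return 0;
	}
	/* fall back to the traditional 32-bit mask */
	*use_extended = false;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}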
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index a0acb2d0cb79..2ae4fe85cc8c 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -71,6 +71,11 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
rc = wil_down(wil);
@@ -80,12 +85,24 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
}
}
- if (wil->platform_ops.suspend)
+ /* Disable PCIe IRQ to prevent sporadic IRQs when PCIe is suspending */
+ wil_dbg_pm(wil, "Disabling PCIe IRQ before suspending\n");
+ wil_disable_irq(wil);
+
+ if (wil->platform_ops.suspend) {
rc = wil->platform_ops.suspend(wil->platform_handle);
+ if (rc) {
+ wil_enable_irq(wil);
+ goto out;
+ }
+ }
+
+ set_bit(wil_status_suspended, wil->status);
out:
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
+
return rc;
}
@@ -104,12 +121,18 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
}
}
+ wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
+ wil_enable_irq(wil);
+
/* if netif up, bring hardware up
* During open(), IFF_UP set after actual device method
- * invocation. This prevent recursive call to wil_up()
+ * invocation. This prevents a recursive call to wil_up().
+ * wil_status_suspended will be cleared in wil_reset.
*/
if (ndev->flags & IFF_UP)
rc = wil_up(wil);
+ else
+ clear_bit(wil_status_suspended, wil->status);
out:
wil_dbg_pm(wil, "resume: %s => %d\n",
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 3ff4f4ce9fef..2e301b6b32a9 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -107,13 +107,28 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
/* Allocate pring buffer and descriptors.
* vring->va should be aligned on its size rounded up to power of 2
- * This is granted by the dma_alloc_coherent
+ * This is guaranteed by dma_alloc_coherent.
+ *
+ * The HW has a limitation that all vring addresses must share the
+ * same upper 16 bits of the 48-bit address. To work around that,
+ * if we are using 48-bit addresses, switch to a 32-bit allocation
+ * before allocating the vring memory.
+ *
+ * There's no check of the return value of dma_set_mask_and_coherent,
+ * since we assume that if we were able to set the mask during
+ * initialization it will not fail if we set it again here.
*/
+ if (wil->use_extended_dma_addr)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
pmc->pring_va = dma_alloc_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
&pmc->pring_pa,
GFP_KERNEL);
+ if (wil->use_extended_dma_addr)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+
wil_dbg_misc(wil,
"pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
pmc->pring_va, &pmc->pring_pa,
@@ -185,7 +200,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
release_pmc_skbs:
wil_err(wil, "exit on error: Releasing skbs...\n");
- for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
+ for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
descriptor_size,
pmc->descriptors[i].va,
@@ -268,7 +283,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
int i;
for (i = 0;
- pmc->descriptors[i].va && i < pmc->num_descriptors; i++) {
+ i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
dma_free_coherent(dev,
pmc->descriptor_size,
pmc->descriptors[i].va,
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 7404b6f39c6a..a43cffcf1bbf 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -343,8 +343,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
status = WLAN_STATUS_INVALID_QOS_PARAM;
}
- if (status == WLAN_STATUS_SUCCESS)
- agg_wsize = wil_agg_size(wil, req_agg_wsize);
+ if (status == WLAN_STATUS_SUCCESS) {
+ if (req_agg_wsize == 0) {
+ wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
+ WIL_MAX_AGG_WSIZE);
+ agg_wsize = WIL_MAX_AGG_WSIZE;
+ } else {
+ agg_wsize = min_t(u16,
+ WIL_MAX_AGG_WSIZE, req_agg_wsize);
+ }
+ }
rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
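
A requested window size of 0 in an ADDBA request means "no preference", so the responder now proposes its own maximum instead of treating 0 literally; non-zero requests are clamped. A tiny sketch of the policy (WIL_MAX_AGG_WSIZE's value of 64 is an assumption based on the driver's defaults):

/* pick_agg_wsize(0, 64) == 64, pick_agg_wsize(16, 64) == 16,
 * pick_agg_wsize(128, 64) == 64
 */
static unsigned short pick_agg_wsize(unsigned short req, unsigned short max)
{
	return req == 0 ? max : (req < max ? req : max);
}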
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 072182e527e6..edab4c0a900f 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -37,6 +37,10 @@ bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
+bool rx_large_buf;
+module_param(rx_large_buf, bool, 0444);
+MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
+
static inline uint wil_rx_snaplen(void)
{
return rx_align_2 ? 6 : 0;
@@ -123,15 +127,32 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
vring->va = NULL;
return -ENOMEM;
}
+
/* vring->va should be aligned on its size rounded up to power of 2
- * This is granted by the dma_alloc_coherent
+ * This is guaranteed by dma_alloc_coherent.
+ *
+ * The HW has a limitation that all vring addresses must share the
+ * same upper 16 bits of the 48-bit address. To work around that,
+ * if we are using 48-bit addresses, switch to a 32-bit allocation
+ * before allocating the vring memory.
+ *
+ * There's no check of the return value of dma_set_mask_and_coherent,
+ * since we assume that if we were able to set the mask during
+ * initialization it will not fail if we set it again here.
*/
+ if (wil->use_extended_dma_addr)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
if (!vring->va) {
kfree(vring->ctx);
vring->ctx = NULL;
return -ENOMEM;
}
+
+ if (wil->use_extended_dma_addr)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+
/* initially, all descriptors are SW owned
* For Tx and Rx, ownership bit is at the same location, thus
* we can use any
@@ -238,7 +259,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
u32 i, int headroom)
{
struct device *dev = wil_to_dev(wil);
- unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
+ unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d = &vring->va[i].rx;
dma_addr_t pa;
@@ -402,7 +423,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
struct sk_buff *skb;
dma_addr_t pa;
unsigned int snaplen = wil_rx_snaplen();
- unsigned int sz = mtu_max + ETH_HLEN + snaplen;
+ unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
u16 dmalen;
u8 ftype;
int cid;
@@ -763,6 +784,20 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
wil_rx_refill(wil, v->size);
}
+static void wil_rx_buf_len_init(struct wil6210_priv *wil)
+{
+ wil->rx_buf_len = rx_large_buf ?
+ WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
+ if (mtu_max > wil->rx_buf_len) {
+ /* do not allow RX buffers to be smaller than mtu_max, for
+ * backward compatibility (mtu_max parameter was also used
+ * to support receiving large packets)
+ */
+ wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
+ wil->rx_buf_len = mtu_max;
+ }
+}
+
int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
struct vring *vring = &wil->vring_rx;
@@ -775,6 +810,8 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
return -EINVAL;
}
+ wil_rx_buf_len_init(wil);
+
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
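
Because all vrings must share the same upper 16 address bits, the allocation above temporarily narrows the DMA mask so dma_alloc_coherent() returns a 32-bit address, then restores the wide mask for ordinary buffers. A kernel-context sketch of that window (the helper name is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_ring_low32(struct device *dev, size_t sz,
			      dma_addr_t *pa, bool extended)
{
	void *va;

	if (extended)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	va = dma_alloc_coherent(dev, sz, pa, GFP_KERNEL);

	if (extended)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));

	return va;
}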
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 085a2dbfa21d..b00c803a1e83 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -32,6 +32,7 @@ extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;
+extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
@@ -40,6 +41,7 @@ extern bool disable_ap_sme;
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
+#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
/**
@@ -139,6 +141,7 @@ struct RGF_ICR {
#define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018)
#define BIT_USER_OOB_MODE BIT(31)
+ #define BIT_USER_OOB_R2_MODE BIT(30)
#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
@@ -409,6 +412,7 @@ enum { /* for wil6210_priv.status */
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
+ wil_status_suspended, /* suspend completed, device is suspended */
wil_status_last /* keep last */
};
@@ -612,6 +616,8 @@ struct wil6210_priv {
u16 channel; /* relevant in AP mode */
int sinfo_gen;
u32 ap_isolate; /* no intra-BSS communication */
+ struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
+ int locally_generated_disc; /* relevant in STA mode */
/* interrupt moderation */
u32 tx_max_burst_duration;
u32 tx_interframe_timeout;
@@ -652,11 +658,13 @@ struct wil6210_priv {
struct work_struct probe_client_worker;
/* DMA related */
struct vring vring_rx;
+ unsigned int rx_buf_len;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
int bcast_vring;
+ bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
/* scan */
struct cfg80211_scan_request *scan_request;
@@ -686,6 +694,8 @@ struct wil6210_priv {
/* High Access Latency Policy voting */
struct wil_halp halp;
+ enum wmi_ps_profile_type ps_profile;
+
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notify;
@@ -764,6 +774,12 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
print_hex_dump_debug("DBG[ WMI]" prefix_str,\
prefix_type, rowsize, \
groupsize, buf, len, ascii)
+
+#define wil_hex_dump_misc(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[MISC]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
#else /* defined(CONFIG_DYNAMIC_DEBUG) */
static inline
void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize,
@@ -776,18 +792,18 @@ void wil_hex_dump_wmi(const char *prefix_str, int prefix_type, int rowsize,
int groupsize, const void *buf, size_t len, bool ascii)
{
}
+
+static inline
+void wil_hex_dump_misc(const char *prefix_str, int prefix_type, int rowsize,
+ int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
- const volatile void __iomem *src,
- size_t count);
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
- volatile void __iomem *dst,
- const void *src, size_t count);
void *wil_if_alloc(struct device *dev);
void wil_if_free(struct wil6210_priv *wil);
@@ -795,6 +811,8 @@ int wil_if_add(struct wil6210_priv *wil);
void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_ps_update(struct wil6210_priv *wil,
+ enum wmi_ps_profile_type ps_profile);
int wil_reset(struct wil6210_priv *wil, bool no_fw);
void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_set_recovery_state(struct wil6210_priv *wil, int state);
@@ -899,7 +917,7 @@ int wmi_pcp_stop(struct wil6210_priv *wil);
int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
int wmi_abort_scan(struct wil6210_priv *wil);
void wil_abort_scan(struct wil6210_priv *wil, bool sync);
-
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 1f22c19696b1..814c35645b73 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -518,16 +518,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
assoc_resp_ielen = 0;
}
- mutex_lock(&wil->mutex);
if (test_bit(wil_status_resetting, wil->status) ||
!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "status_resetting, cancel connect event, CID %d\n",
evt->cid);
- mutex_unlock(&wil->mutex);
/* no need for cleanup, wil_reset will do that */
return;
}
+ mutex_lock(&wil->mutex);
+
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (!test_bit(wil_status_fwconnecting, wil->status)) {
@@ -565,6 +565,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (rc) {
netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
wil_err(wil, "cfg80211_connect_result with failure\n");
cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
NULL, 0,
@@ -572,12 +573,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
GFP_KERNEL);
goto out;
} else {
- cfg80211_connect_result(ndev, evt->bssid,
- assoc_req_ie, assoc_req_ielen,
- assoc_resp_ie, assoc_resp_ielen,
- WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+
+ cfg80211_ref_bss(wiphy, wil->bss);
+ cfg80211_connect_bss(ndev, evt->bssid, wil->bss,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL,
+ NL80211_TIMEOUT_UNSPECIFIED);
}
+ wil->bss = NULL;
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
if (rc) {
@@ -626,6 +631,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
wil->sinfo_gen++;
+ if (test_bit(wil_status_resetting, wil->status) ||
+ !test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "status_resetting, cancel disconnect event\n");
+ /* no need for cleanup, wil_reset will do that */
+ return;
+ }
+
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, evt->bssid, reason_code, true);
mutex_unlock(&wil->mutex);
@@ -1393,7 +1405,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
struct wmi_cfg_rx_chain_cmd cmd = {
.action = WMI_RX_CHAIN_ADD,
.rx_sw_ring = {
- .max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .max_mpdu_size = cpu_to_le16(
+ wil_mtu2macbuf(wil->rx_buf_len)),
.ring_mem_base = cpu_to_le64(vring->pa),
.ring_size = cpu_to_le16(vring->size),
},
@@ -1492,6 +1505,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac,
wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
+ wil->locally_generated_disc = true;
if (del_sta) {
ether_addr_copy(del_sta_cmd.dst_mac, mac);
rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd,
@@ -1733,14 +1747,19 @@ int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid)
void wmi_event_flush(struct wil6210_priv *wil)
{
+ ulong flags;
struct pending_wmi_event *evt, *t;
wil_dbg_wmi(wil, "event_flush\n");
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
list_del(&evt->list);
kfree(evt);
}
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
}
static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 7c9fee57aa91..f7f5f4f801e3 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -58,6 +58,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3,
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
WMI_FW_CAPABILITY_WMI_ONLY = 5,
+ WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7,
WMI_FW_CAPABILITY_MAX,
};
@@ -142,8 +143,6 @@ enum wmi_command_id {
WMI_MAINTAIN_RESUME_CMDID = 0x851,
WMI_RS_MGMT_CMDID = 0x852,
WMI_RF_MGMT_CMDID = 0x853,
- WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
- WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
WMI_OTP_READ_CMDID = 0x856,
WMI_OTP_WRITE_CMDID = 0x857,
WMI_LED_CFG_CMDID = 0x858,
@@ -192,6 +191,8 @@ enum wmi_command_id {
WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931,
WMI_NEW_STA_CMDID = 0x935,
WMI_DEL_STA_CMDID = 0x936,
+ WMI_SET_THERMAL_THROTTLING_CFG_CMDID = 0x940,
+ WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941,
WMI_TOF_SESSION_START_CMDID = 0x991,
WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
WMI_TOF_SET_LCR_CMDID = 0x993,
@@ -438,16 +439,6 @@ struct wmi_rf_mgmt_cmd {
__le32 rf_mgmt_type;
} __packed;
-/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
-#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
-
-/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
-struct wmi_thermal_throttling_ctrl_cmd {
- __le32 time_on_usec;
- __le32 time_off_usec;
- __le32 max_txop_length_usec;
-} __packed;
-
/* WMI_RF_RX_TEST_CMDID */
struct wmi_rf_rx_test_cmd {
__le32 sector;
@@ -549,7 +540,7 @@ struct wmi_pcp_start_cmd {
u8 hidden_ssid;
u8 is_go;
u8 reserved0[5];
- /* abft_len override if non-0 */
+ /* A-BFT length override if non-0 */
u8 abft_len;
u8 disable_ap_sme;
u8 network_type;
@@ -910,6 +901,39 @@ struct wmi_set_mgmt_retry_limit_cmd {
u8 reserved[3];
} __packed;
+/* Zones: HIGH, MAX, CRITICAL */
+#define WMI_NUM_OF_TT_ZONES (3)
+
+struct wmi_tt_zone_limits {
+ /* Above this temperature this zone is active */
+ u8 temperature_high;
+ /* Below this temperature the adjacent lower zone is active */
+ u8 temperature_low;
+ u8 reserved[2];
+} __packed;
+
+/* Struct used for both configuration and status commands of thermal
+ * throttling
+ */
+struct wmi_tt_data {
+ /* Enable/Disable TT algorithm for baseband */
+ u8 bb_enabled;
+ u8 reserved0[3];
+ /* Define zones for baseband */
+ struct wmi_tt_zone_limits bb_zones[WMI_NUM_OF_TT_ZONES];
+ /* Enable/Disable TT algorithm for radio */
+ u8 rf_enabled;
+ u8 reserved1[3];
+ /* Define zones for all radio chips */
+ struct wmi_tt_zone_limits rf_zones[WMI_NUM_OF_TT_ZONES];
+} __packed;
+
+/* WMI_SET_THERMAL_THROTTLING_CFG_CMDID */
+struct wmi_set_thermal_throttling_cfg_cmd {
+ /* Command data */
+ struct wmi_tt_data tt_data;
+} __packed;
+
/* WMI_NEW_STA_CMDID */
struct wmi_new_sta_cmd {
u8 dst_mac[WMI_MAC_LEN];
@@ -1040,7 +1064,6 @@ enum wmi_event_id {
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
- WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
@@ -1090,6 +1113,8 @@ enum wmi_event_id {
WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931,
+ WMI_SET_THERMAL_THROTTLING_CFG_EVENTID = 0x1940,
+ WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941,
WMI_TOF_SESSION_END_EVENTID = 0x1991,
WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
WMI_TOF_SET_LCR_EVENTID = 0x1993,
@@ -1133,13 +1158,6 @@ struct wmi_rf_mgmt_status_event {
__le32 rf_status;
} __packed;
-/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */
-struct wmi_thermal_throttling_status_event {
- __le32 time_on_usec;
- __le32 time_off_usec;
- __le32 max_txop_length_usec;
-} __packed;
-
/* WMI_GET_STATUS_DONE_EVENTID */
struct wmi_get_status_done_event {
__le32 is_associated;
@@ -2206,6 +2224,19 @@ struct wmi_tof_get_capabilities_event {
__le32 aoa_supported_types;
} __packed;
+/* WMI_SET_THERMAL_THROTTLING_CFG_EVENTID */
+struct wmi_set_thermal_throttling_cfg_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_GET_THERMAL_THROTTLING_CFG_EVENTID */
+struct wmi_get_thermal_throttling_cfg_event {
+ /* Status data */
+ struct wmi_tt_data tt_data;
+} __packed;
+
enum wmi_tof_session_end_status {
WMI_TOF_SESSION_END_NO_ERROR = 0x00,
WMI_TOF_SESSION_END_FAIL = 0x01,
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 0e180677c7fc..09defbcedd5e 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2377,6 +2377,8 @@ static int at76_init_new_device(struct at76_priv *priv,
wiphy->hw_version = priv->board_type;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n",
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index e12f62356fd1..27b110dc8cc6 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -513,7 +513,7 @@ struct atmel_private {
} station_state;
int operating_mode, power_mode;
- time_t last_qual;
+ unsigned long last_qual;
int beacons_this_sec;
int channel;
int reg_domain, config_reg_domain;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 52f3541ecbcf..d23aac7503d3 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5598,6 +5598,8 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
wl->hw_registred = false;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index b068d5aeee24..1b9c191e2a22 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -694,7 +694,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
if (unlikely(phystat0 & (B43_RX_PHYST0_PLCPHCF | B43_RX_PHYST0_PLCPFV)))
status.flag |= RX_FLAG_FAILED_PLCP_CRC;
if (phystat0 & B43_RX_PHYST0_SHORTPRMBL)
- status.flag |= RX_FLAG_SHORTPRE;
+ status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (macstat & B43_RX_MAC_DECERR) {
/* Decryption with the given key failed.
* Drop the packet. We also won't be able to decrypt it with
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index cdafebb9c936..f1e3dad57629 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3850,6 +3850,8 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
else
SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
/* Get and initialize struct b43legacy_wl */
wl = hw_to_b43legacy_wl(hw);
memset(wl, 0, sizeof(*wl));
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index ab42b1fea03c..9d99eb42d917 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -18,14 +18,14 @@ config BRCMSMAC
module, the driver will be called brcmsmac.ko.
config BRCMFMAC
- tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
+ tristate "Broadcom FullMAC WLAN driver"
depends on CFG80211
select BRCMUTIL
---help---
- This module adds support for embedded wireless adapters based on
- Broadcom IEEE802.11n FullMAC chipsets. It has to work with at least
- one of the bus interface support. If you choose to build a module,
- it'll be called brcmfmac.ko.
+ This module adds support for wireless adapters based on Broadcom
+ FullMAC chipsets. It requires support for at least one of the bus
+ interfaces. If you choose to build a module, it'll be called
+ brcmfmac.ko.
config BRCMFMAC_PROTO_BCDC
bool
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index 0383ba559edc..1f5a9b948abf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -25,7 +25,6 @@ brcmfmac-objs += \
chip.o \
fwil.o \
fweh.o \
- fwsignal.o \
p2p.o \
proto.o \
common.o \
@@ -36,7 +35,8 @@ brcmfmac-objs += \
vendor.o \
pno.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
- bcdc.o
+ bcdc.o \
+ fwsignal.o
brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
commonring.o \
flowring.o \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 384b1873e7e3..9f2d0b0cf6e5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -103,9 +103,17 @@ struct brcmf_bcdc {
u8 bus_header[BUS_HEADER_LEN];
struct brcmf_proto_bcdc_dcmd msg;
unsigned char buf[BRCMF_DCMD_MAXLEN];
+ struct brcmf_fws_info *fws;
};
+struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr)
+{
+ struct brcmf_bcdc *bcdc = drvr->proto->pd;
+
+ return bcdc->fws;
+}
+
static int
brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
uint len, bool set)
@@ -330,8 +338,9 @@ static int brcmf_proto_bcdc_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
struct sk_buff *skb)
{
struct brcmf_if *ifp = brcmf_get_ifp(drvr, ifidx);
+ struct brcmf_bcdc *bcdc = drvr->proto->pd;
- if (!brcmf_fws_queue_skbs(drvr->fws))
+ if (!brcmf_fws_queue_skbs(bcdc->fws))
return brcmf_proto_txdata(drvr, ifidx, 0, skb);
return brcmf_fws_process_skb(ifp, skb);
@@ -345,6 +354,36 @@ brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
return brcmf_bus_txdata(drvr->bus_if, pktbuf);
}
+void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ brcmf_fws_bus_blocked(drvr, state);
+}
+
+void
+brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
+ bool success)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_bcdc *bcdc = bus_if->drvr->proto->pd;
+ struct brcmf_if *ifp;
+
+ /* await txstatus signal from firmware if flow control is active */
+ if (brcmf_fws_fc_active(bcdc->fws)) {
+ if (!success)
+ brcmf_fws_bustxfail(bcdc->fws, txp);
+ } else {
+ if (brcmf_proto_bcdc_hdrpull(bus_if->drvr, false, txp, &ifp))
+ brcmu_pkt_buf_free_skb(txp);
+ else
+ brcmf_txfinalize(ifp, txp, success);
+ }
+}
+
static void
brcmf_proto_bcdc_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
enum proto_addr_mode addr_mode)
@@ -369,6 +408,38 @@ static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
brcmf_fws_rxreorder(ifp, skb);
}
+static void
+brcmf_proto_bcdc_add_if(struct brcmf_if *ifp)
+{
+ brcmf_fws_add_interface(ifp);
+}
+
+static void
+brcmf_proto_bcdc_del_if(struct brcmf_if *ifp)
+{
+ brcmf_fws_del_interface(ifp);
+}
+
+static void
+brcmf_proto_bcdc_reset_if(struct brcmf_if *ifp)
+{
+ brcmf_fws_reset_interface(ifp);
+}
+
+static int
+brcmf_proto_bcdc_init_done(struct brcmf_pub *drvr)
+{
+ struct brcmf_bcdc *bcdc = drvr->proto->pd;
+ struct brcmf_fws_info *fws;
+
+ fws = brcmf_fws_attach(drvr);
+ if (IS_ERR(fws))
+ return PTR_ERR(fws);
+
+ bcdc->fws = fws;
+ return 0;
+}
+
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
{
struct brcmf_bcdc *bcdc;
@@ -392,6 +463,10 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer;
drvr->proto->rxreorder = brcmf_proto_bcdc_rxreorder;
+ drvr->proto->add_if = brcmf_proto_bcdc_add_if;
+ drvr->proto->del_if = brcmf_proto_bcdc_del_if;
+ drvr->proto->reset_if = brcmf_proto_bcdc_reset_if;
+ drvr->proto->init_done = brcmf_proto_bcdc_init_done;
drvr->proto->pd = bcdc;
drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
@@ -406,6 +481,9 @@ fail:
void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
{
- kfree(drvr->proto->pd);
+ struct brcmf_bcdc *bcdc = drvr->proto->pd;
+
drvr->proto->pd = NULL;
+ brcmf_fws_detach(bcdc->fws);
+ kfree(bcdc);
}
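The net effect of this bcdc.c hunk is that the firmware-signalling state leaves the common brcmf_pub and becomes private to the BCDC protocol, reachable only through drvr_to_fws(). A stubbed, self-contained sketch of that ownership pattern, with the types reduced to toys:

    #include <stdlib.h>

    struct fws_info { int fc_active; };
    struct bcdc     { struct fws_info *fws; }; /* protocol-private data */
    struct proto    { void *pd; };
    struct pub      { struct proto *proto; };

    /* Only the protocol layer knows the concrete type behind pd. */
    static struct fws_info *drvr_to_fws(struct pub *drvr)
    {
    	struct bcdc *bcdc = drvr->proto->pd;

    	return bcdc->fws;
    }

    /* Detach in the same order as the patch: unhook pd first, then
     * free the fws object, then the container. */
    static void bcdc_detach(struct pub *drvr)
    {
    	struct bcdc *bcdc = drvr->proto->pd;

    	drvr->proto->pd = NULL;
    	free(bcdc->fws);
    	free(bcdc);
    }

    int main(void)
    {
    	struct proto p = { 0 };
    	struct pub drvr = { &p };
    	struct bcdc *bcdc = calloc(1, sizeof(*bcdc));

    	if (!bcdc)
    		return 1;
    	bcdc->fws = calloc(1, sizeof(*bcdc->fws));
    	if (!bcdc->fws) {
    		free(bcdc);
    		return 1;
    	}
    	p.pd = bcdc;

    	drvr_to_fws(&drvr)->fc_active = 1;
    	bcdc_detach(&drvr);
    	return 0;
    }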
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
index 6003179c0ceb..3b0e9eff21b5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
@@ -19,6 +19,10 @@
#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state);
+void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
+ bool success);
+struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr);
#else
static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 5bc2ba214735..9b970dc2b922 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -21,6 +21,7 @@
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
+#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 76693df34742..b55c3293c4b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -229,11 +229,6 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings);
void brcmf_detach(struct device *dev);
/* Indication from bus module that dongle should be reset */
void brcmf_dev_reset(struct device *dev);
-/* Indication from bus module to change flow-control state */
-void brcmf_txflowblock(struct device *dev, bool state);
-
-/* Notify the bus has transferred the tx packet to firmware */
-void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
/* Configure the "global" bus state used by upper layers */
void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 944b83cfc519..cd1d6730eab7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -575,12 +575,11 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
*
* @wiphy: wiphy device of new interface.
* @name: name of the new interface.
- * @flags: not used.
* @params: contains mac address for AP device.
*/
static
struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
- u32 *flags, struct vif_params *params)
+ struct vif_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
@@ -653,7 +652,6 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params)
{
struct wireless_dev *wdev;
@@ -674,12 +672,12 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
case NL80211_IFTYPE_MESH_POINT:
return ERR_PTR(-EOPNOTSUPP);
case NL80211_IFTYPE_AP:
- wdev = brcmf_ap_add_vif(wiphy, name, flags, params);
+ wdev = brcmf_ap_add_vif(wiphy, name, params);
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
- wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params);
+ wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, params);
break;
case NL80211_IFTYPE_UNSPECIFIED:
default:
@@ -764,7 +762,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
brcmf_dbg(SCAN, "scheduled scan completed\n");
cfg->internal_escan = false;
if (!aborted)
- cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg), 0);
} else if (scan_request) {
struct cfg80211_scan_info info = {
.aborted = aborted,
@@ -858,7 +856,7 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
static s32
brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
@@ -3097,6 +3095,9 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
status = e->status;
+ if (status == BRCMF_E_STATUS_ABORT)
+ goto exit;
+
if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
brcmf_err("scan not ready, bsscfgidx=%d\n", ifp->bsscfgidx);
return -EPERM;
@@ -3213,7 +3214,7 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
{
struct ieee80211_channel *chan;
enum nl80211_band band;
- int freq;
+ int freq, i;
if (channel <= CH_MAX_2G_CHANNEL)
band = NL80211_BAND_2GHZ;
@@ -3228,10 +3229,22 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
if (!chan)
return -EINVAL;
- req->channels[req->n_channels++] = chan;
- memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
- req->ssids[req->n_ssids++].ssid_len = ssid_len;
+ for (i = 0; i < req->n_channels; i++) {
+ if (req->channels[i] == chan)
+ break;
+ }
+ if (i == req->n_channels)
+ req->channels[req->n_channels++] = chan;
+ for (i = 0; i < req->n_ssids; i++) {
+ if (req->ssids[i].ssid_len == ssid_len &&
+ !memcmp(req->ssids[i].ssid, ssid, ssid_len))
+ break;
+ }
+ if (i == req->n_ssids) {
+ memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len);
+ req->ssids[req->n_ssids++].ssid_len = ssid_len;
+ }
return 0;
}
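The replacement body above stops blindly appending and instead deduplicates channels and SSIDs before growing the request. The same linear-scan dedup, in a self-contained toy form (the kernel code applies it to channel pointers and, with memcmp on the SSID bytes, to SSIDs):

    #include <assert.h>

    /* Append 'chan' only if it is not already present; returns the
     * new count. */
    static int add_unique(int *chans, int n, int chan)
    {
    	int i;

    	for (i = 0; i < n; i++)
    		if (chans[i] == chan)
    			return n; /* duplicate, request unchanged */
    	chans[n++] = chan;
    	return n;
    }

    int main(void)
    {
    	int chans[4], n = 0;

    	n = add_unique(chans, n, 6);
    	n = add_unique(chans, n, 11);
    	n = add_unique(chans, n, 6); /* ignored */
    	assert(n == 2);
    	return 0;
    }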
@@ -3297,6 +3310,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
struct brcmf_pno_scanresults_le *pfn_result;
u32 result_count;
u32 status;
+ u32 datalen;
brcmf_dbg(SCAN, "Enter\n");
@@ -3323,6 +3337,14 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
goto out_err;
}
+
+ netinfo_start = brcmf_get_netinfo_array(pfn_result);
+ datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result);
+ if (datalen < result_count * sizeof(*netinfo)) {
+ brcmf_err("insufficient event data\n");
+ goto out_err;
+ }
+
request = brcmf_alloc_internal_escan_request(wiphy,
result_count);
if (!request) {
@@ -3330,17 +3352,11 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
goto out_err;
}
- netinfo_start = brcmf_get_netinfo_array(pfn_result);
-
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
- if (!netinfo) {
- brcmf_err("Invalid netinfo ptr. index: %d\n",
- i);
- err = -EINVAL;
- goto out_err;
- }
+ if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
+ netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
netinfo->SSID, netinfo->channel);
err = brcmf_internal_escan_add_info(request,
@@ -3356,7 +3372,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
goto free_req;
out_err:
- cfg80211_sched_scan_stopped(wiphy);
+ cfg80211_sched_scan_stopped(wiphy, 0);
free_req:
kfree(request);
return err;
@@ -3389,7 +3405,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
}
static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
- struct net_device *ndev)
+ struct net_device *ndev, u64 reqid)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -3591,7 +3607,7 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
cfg->wowl.pre_pmmode);
cfg->wowl.active = false;
if (cfg->wowl.nd_enabled) {
- brcmf_cfg80211_sched_scan_stop(cfg->wiphy, ifp->ndev);
+ brcmf_cfg80211_sched_scan_stop(cfg->wiphy, ifp->ndev, 0);
brcmf_fweh_unregister(cfg->pub, BRCMF_E_PFN_NET_FOUND);
brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
brcmf_notify_sched_scan_results);
@@ -3675,7 +3691,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
/* Stop scheduled scan */
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
- brcmf_cfg80211_sched_scan_stop(wiphy, ndev);
+ brcmf_cfg80211_sched_scan_stop(wiphy, ndev, 0);
/* end any scanning */
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
@@ -5343,6 +5359,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
struct ieee80211_supported_band *band;
struct brcmf_bss_info_le *bi;
struct brcmu_chan ch;
+ struct cfg80211_roam_info roam_info = {};
u32 freq;
s32 err = 0;
u8 *buf;
@@ -5381,9 +5398,15 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
done:
kfree(buf);
- cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
- conn_info->req_ie, conn_info->req_ie_len,
- conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+
+ roam_info.channel = notify_channel;
+ roam_info.bssid = profile->bssid;
+ roam_info.req_ie = conn_info->req_ie;
+ roam_info.req_ie_len = conn_info->req_ie_len;
+ roam_info.resp_ie = conn_info->resp_ie;
+ roam_info.resp_ie_len = conn_info->resp_ie_len;
+
+ cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
brcmf_dbg(CONN, "Report roaming result\n");
set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
@@ -6358,11 +6381,11 @@ err:
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
/* scheduled scan settings */
+ wiphy->max_sched_scan_reqs = 1;
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
- wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
}
#ifdef CONFIG_PM
@@ -6450,7 +6473,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
- wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
+ wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+ WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_OFFCHAN_TX |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS))
@@ -6549,7 +6573,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
if (err)
goto default_conf_out;
err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
- NULL, NULL);
+ NULL);
if (err)
goto default_conf_out;
@@ -6736,6 +6760,10 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
s32 err;
int i;
+ /* The country code gets set to "00" by default at boot, ignore */
+ if (req->alpha2[0] == '0' && req->alpha2[1] == '0')
+ return;
+
/* ignore non-ISO3166 country codes */
for (i = 0; i < sizeof(req->alpha2); i++)
if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
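Taken together, the two checks in this notifier accept only real ISO3166 codes: the boot-time "00" world domain is skipped first, then any byte outside A–Z is rejected. A compact, self-contained equivalent of that logic:

    #include <assert.h>
    #include <stdbool.h>

    /* true only for plausible ISO3166 alpha2 codes; "00" is the
     * default world regdomain reported at boot and is ignored. */
    static bool alpha2_usable(const char a[2])
    {
    	if (a[0] == '0' && a[1] == '0')
    		return false;
    	return a[0] >= 'A' && a[0] <= 'Z' &&
    	       a[1] >= 'A' && a[1] <= 'Z';
    }

    int main(void)
    {
    	assert(!alpha2_usable("00"));
    	assert(alpha2_usable("DE"));
    	assert(!alpha2_usable("d3"));
    	return 0;
    }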
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 33b133f7e63a..7a2b49587b4d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -161,7 +161,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
strsep(&ptr, "\n");
/* Print fw version info */
- brcmf_err("Firmware version = %s\n", buf);
+ brcmf_info("Firmware version = %s\n", buf);
/* locate firmware version number for ethtool */
ptr = strrchr(buf, ' ') + 1;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 60da86a8d95b..a3d82368f1a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -32,7 +32,6 @@
#include "p2p.h"
#include "cfg80211.h"
#include "fwil.h"
-#include "fwsignal.h"
#include "feature.h"
#include "proto.h"
#include "pcie.h"
@@ -198,7 +197,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
int ret;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ struct ethhdr *eh;
brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
@@ -211,22 +210,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
goto done;
}
- /* Make sure there's enough room for any header */
- if (skb_headroom(skb) < drvr->hdrlen) {
- struct sk_buff *skb2;
-
- brcmf_dbg(INFO, "%s: insufficient headroom\n",
+ /* Make sure there's enough writable headroom */
+ ret = skb_cow_head(skb, drvr->hdrlen);
+ if (ret < 0) {
+ brcmf_err("%s: skb_cow_head failed\n",
brcmf_ifname(ifp));
- drvr->bus_if->tx_realloc++;
- skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
- skb = skb2;
- if (skb == NULL) {
- brcmf_err("%s: skb_realloc_headroom failed\n",
- brcmf_ifname(ifp));
- ret = -ENOMEM;
- goto done;
- }
+ goto done;
}
/* validate length for ether packet */
@@ -236,6 +226,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
goto done;
}
+ eh = (struct ethhdr *)(skb->data);
+
if (eh->h_proto == htons(ETH_P_PAE))
atomic_inc(&ifp->pend_8021x_cnt);
@@ -283,16 +275,6 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
-void brcmf_txflowblock(struct device *dev, bool state)
-{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_pub *drvr = bus_if->drvr;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- brcmf_fws_bus_blocked(drvr, state);
-}
-
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
if (skb->pkt_type == PACKET_MULTICAST)
@@ -393,24 +375,6 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
brcmu_pkt_buf_free_skb(txp);
}
-void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
-{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_pub *drvr = bus_if->drvr;
- struct brcmf_if *ifp;
-
- /* await txstatus signal for firmware if active */
- if (brcmf_fws_fc_active(drvr->fws)) {
- if (!success)
- brcmf_fws_bustxfail(drvr->fws, txp);
- } else {
- if (brcmf_proto_hdrpull(drvr, false, txp, &ifp))
- brcmu_pkt_buf_free_skb(txp);
- else
- brcmf_txfinalize(ifp, txp, success);
- }
-}
-
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -504,8 +468,9 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
ndev->needed_headroom += drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
- /* set the mac address */
+ /* set the mac address & netns */
memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+ dev_net_set(ndev, wiphy_net(cfg_to_wiphy(drvr->config)));
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable);
@@ -734,10 +699,28 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
return;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx,
ifp->ifidx);
- brcmf_fws_del_interface(ifp);
+ brcmf_proto_del_if(ifp->drvr, ifp);
brcmf_del_if(ifp->drvr, ifp->bsscfgidx, rtnl_locked);
}
+static int brcmf_psm_watchdog_notify(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *evtmsg,
+ void *data)
+{
+ int err;
+
+ brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx);
+
+ brcmf_err("PSM's watchdog has fired!\n");
+
+ err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+ evtmsg->datalen);
+ if (err)
+ brcmf_err("Failed to get memory dump, %d\n", err);
+
+ return err;
+}
+
#ifdef CONFIG_INET
#define ARPOL_MAX_ENTRIES 8
static int brcmf_inetaddr_changed(struct notifier_block *nb,
@@ -917,6 +900,10 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
goto fail;
}
+ /* Attach to events important for core code */
+ brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
+ brcmf_psm_watchdog_notify);
+
/* attach firmware event handler */
brcmf_fweh_attach(drvr);
@@ -992,11 +979,11 @@ int brcmf_bus_started(struct device *dev)
}
brcmf_feat_attach(drvr);
- ret = brcmf_fws_init(drvr);
+ ret = brcmf_proto_init_done(drvr);
if (ret < 0)
goto fail;
- brcmf_fws_add_interface(ifp);
+ brcmf_proto_add_if(drvr, ifp);
drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev,
drvr->settings->p2p_enable);
@@ -1040,10 +1027,6 @@ fail:
brcmf_cfg80211_detach(drvr->config);
drvr->config = NULL;
}
- if (drvr->fws) {
- brcmf_fws_del_interface(ifp);
- brcmf_fws_deinit(drvr);
- }
brcmf_net_detach(ifp->ndev, false);
if (p2p_ifp)
brcmf_net_detach(p2p_ifp->ndev, false);
@@ -1109,8 +1092,6 @@ void brcmf_detach(struct device *dev)
brcmf_cfg80211_detach(drvr->config);
- brcmf_fws_deinit(drvr);
-
brcmf_bus_stop(drvr->bus_if);
brcmf_proto_detach(drvr);
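The headroom change earlier in this file's diff swaps an open-coded skb_realloc_headroom()/free dance for a single skb_cow_head() call, which guarantees writable, private headroom of the requested size and returns 0 or a negative errno. A toy model of that contract only — not the kernel implementation:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy buffer: allocation starts at data, payload sits at
     * data + headroom and is len bytes long. */
    struct toybuf {
    	unsigned char *data;
    	size_t headroom;
    	size_t len;
    	int shared; /* cloned buffers must not be written in place */
    };

    /* Ensure 'need' bytes of private, writable headroom, copying to a
     * fresh buffer only when the current one is short or shared.
     * Returns 0 on success, -ENOMEM on failure. */
    static int toy_cow_head(struct toybuf *b, size_t need)
    {
    	unsigned char *fresh;

    	if (b->headroom >= need && !b->shared)
    		return 0; /* already private and roomy enough */

    	fresh = malloc(need + b->len);
    	if (!fresh)
    		return -ENOMEM;
    	memcpy(fresh + need, b->data + b->headroom, b->len);
    	if (!b->shared)
    		free(b->data);
    	b->data = fresh;
    	b->headroom = need;
    	b->shared = 0;
    	return 0;
    }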
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 6aecd8dfd824..a4dd313140f3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -127,8 +127,6 @@ struct brcmf_pub {
struct brcmf_fweh_info fweh;
- struct brcmf_fws_info *fws;
-
struct brcmf_ampdu_rx_reorder
*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
index f4644cf371c7..1447a8352383 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
@@ -27,8 +27,8 @@
static struct dentry *root_folder;
-static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
- size_t len)
+int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+ size_t len)
{
void *dump;
size_t ramsize;
@@ -54,24 +54,6 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
return 0;
}
-static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
- const struct brcmf_event_msg *evtmsg,
- void *data)
-{
- int err;
-
- brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx);
-
- brcmf_err("PSM's watchdog has fired!\n");
-
- err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
- evtmsg->datalen);
- if (err)
- brcmf_err("Failed to get memory dump, %d\n", err);
-
- return err;
-}
-
void brcmf_debugfs_init(void)
{
root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
@@ -99,9 +81,7 @@ int brcmf_debug_attach(struct brcmf_pub *drvr)
if (IS_ERR(drvr->dbgfs_dir))
return PTR_ERR(drvr->dbgfs_dir);
-
- return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
- brcmf_debug_psm_watchdog_notify);
+ return 0;
}
void brcmf_debug_detach(struct brcmf_pub *drvr)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 066126123e96..fe264a5798f1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -59,6 +59,10 @@ void __brcmf_err(const char *func, const char *fmt, ...);
} while (0)
#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
+
+/* For debug/tracing purposes treat info messages as errors */
+#define brcmf_info brcmf_err
+
__printf(3, 4)
void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...);
#define brcmf_dbg(level, fmt, ...) \
@@ -77,6 +81,11 @@ do { \
#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
+#define brcmf_info(fmt, ...) \
+ do { \
+ pr_info("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define BRCMF_DATA_ON() 0
@@ -99,6 +108,7 @@ do { \
extern int brcmf_msg_level;
+struct brcmf_bus;
struct brcmf_pub;
#ifdef DEBUG
void brcmf_debugfs_init(void);
@@ -108,6 +118,8 @@ void brcmf_debug_detach(struct brcmf_pub *drvr);
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data));
+int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+ size_t len);
#else
static inline void brcmf_debugfs_init(void)
{
@@ -128,6 +140,12 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
{
return 0;
}
+static inline
+int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+ size_t len)
+{
+ return 0;
+}
#endif
#endif /* BRCMFMAC_DEBUG_H */
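The new brcmf_info above follows a common dual-definition pattern: under DEBUG/tracing builds it aliases the error path so info messages reach the trace sink, otherwise it prints at info level. A sketch of that shape with generic names (the ##__VA_ARGS__ form is the GNU extension the kernel uses):

    #include <stdio.h>

    #define log_err(fmt, ...) \
    	fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__)

    #ifdef DEBUG
    /* tracing builds: route info through the error/trace path */
    #define log_info log_err
    #else
    #define log_info(fmt, ...) \
    	printf("%s: " fmt, __func__, ##__VA_ARGS__)
    #endif

    int main(void)
    {
    	log_info("firmware version = %s\n", "1.2.3");
    	return 0;
    }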
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index c79306b57532..4eb1e1ce9ace 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -22,9 +22,9 @@
#include "core.h"
#include "debug.h"
#include "tracepoint.h"
-#include "fwsignal.h"
#include "fweh.h"
#include "fwil.h"
+#include "proto.h"
/**
* struct brcmf_fweh_queue_item - event item on event queue.
@@ -172,14 +172,14 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
if (IS_ERR(ifp))
return;
if (!is_p2pdev)
- brcmf_fws_add_interface(ifp);
+ brcmf_proto_add_if(drvr, ifp);
if (!drvr->fweh.evt_handler[BRCMF_E_IF])
if (brcmf_net_attach(ifp, false) < 0)
return;
}
if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
- brcmf_fws_reset_interface(ifp);
+ brcmf_proto_reset_if(drvr, ifp);
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 5f1a5929cb30..72373e59308e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -36,6 +36,7 @@
#include "p2p.h"
#include "cfg80211.h"
#include "proto.h"
+#include "bcdc.h"
#include "common.h"
/**
@@ -1586,7 +1587,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data)
{
- struct brcmf_fws_info *fws = ifp->drvr->fws;
+ struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
int i;
u8 *credits = data;
@@ -1617,7 +1618,7 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data)
{
- struct brcmf_fws_info *fws = ifp->drvr->fws;
+ struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
if (fws) {
brcmf_fws_lock(fws);
@@ -1826,7 +1827,7 @@ netif_rx:
void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
{
struct brcmf_skb_reorder_data *rd;
- struct brcmf_fws_info *fws = ifp->drvr->fws;
+ struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
u8 *signal_data;
s16 data_len;
u8 type;
@@ -2091,8 +2092,7 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
{
- struct brcmf_pub *drvr = ifp->drvr;
- struct brcmf_fws_info *fws = drvr->fws;
+ struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct ethhdr *eh = (struct ethhdr *)(skb->data);
int fifo = BRCMF_FWS_FIFO_BCMC;
@@ -2142,10 +2142,10 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp)
void brcmf_fws_add_interface(struct brcmf_if *ifp)
{
- struct brcmf_fws_info *fws = ifp->drvr->fws;
+ struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
struct brcmf_fws_mac_descriptor *entry;
- if (!ifp->ndev)
+ if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
return;
entry = &fws->desc.iface[ifp->ifidx];
@@ -2160,16 +2160,17 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
void brcmf_fws_del_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
+ struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
if (!entry)
return;
- brcmf_fws_lock(ifp->drvr->fws);
+ brcmf_fws_lock(fws);
ifp->fws_desc = NULL;
brcmf_dbg(TRACE, "deleting %s\n", entry->name);
brcmf_fws_macdesc_deinit(entry);
- brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
- brcmf_fws_unlock(ifp->drvr->fws);
+ brcmf_fws_cleanup(fws, ifp->ifidx);
+ brcmf_fws_unlock(fws);
}
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
@@ -2243,7 +2244,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
{
struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
- struct brcmf_fws_stats *fwstats = &bus_if->drvr->fws->stats;
+ struct brcmf_fws_stats *fwstats = &(drvr_to_fws(bus_if->drvr)->stats);
seq_printf(seq,
"header_pulls: %u\n"
@@ -2308,7 +2309,7 @@ static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
}
#endif
-int brcmf_fws_init(struct brcmf_pub *drvr)
+struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
{
struct brcmf_fws_info *fws;
struct brcmf_if *ifp;
@@ -2316,17 +2317,15 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
int rc;
u32 mode;
- drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
- if (!drvr->fws) {
+ fws = kzalloc(sizeof(*fws), GFP_KERNEL);
+ if (!fws) {
rc = -ENOMEM;
goto fail;
}
- fws = drvr->fws;
-
spin_lock_init(&fws->spinlock);
- /* set linkage back */
+ /* store drvr reference */
fws->drvr = drvr;
fws->fcmode = drvr->settings->fcmode;
@@ -2334,7 +2333,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
(fws->fcmode == BRCMF_FWS_FCMODE_NONE)) {
fws->avoid_queueing = true;
brcmf_dbg(INFO, "FWS queueing will be avoided\n");
- return 0;
+ return fws;
}
fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
@@ -2396,6 +2395,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
brcmf_fws_hanger_init(&fws->hanger);
brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
+ brcmf_dbg(INFO, "added %s\n", fws->desc.other.name);
brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
@@ -2405,27 +2405,24 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
fws->fw_signals ? "enabled" : "disabled", tlv);
- return 0;
+ return fws;
fail:
- brcmf_fws_deinit(drvr);
- return rc;
+ brcmf_fws_detach(fws);
+ return ERR_PTR(rc);
}
-void brcmf_fws_deinit(struct brcmf_pub *drvr)
+void brcmf_fws_detach(struct brcmf_fws_info *fws)
{
- struct brcmf_fws_info *fws = drvr->fws;
-
if (!fws)
return;
- if (drvr->fws->fws_wq)
- destroy_workqueue(drvr->fws->fws_wq);
+ if (fws->fws_wq)
+ destroy_workqueue(fws->fws_wq);
/* cleanup */
brcmf_fws_lock(fws);
brcmf_fws_cleanup(fws, -1);
- drvr->fws = NULL;
brcmf_fws_unlock(fws);
/* free top structure */
@@ -2461,7 +2458,7 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
{
- struct brcmf_fws_info *fws = drvr->fws;
+ struct brcmf_fws_info *fws = drvr_to_fws(drvr);
struct brcmf_if *ifp;
int i;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 96df66073b2a..ba07bd972002 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -18,8 +18,8 @@
#ifndef FWSIGNAL_H_
#define FWSIGNAL_H_
-int brcmf_fws_init(struct brcmf_pub *drvr);
-void brcmf_fws_deinit(struct brcmf_pub *drvr);
+struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
+void brcmf_fws_detach(struct brcmf_fws_info *fws);
bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 85d949e03f79..aa299c47bfa2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2141,12 +2141,11 @@ fail:
* @name: name of the new interface.
* @name_assign_type: origin of the interface name
* @type: nl80211 interface type.
- * @flags: not used.
* @params: contains mac address for P2P device.
*/
struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 8ce9447533ef..0e8b34d2d85c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -150,7 +150,7 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params);
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 6fae4cf3f6ab..f36b96dc6acd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1877,6 +1877,7 @@ static int brcmf_pcie_pm_enter_D3(struct device *dev)
BRCMF_PCIE_MBDATA_TIMEOUT);
if (!devinfo->mbdata_completed) {
brcmf_err("Timeout on response for entering D3 substate\n");
+ brcmf_bus_change_state(bus, BRCMF_BUS_UP);
return -EIO;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 9a25e79a46cf..6c3bde83d070 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -182,7 +182,6 @@ int brcmf_pno_clean(struct brcmf_if *ifp)
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
struct cfg80211_sched_scan_request *req)
{
- struct brcmu_d11inf *d11inf;
struct brcmf_pno_config_le pno_cfg;
struct cfg80211_ssid *ssid;
u16 chan;
@@ -209,7 +208,6 @@ int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
}
/* configure channels to use */
- d11inf = &ifp->drvr->config->d11inf;
for (i = 0; i < req->n_channels; i++) {
chan = req->channels[i]->hw_value;
pno_cfg.channel_list[i] = cpu_to_le16(chan);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index 34b59feedeba..2404f8a7c31c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -44,6 +44,10 @@ struct brcmf_proto {
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
+ void (*add_if)(struct brcmf_if *ifp);
+ void (*del_if)(struct brcmf_if *ifp);
+ void (*reset_if)(struct brcmf_if *ifp);
+ int (*init_done)(struct brcmf_pub *drvr);
void *pd;
};
@@ -118,4 +122,36 @@ brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
ifp->drvr->proto->rxreorder(ifp, skb);
}
+static inline void
+brcmf_proto_add_if(struct brcmf_pub *drvr, struct brcmf_if *ifp)
+{
+ if (!drvr->proto->add_if)
+ return;
+ drvr->proto->add_if(ifp);
+}
+
+static inline void
+brcmf_proto_del_if(struct brcmf_pub *drvr, struct brcmf_if *ifp)
+{
+ if (!drvr->proto->del_if)
+ return;
+ drvr->proto->del_if(ifp);
+}
+
+static inline void
+brcmf_proto_reset_if(struct brcmf_pub *drvr, struct brcmf_if *ifp)
+{
+ if (!drvr->proto->reset_if)
+ return;
+ drvr->proto->reset_if(ifp);
+}
+
+static inline int
+brcmf_proto_init_done(struct brcmf_pub *drvr)
+{
+ if (!drvr->proto->init_done)
+ return 0;
+ return drvr->proto->init_done(drvr);
+}
+
#endif /* BRCMFMAC_PROTO_H */
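These wrappers make the four new ops optional: a protocol that does not implement one simply leaves the pointer NULL, and callers never test it themselves. The guard-then-call pattern in isolation, with stubbed types:

    #include <stdio.h>

    struct ops {
    	void (*add_if)(int ifidx); /* optional; may be NULL */
    };

    static inline void proto_add_if(struct ops *o, int ifidx)
    {
    	if (!o->add_if)
    		return; /* protocol has no per-interface state */
    	o->add_if(ifidx);
    }

    static void bcdc_add_if(int ifidx)
    {
    	printf("interface %d added\n", ifidx);
    }

    int main(void)
    {
    	struct ops msgbuf = { 0 };         /* op not implemented */
    	struct ops bcdc = { bcdc_add_if }; /* op implemented */

    	proto_add_if(&msgbuf, 0); /* harmless no-op */
    	proto_add_if(&bcdc, 0);
    	return 0;
    }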
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 65689469c5a1..fc64b8913aa6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -44,6 +44,7 @@
#include "firmware.h"
#include "core.h"
#include "common.h"
+#include "bcdc.h"
#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500)
#define CTL_DONE_TIMEOUT msecs_to_jiffies(2500)
@@ -539,7 +540,11 @@ static int qcount[NUMPRIO];
/* Limit on rounding up frames */
static const uint max_roundup = 512;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define ALIGNMENT 8
+#else
#define ALIGNMENT 4
+#endif
enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_NORMAL,
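The widened ALIGNMENT above tracks the DMA address width: 8-byte alignment when dma_addr_t is 64-bit, 4 otherwise. Assuming only that intent, a hypothetical single-expression equivalent of the #ifdef pair (not what the patch uses) would be:

    /* hypothetical equivalent; CONFIG_ARCH_DMA_ADDR_T_64BIT implies
     * sizeof(dma_addr_t) == 8 */
    #define ALIGNMENT (sizeof(dma_addr_t) == 8 ? 8 : 4)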
@@ -2265,7 +2270,8 @@ done:
bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
skb_queue_walk_safe(pktq, pkt_next, tmp) {
__skb_unlink(pkt_next, pktq);
- brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
+ brcmf_proto_bcdc_txcomplete(bus->sdiodev->dev, pkt_next,
+ ret == 0);
}
return ret;
}
@@ -2328,7 +2334,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
bus->txoff = false;
- brcmf_txflowblock(bus->sdiodev->dev, false);
+ brcmf_proto_bcdc_txflowblock(bus->sdiodev->dev, false);
}
return cnt;
@@ -2753,7 +2759,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
if (pktq_len(&bus->txq) >= TXHI) {
bus->txoff = true;
- brcmf_txflowblock(dev, true);
+ brcmf_proto_bcdc_txflowblock(dev, true);
}
spin_unlock_bh(&bus->txq_lock);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index d93ebbdc7737..e4d545f9edee 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -29,6 +29,7 @@
#include "usb.h"
#include "core.h"
#include "common.h"
+#include "bcdc.h"
#define IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000)
@@ -482,13 +483,13 @@ static void brcmf_usb_tx_complete(struct urb *urb)
req->skb);
brcmf_usb_del_fromq(devinfo, req);
- brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
+ brcmf_proto_bcdc_txcomplete(devinfo->dev, req->skb, urb->status == 0);
req->skb = NULL;
brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
devinfo->tx_flowblock) {
- brcmf_txflowblock(devinfo->dev, false);
+ brcmf_proto_bcdc_txflowblock(devinfo->dev, false);
devinfo->tx_flowblock = false;
}
spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
@@ -635,7 +636,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
!devinfo->tx_flowblock) {
- brcmf_txflowblock(dev, true);
+ brcmf_proto_bcdc_txflowblock(dev, true);
devinfo->tx_flowblock = true;
}
spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 7c2a9a9bc372..ddfdfe177e24 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -1082,6 +1082,8 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
*/
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
hw->rate_control_algorithm = "minstrel_ht";
hw->sta_data_size = 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index c2a938b59044..0a14942b8216 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -7092,9 +7092,9 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
rspec = brcms_c_compute_rspec(rxh, plcp);
if (is_mcs_rate(rspec)) {
rx_status->rate_idx = rspec & RSPEC_RATE_MASK;
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
if (rspec_is40mhz(rspec))
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
} else {
switch (rspec2rate(rspec)) {
case BRCM_RATE_1M:
@@ -7149,9 +7149,9 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
/* Determine short preamble and rate_idx */
if (is_cck_rate(rspec)) {
if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
- rx_status->flag |= RX_FLAG_SHORTPRE;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
} else if (is_ofdm_rate(rspec)) {
- rx_status->flag |= RX_FLAG_SHORTPRE;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
} else {
brcms_err(wlc->hw->d11core, "%s: Unknown modulation\n",
__func__);
@@ -7159,7 +7159,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
}
if (plcp3_issgi(plcp[3]))
- rx_status->flag |= RX_FLAG_SHORT_GI;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rxh->RxStatus1 & RXS_DECERR) {
rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
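The flag rewrites in this file (and in the b43 and iwlegacy hunks around it) all follow the same mac80211 change: encoding-related attributes leave rx_status->flag for dedicated fields, while genuine error flags such as RX_FLAG_FAILED_PLCP_CRC stay put. A structural sketch of the split — field names follow the patch, widths are illustrative:

    #include <stdint.h>

    /* Post-split receive status, reduced to the fields used here. */
    struct rx_status_model {
    	uint32_t flag;      /* errors/events: FAILED_PLCP_CRC, ...   */
    	uint8_t  encoding;  /* RX_ENC_LEGACY / RX_ENC_HT / ...       */
    	uint16_t enc_flags; /* SHORTPRE, SHORT_GI, 40MHZ, HT_GF, ... */
    	uint8_t  bw;        /* RATE_INFO_BW_20 / _40 / ...           */
    };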
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 5ef3c5cc47c5..bbc579b647b6 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3539,9 +3539,6 @@ static int ipw_load(struct ipw_priv *priv)
fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
le32_to_cpu(fw->ucode_size)];
- if (rc < 0)
- goto error;
-
if (!priv->rxq)
priv->rxq = ipw_rx_queue_alloc(priv);
else
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index e8e65115feba..38bf403bb1e1 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3592,6 +3592,8 @@ il3945_setup_mac(struct il_priv *il)
il_leds_init(il);
+ wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(il->hw);
if (ret) {
IL_ERR("Failed to register hw (error %d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 03ad9b8b55f4..b2f35dfbc01b 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -656,7 +656,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
rate_mask = sta->supp_rates[sband->band];
/* get user max rate if set */
- max_rate_idx = txrc->max_rate_idx;
+ max_rate_idx = fls(txrc->rate_idx_mask) - 1;
if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1)
max_rate_idx += IL_FIRST_OFDM_RATE;
if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
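txrc->max_rate_idx is gone from mac80211, so the replacement derives the cap from the rate mask: fls() returns the 1-based index of the highest set bit (0 for a zero argument), hence fls(mask) - 1 is the highest permitted rate index, or -1 when no cap is set — exactly what the surrounding range checks expect. A self-contained check of that arithmetic:

    #include <assert.h>

    /* Portable stand-in for the kernel's fls(): highest set bit,
     * 1-based; 0 when the argument is 0. */
    static int toy_fls(unsigned int x)
    {
    	int r = 0;

    	while (x) {
    		x >>= 1;
    		r++;
    	}
    	return r;
    }

    int main(void)
    {
    	assert(toy_fls(0x0) - 1 == -1); /* empty mask: no user cap */
    	assert(toy_fls(0x1) - 1 == 0);  /* only rate index 0 allowed */
    	assert(toy_fls(0xf) - 1 == 3);  /* rates 0..3: cap at 3 */
    	return 0;
    }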
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 4db327a95414..080ea8155b90 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -570,7 +570,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
/* set the preamble flag if appropriate */
if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
if ((unlikely(rx_stats->phy_count > 20))) {
D_DROP("dsp size out of range [0,20]: %d\n",
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 2781f5728d07..5d5faa3cad24 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -728,15 +728,15 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
/* set the preamble flag if appropriate */
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
+ rx_status.encoding = RX_ENC_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
+ rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
/* We know which subframes of an A-MPDU belong
@@ -5799,6 +5799,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
il_leds_init(il);
+ wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(il->hw);
if (ret) {
IL_ERR("Failed to register hw (error %d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index a867ae7f4095..c055f6da11c6 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2211,7 +2211,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
/* Get max rate if user set max rate */
if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
+ lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
if (sband->band == NL80211_BAND_5GHZ &&
lq_sta->max_rate_idx != -1)
lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 92e611841200..411cb91c102f 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,6 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o
iwlwifi-objs += iwl-trans.o
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 6c2d6da7eec6..74e52f7c5aa1 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+ iwl_trans_wait_tx_queues_empty(priv->trans, 0xffffffff);
done:
ieee80211_wake_queues(priv->hw);
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 2a04d0cd71ae..444c74371929 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -213,6 +213,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
iwl_leds_init(priv);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -1143,7 +1145,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
+ iwl_trans_wait_tx_queues_empty(priv->trans, scd_queues);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index ff44ebc5829d..ddcd8c2d66cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2720,7 +2720,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
/* Get max rate if user set max rate */
if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
+ lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
if ((sband->band == NL80211_BAND_5GHZ) &&
(lq_sta->max_rate_idx != -1))
lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index dfa2041cfdac..1ee1ba9931a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -873,7 +873,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* set the preamble flag if appropriate */
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
/*
@@ -887,13 +887,13 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
+ rx_status.encoding = RX_ENC_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
+ rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_MCS_GF_MSK)
- rx_status.flag |= RX_FLAG_HT_GF;
+ rx_status.enc_flags |= RX_ENC_FLAG_HT_GF;
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index a72e58623d3a..3b3e076571d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,8 +73,8 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
-#define IWL7265D_UCODE_API_MAX 28
-#define IWL3168_UCODE_API_MAX 28
+#define IWL7265D_UCODE_API_MAX 29
+#define IWL3168_UCODE_API_MAX 29
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 17
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index b7953bf55f6f..b9718c0cf174 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 28
-#define IWL8265_UCODE_API_MAX 28
+#define IWL8000_UCODE_API_MAX 30
+#define IWL8265_UCODE_API_MAX 30
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 17
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index a5f0c0bf85ec..110ceefccc15 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,10 +55,10 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 28
+#define IWL9000_UCODE_API_MAX 30
/* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN 17
+#define IWL9000_UCODE_API_MIN 30
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
@@ -73,14 +73,14 @@
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
-#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
+#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260_MODULE_FIRMWARE(api) \
- IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9000LC_MODULE_FIRMWARE(api) \
- IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9260A_MODULE_FIRMWARE(api) \
+ IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9260B_MODULE_FIRMWARE(api) \
+ IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -148,7 +148,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
- .fw_name_pre = IWL9260_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -168,7 +170,8 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
- .fw_name_pre = IWL9260_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_next_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -198,21 +201,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.integrated = true,
};
-/*
- * TODO the struct below is for internal testing only this should be
- * removed by EO 2016~
- */
-const struct iwl_cfg iwl9000lc_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9000",
- .fw_name_pre = IWL9000LC_FW_PRE,
- IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
-};
-
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
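The *_MODULE_FIRMWARE macros above rely on the kernel's two-level __stringify() so the API number becomes part of the firmware file name at compile time. A self-contained demonstration — the FW_PRE value is copied from the patch, the macro names are generic:

    #include <stdio.h>

    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)

    #define FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
    #define FW_NAME(api) FW_PRE "-" __stringify(api) ".ucode"

    int main(void)
    {
    	/* prints iwlwifi-9260-th-b0-jf-b0--30.ucode: the prefix
    	 * already ends in '-' and the macro inserts another, exactly
    	 * as the patch's concatenation does */
    	puts(FW_NAME(30));
    	return 0;
    }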
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
index 15dd7f6137c8..c648cfb981a3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 28
+#define IWL_A000_UCODE_API_MAX 30
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
@@ -65,15 +65,16 @@
#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */
/* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET 0x800000
-#define IWL_A000_DCCM_LEN 0x18000
+#define IWL_A000_DCCM_OFFSET 0x800000 /* LMAC1 */
+#define IWL_A000_DCCM_LEN 0x10000 /* LMAC1 */
#define IWL_A000_DCCM2_OFFSET 0x880000
#define IWL_A000_DCCM2_LEN 0x8000
#define IWL_A000_SMEM_OFFSET 0x400000
-#define IWL_A000_SMEM_LEN 0x68000
+#define IWL_A000_SMEM_LEN 0xD0000
-#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-a0-hrcdb-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
@@ -84,7 +85,7 @@
static const struct iwl_base_params iwl_a000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
- .num_of_queues = 31,
+ .num_of_queues = 512,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -121,7 +122,8 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.use_tfh = true, \
- .rf_id = true
+ .rf_id = true, \
+ .gen2 = true
const struct iwl_cfg iwla000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC a000",
@@ -133,6 +135,17 @@ const struct iwl_cfg iwla000_2ac_cfg_hr = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
+const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
+ .name = "Intel(R) Dual Band Wireless AC a000",
+ .fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .cdb = true,
+};
+
const struct iwl_cfg iwla000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC a000",
.fw_name_pre = IWL_A000_JF_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 94f8a51b633e..a12197e3ce78 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,16 +90,6 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
};
-static inline bool iwl_has_secure_boot(u32 hw_rev,
- enum iwl_device_family family)
-{
- /* return 1 only for family 8000 B0 */
- if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
- return true;
-
- return false;
-}
-
/*
* LED mode
* IWL_LED_DEFAULT: use device default
@@ -180,7 +170,7 @@ struct iwl_base_params {
apmg_wake_up_wa:1,
scd_chain_ext_wa:1;
- u8 num_of_queues; /* def: HW dependent */
+ u16 num_of_queues; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
@@ -283,6 +273,8 @@ struct iwl_pwr_tx_backoff {
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
+ * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ * (if supported)
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
@@ -321,6 +313,8 @@ struct iwl_pwr_tx_backoff {
* @vht_mu_mimo_supported: VHT MU-MIMO support
* @rf_id: need to read rf_id to determine the firmware image
* @integrated: discrete or integrated
+ * @gen2: a000 and later transport operation
+ * @cdb: CDB support
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -330,6 +324,7 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
+ const char *fw_name_pre_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
@@ -365,7 +360,9 @@ struct iwl_cfg {
vht_mu_mimo_supported:1,
rf_id:1,
integrated:1,
- use_tfh:1;
+ use_tfh:1,
+ gen2:1,
+ cdb:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@@ -449,13 +446,13 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl9000lc_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
+extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
#endif /* CONFIG_IWLMVM */
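Why num_of_queues widens from u8 to u16 in the hunk above: the a000 base params
now set .num_of_queues = 512, which does not fit in a u8. A minimal sketch of the
truncation the change avoids:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  old_num = (uint8_t)512;        /* truncates to 0 */
        uint16_t new_num = 512;                 /* fits */

        printf("u8: %u, u16: %u\n", (unsigned)old_num, (unsigned)new_num);
        return 0;
}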
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
new file mode 100644
index 000000000000..b870c0986744
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_context_info_file_h__
+#define __iwl_context_info_file_h__
+
+/* maximum number of DRAM map entries supported by FW */
+#define IWL_MAX_DRAM_ENTRY 64
+#define CSR_CTXT_INFO_BA 0x40
+
+/**
+ * enum iwl_context_info_flags - Context information control flags
+ * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting
+ * the init done for driver command that configures several system modes
+ * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
+ * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
+ * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * exponent, the actual size is 2**value, valid sizes are 8-2048.
+ * The value is four bits long. Maximum valid exponent is 12
+ * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use the long TFD format (the
+ *	default short format is not supported by the driver)
+ */
+enum iwl_context_info_flags {
+ IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
+ IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
+ IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
+ IWL_CTXT_INFO_RB_SIZE_4K = BIT(3),
+ IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
+ IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
+};
+
+/*
+ * struct iwl_context_info_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: context information version id
+ * @size: the size of the context information in DWs
+ */
+struct iwl_context_info_version {
+ __le16 mac_id;
+ __le16 version;
+ __le16 size;
+ __le16 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_control - control structure
+ * @control_flags: context information flags see &enum iwl_context_info_flags
+ */
+struct iwl_context_info_control {
+ __le32 control_flags;
+ __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_dram - images DRAM map
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @umac_img: UMAC image DRAM map
+ * @lmac_img: LMAC image DRAM map
+ * @virtual_img: paged image DRAM map
+ */
+struct iwl_context_info_dram {
+ __le64 umac_img[IWL_MAX_DRAM_ENTRY];
+ __le64 lmac_img[IWL_MAX_DRAM_ENTRY];
+ __le64 virtual_img[IWL_MAX_DRAM_ENTRY];
+} __packed;
+
+/*
+ * struct iwl_context_info_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @used_rbd_addr: default queue used RB CB base address
+ * @status_wr_ptr: default queue used RB status write pointer
+ */
+struct iwl_context_info_rbd_cfg {
+ __le64 free_rbd_addr;
+ __le64 used_rbd_addr;
+ __le64 status_wr_ptr;
+} __packed;
+
+/*
+ * struct iwl_context_info_hcmd_cfg - command queue configuration
+ * @cmd_queue_addr: address of command queue
+ * @cmd_queue_size: number of entries
+ */
+struct iwl_context_info_hcmd_cfg {
+ __le64 cmd_queue_addr;
+ u8 cmd_queue_size;
+ u8 reserved[7];
+} __packed;
+
+/*
+ * struct iwl_context_info_dump_cfg - Core Dump configuration
+ * @core_dump_addr: core dump (debug DRAM address) start address
+ * @core_dump_size: size, in DWs
+ */
+struct iwl_context_info_dump_cfg {
+ __le64 core_dump_addr;
+ __le32 core_dump_size;
+ __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_pnvm_cfg - platform NVM data configuration
+ * @platform_nvm_addr: Platform NVM data start address
+ * @platform_nvm_size: size in DWs
+ */
+struct iwl_context_info_pnvm_cfg {
+ __le64 platform_nvm_addr;
+ __le32 platform_nvm_size;
+ __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_early_dbg_cfg - early debug configuration for
+ * dumping DRAM addresses
+ * @early_debug_addr: early debug start address
+ * @early_debug_size: size in DWs
+ */
+struct iwl_context_info_early_dbg_cfg {
+ __le64 early_debug_addr;
+ __le32 early_debug_size;
+ __le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info - device INIT configuration
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @rbd_cfg: default RX queue configuration
+ * @hcmd_cfg: command queue configuration
+ * @dump_cfg: core dump data
+ * @edbg_cfg: early debug configuration
+ * @pnvm_cfg: platform nvm configuration
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwl_context_info {
+ struct iwl_context_info_version version;
+ struct iwl_context_info_control control;
+ __le64 reserved0;
+ struct iwl_context_info_rbd_cfg rbd_cfg;
+ struct iwl_context_info_hcmd_cfg hcmd_cfg;
+ __le32 reserved1[4];
+ struct iwl_context_info_dump_cfg dump_cfg;
+ struct iwl_context_info_early_dbg_cfg edbg_cfg;
+ struct iwl_context_info_pnvm_cfg pnvm_cfg;
+ __le32 reserved2[16];
+ struct iwl_context_info_dram dram;
+ __le32 reserved3[16];
+} __packed;
+
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_h__ */
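A hedged sketch of how the flags in this new header compose into the control
word: the RBD ring size goes in as an ilog2 exponent in bits 4-7, per the
@IWL_CTXT_INFO_RB_CB_SIZE_POS documentation. The particular flag combination is
illustrative, not taken from the driver; ilog2() is open-coded so the snippet
compiles standalone.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum iwl_context_info_flags {
        IWL_CTXT_INFO_AUTO_FUNC_INIT    = BIT(0),
        IWL_CTXT_INFO_EARLY_DEBUG       = BIT(1),
        IWL_CTXT_INFO_ENABLE_CDMP       = BIT(2),
        IWL_CTXT_INFO_RB_SIZE_4K        = BIT(3),
        IWL_CTXT_INFO_RB_CB_SIZE_POS    = 4,
        IWL_CTXT_INFO_TFD_FORMAT_LONG   = BIT(8),
};

static unsigned int ilog2_u32(uint32_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        /* 512-entry default RX ring, 4K buffers, long TFDs */
        uint32_t control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
                                 IWL_CTXT_INFO_RB_SIZE_4K |
                                 (ilog2_u32(512) << IWL_CTXT_INFO_RB_CB_SIZE_POS);

        printf("control_flags = 0x%x\n", control_flags);
        return 0;
}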
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 4ee3b621ec27..fa120fb55373 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -348,7 +348,6 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
-#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
#define CSR_HW_RF_ID_TYPE_HR (0x00109000)
/* EEPROM REG */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index be466a074c1d..5cfacb0bca84 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -211,24 +211,46 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
- const char *name_pre = drv->trans->cfg->fw_name_pre;
+ const struct iwl_cfg *cfg = drv->trans->cfg;
char tag[8];
+ const char *fw_pre_name;
+
+ if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
+ fw_pre_name = cfg->fw_name_pre_next_step;
+ else
+ fw_pre_name = cfg->fw_name_pre;
if (first) {
- drv->fw_index = drv->trans->cfg->ucode_api_max;
+ drv->fw_index = cfg->ucode_api_max;
sprintf(tag, "%d", drv->fw_index);
} else {
drv->fw_index--;
sprintf(tag, "%d", drv->fw_index);
}
- if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
+ if (drv->fw_index < cfg->ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
+
+ if (cfg->ucode_api_min == cfg->ucode_api_max) {
+ IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
+ cfg->ucode_api_max);
+ } else {
+ IWL_ERR(drv, "minimum version required: %s%d\n",
+ fw_pre_name,
+ cfg->ucode_api_min);
+ IWL_ERR(drv, "maximum version supported: %s%d\n",
+ fw_pre_name,
+ cfg->ucode_api_max);
+ }
+
+ IWL_ERR(drv,
+ "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n");
return -ENOENT;
}
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
- name_pre, tag);
+ fw_pre_name, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
@@ -1260,7 +1282,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
- return;
+ goto out_free_fw;
if (!ucode_raw)
goto try_again;
@@ -1472,7 +1494,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* or hangs loading.
*/
if (load_module) {
- err = request_module("%s", op->name);
+ request_module("%s", op->name);
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
if (err)
IWL_ERR(drv,
@@ -1490,17 +1512,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto free;
out_free_fw:
- IWL_ERR(drv, "failed to allocate pci memory\n");
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
out_unbind:
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
free:
- for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
- kfree(pieces->img[i].sec);
- kfree(pieces->dbg_mem_tlv);
- kfree(pieces);
+ if (pieces) {
+ for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
+ kfree(pieces->img[i].sec);
+ kfree(pieces->dbg_mem_tlv);
+ kfree(pieces);
+ }
}
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
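The firmware-name fallback that iwl_request_firmware() implements above is driven
by asynchronous completion callbacks; compressed into a plain loop (prefix and
API bounds invented for the demo), the naming scheme and the new error messages
look like this:

#include <stdio.h>

int main(void)
{
        const char *fw_pre_name = "iwlwifi-8000C-";     /* hypothetical */
        int api_max = 30, api_min = 24;

        for (int api = api_max; api >= api_min; api--)
                printf("attempting to load firmware '%s%d.ucode'\n",
                       fw_pre_name, api);

        /* What the driver now reports when every attempt fails: */
        printf("minimum version required: %s%d\n", fw_pre_name, api_min);
        printf("maximum version supported: %s%d\n", fw_pre_name, api_max);
        return 0;
}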
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 33ef5372d195..62f9fe926d78 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -614,6 +614,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
IWL_MAX_RX_HW_QUEUES * \
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
+/* cb size is the exponent */
+#define RX_QUEUE_CB_SIZE(x) ilog2(x)
#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK 255
@@ -639,6 +641,8 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
+/* cb size is the exponent - 3 */
+#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
@@ -647,7 +651,7 @@ struct iwl_rb_status {
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
- return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+ return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
}
/**
* struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
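A quick check of the two exponent helpers added above (ilog2 open-coded so the
snippet runs standalone):

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

#define RX_QUEUE_CB_SIZE(x)  ilog2_u32(x)       /* cb size is the exponent */
#define TFD_QUEUE_CB_SIZE(x) (ilog2_u32(x) - 3) /* cb size is exponent - 3 */

int main(void)
{
        printf("RX_QUEUE_CB_SIZE(512)  = %u\n", RX_QUEUE_CB_SIZE(512));  /* 9 */
        printf("TFD_QUEUE_CB_SIZE(256) = %u\n", TFD_QUEUE_CB_SIZE(256)); /* 5 */
        return 0;
}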
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index d01701ee4777..44419e82da1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -241,6 +241,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* iteration complete notification, and the timestamp reported for RX
* received during scan, are reported in TSF of the mac specified in the
* scan request.
+ * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ * ADD_MODIFY_STA_KEY_API_S_VER_2.
+ * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -250,6 +253,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
+ IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -344,6 +349,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index a9f69fdd170b..9c8b09cf1f7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -54,8 +54,8 @@ IWL_EXPORT_SYMBOL(iwl_write32);
void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val)
{
trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val);
- iwl_trans_write32(trans, ofs, val & 0xffffffff);
- iwl_trans_write32(trans, ofs + 4, val >> 32);
+ iwl_trans_write32(trans, ofs, lower_32_bits(val));
+ iwl_trans_write32(trans, ofs + 4, upper_32_bits(val));
}
IWL_EXPORT_SYMBOL(iwl_write64);
@@ -246,6 +246,9 @@ void iwl_force_nmi(struct iwl_trans *trans)
DEVICE_SET_NMI_VAL_DRV);
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_HW);
+ } else if (trans->cfg->gen2) {
+ iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER,
+ DEVICE_SET_NMI_8000_VAL);
} else {
iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG,
DEVICE_SET_NMI_8000_VAL);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
index 88f260db3744..68412ff2112e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
@@ -76,8 +76,8 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
}
IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt)
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt)
{
bool triggered = false;
@@ -118,13 +118,11 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
}
}
spin_unlock(&notif_wait->notif_wait_lock);
-
}
- if (triggered)
- wake_up_all(&notif_wait->notif_waitq);
+ return triggered;
}
-IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
+IWL_EXPORT_SYMBOL(iwl_notification_wait);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
index 0f9995ed71cd..368884be4e7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,10 +90,10 @@ struct iwl_notif_wait_data {
*
* This structure is not used directly, to wait for a
* notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
+ * iwl_init_notification_wait() with appropriate
* parameters. Then do whatever will cause the ucode
* to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
+ * call iwl_wait_notification().
*
* Each notification is one-shot. If at some point we
* need to support multi-shot notifications (which
@@ -114,10 +115,24 @@ struct iwl_notification_wait {
/* caller functions */
void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
- struct iwl_rx_packet *pkt);
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
+static inline void
+iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
+{
+ wake_up_all(&notif_data->notif_waitq);
+}
+
+static inline void
+iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt)
+{
+ if (iwl_notification_wait(notif_data, pkt))
+ iwl_notification_notify(notif_data);
+}
+
/* user functions */
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
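Why the notify path is split in the hunks above: a caller that must do work
between matching a notification and waking the waiters can now invoke the two
halves separately, while iwl_notification_wait_notify() keeps the old one-shot
behaviour as an inline wrapper. A reduced, runnable model (driver types stubbed
out):

#include <stdbool.h>
#include <stdio.h>

struct iwl_notif_wait_data { int dummy; };
struct iwl_rx_packet { int cmd; };

static bool iwl_notification_wait(struct iwl_notif_wait_data *d,
                                  struct iwl_rx_packet *pkt)
{
        (void)d;
        return pkt->cmd == 0x18;        /* pretend a waiter matched */
}

static void iwl_notification_notify(struct iwl_notif_wait_data *d)
{
        (void)d;
        puts("wake_up_all(&notif_data->notif_waitq)");
}

int main(void)
{
        struct iwl_notif_wait_data data = { 0 };
        struct iwl_rx_packet pkt = { .cmd = 0x18 };

        /* ... work that must happen before the wake-up goes here ... */
        if (iwl_notification_wait(&data, &pkt))
                iwl_notification_notify(&data);
        return 0;
}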
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 3bd6fc1b76d4..721ae6bef5da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -438,25 +439,16 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
-static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data,
- const __le16 *ch_section,
- u8 tx_chains, u8 rx_chains, bool lar_supported)
+void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+ u8 tx_chains, u8 rx_chains, bool lar_supported)
{
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
- n_channels = iwl_init_channel_map(
- dev, cfg, data,
- &ch_section[NVM_CHANNELS], lar_supported);
- else
- n_channels = iwl_init_channel_map(
- dev, cfg, data,
- &ch_section[NVM_CHANNELS_FAMILY_8000],
- lar_supported);
-
+ n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+ lar_supported);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -482,6 +474,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
}
+IWL_EXPORT_SYMBOL(iwl_init_sbands);
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
@@ -559,8 +552,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
dest[5] = hw_addr[0];
}
-static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
- struct iwl_nvm_data *data)
+void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+ struct iwl_nvm_data *data)
{
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
@@ -578,6 +571,7 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
+IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr);
static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
@@ -718,7 +712,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
lar_enabled = true;
- ch_section = nvm_sw;
+ ch_section = &nvm_sw[NVM_CHANNELS];
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
NVM_LAR_OFFSET_FAMILY_8000_OLD :
@@ -728,7 +722,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->lar_enabled = !!(lar_config &
NVM_LAR_ENABLED_FAMILY_8000);
lar_enabled = data->lar_enabled;
- ch_section = regulatory;
+ ch_section = &regulatory[NVM_CHANNELS_FAMILY_8000];
}
/* If no valid mac address was found - bail out */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 7249e5b403f4..3fd6506a02ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -82,6 +83,19 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
/**
+ * iwl_set_hw_address_from_csr - sets HW address for 9000 devices and later
+ */
+void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+ struct iwl_nvm_data *data);
+
+/**
+ * iwl_init_sbands - parse and set all channel profiles
+ */
+void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+ u8 tx_chains, u8 rx_chains, bool lar_supported);
+
+/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
*
* This function parses the regulatory channel data received as a
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 406ef301b8ab..306bc967742e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -114,6 +114,7 @@
#define DEVICE_SET_NMI_VAL_DRV BIT(7)
#define DEVICE_SET_NMI_8000_REG 0x00a01c24
#define DEVICE_SET_NMI_8000_VAL 0x1000000
+#define UREG_NIC_SET_NMI_DRIVER 0x00a05c10
/* Shared registers (0x0..0x3ff, via target indirect or periphery */
#define SHR_BASE 0x00a10000
@@ -294,9 +295,6 @@
/*********************** END TX SCHEDULER *************************************/
-/* tcp checksum offload */
-#define RX_EN_CSUM (0x00a00d88)
-
/* Oscillator clock */
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
@@ -309,6 +307,7 @@
* Note this address is cleared after MAC reset.
*/
#define UREG_UCODE_LOAD_STATUS (0xa05c40)
+#define UREG_CPU_INIT_RUN (0xa05c44)
#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
@@ -316,6 +315,8 @@
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
+#define LMAC2_PRPH_OFFSET (0x100000)
+
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
#define RXF_RD_D_SPACE (0xa00c40)
@@ -378,6 +379,7 @@
#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
+#define WFPM_GP2 0xA030B4
enum {
ENABLE_WFPM = BIT(31),
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
@@ -398,6 +400,8 @@ enum aux_misc_master1_en {
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
+#define UMAG_SB_CPU_1_STATUS 0xA038C0
+#define UMAG_SB_CPU_2_STATUS 0xA038C4
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index d42cab291025..0bde26bab15d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -70,8 +70,7 @@
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
- const struct iwl_trans_ops *ops,
- size_t dev_cmd_headroom)
+ const struct iwl_trans_ops *ops)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -90,15 +89,13 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
trans->dev = dev;
trans->cfg = cfg;
trans->ops = ops;
- trans->dev_cmd_headroom = dev_cmd_headroom;
trans->num_rx_queues = 1;
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
- sizeof(struct iwl_device_cmd)
- + trans->dev_cmd_headroom,
+ sizeof(struct iwl_device_cmd),
sizeof(void *),
SLAB_HWCACHE_ALIGN,
NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 0296124a7f9c..0ebfdbb22992 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -396,7 +396,10 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
* currently supports
*/
#define IWL_MAX_HW_QUEUES 32
+#define IWL_MAX_TVQM_QUEUES 512
+
#define IWL_MAX_TID_COUNT 8
+#define IWL_MGMT_TID 15
#define IWL_FRAME_LIMIT 64
#define IWL_MAX_RX_HW_QUEUES 16
@@ -530,6 +533,44 @@ struct iwl_trans_txq_scd_cfg {
int frame_limit;
};
+/* Available options for &struct iwl_tx_queue_cfg_cmd */
+enum iwl_tx_queue_cfg_actions {
+ TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
+ TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
+};
+
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: bit 0 - set to enable the queue, clear to disable it;
+ *	bit 1 - short TFD format. See &enum iwl_tx_queue_cfg_actions.
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+ u8 sta_id;
+ u8 tid;
+ __le16 flags;
+ __le32 cb_size;
+ __le64 byte_cnt_addr;
+ __le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA-TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ */
+struct iwl_tx_queue_cfg_rsp {
+ __le16 queue_number;
+ __le16 flags;
+ __le16 write_pointer;
+ __le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
/**
* struct iwl_trans_ops - transport specific operations
*
@@ -640,13 +681,17 @@ struct iwl_trans_ops {
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
+ /* a000 functions */
+ int (*txq_alloc)(struct iwl_trans *trans,
+ struct iwl_tx_queue_cfg_cmd *cmd,
+ int cmd_id,
+ unsigned int queue_wdg_timeout);
+ void (*txq_free)(struct iwl_trans *trans, int queue);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
- dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
-
- int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
+ int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
@@ -774,9 +819,6 @@ enum iwl_plat_pm_mode {
* the transport must set this before calling iwl_drv_start()
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
- * @dev_cmd_headroom: room needed for the transport's private use before the
- * device_cmd for Tx - for internal use only
- * The user should use iwl_trans_{alloc,free}_tx_cmd.
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
@@ -827,7 +869,6 @@ struct iwl_trans {
/* The following fields are internal only */
struct kmem_cache *dev_cmd_pool;
- size_t dev_cmd_headroom;
char dev_cmd_pool_name[50];
struct dentry *dbgfs_dir;
@@ -1000,13 +1041,13 @@ iwl_trans_dump_data(struct iwl_trans *trans,
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
- u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+ struct iwl_device_cmd *dev_cmd_ptr =
+ kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
if (unlikely(dev_cmd_ptr == NULL))
return NULL;
- return (struct iwl_device_cmd *)
- (dev_cmd_ptr + trans->dev_cmd_headroom);
+ return dev_cmd_ptr;
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -1014,9 +1055,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
struct iwl_device_cmd *dev_cmd)
{
- u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
-
- kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
+ kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1065,20 +1104,39 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}
-static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
- int queue, bool shared_mode)
+static inline void
+iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
- if (trans->ops->txq_set_shared_mode)
- trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
+ if (WARN_ON_ONCE(!trans->ops->txq_free))
+ return;
+
+ trans->ops->txq_free(trans, queue);
}
-static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
- int queue)
+static inline int
+iwl_trans_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue_cfg_cmd *cmd,
+ int cmd_id,
+ unsigned int queue_wdg_timeout)
{
- /* we should never be called if the trans doesn't support it */
- BUG_ON(!trans->ops->get_txq_byte_table);
+ might_sleep();
+
+ if (WARN_ON_ONCE(!trans->ops->txq_alloc))
+ return -ENOTSUPP;
+
+ if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ return -EIO;
+ }
- return trans->ops->get_txq_byte_table(trans, queue);
+ return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
+}
+
+static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int queue, bool shared_mode)
+{
+ if (trans->ops->txq_set_shared_mode)
+ trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
@@ -1137,15 +1195,15 @@ static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
trans->ops->block_txq_ptrs(trans, block);
}
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
- u32 txqs)
+static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
+ u32 txqs)
{
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
- return trans->ops->wait_tx_queue_empty(trans, txqs);
+ return trans->ops->wait_tx_queues_empty(trans, txqs);
}
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1248,8 +1306,7 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_cfg *cfg,
- const struct iwl_trans_ops *ops,
- size_t dev_cmd_headroom);
+ const struct iwl_trans_ops *ops);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
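A hedged sketch of how an opmode might fill the new TX queue allocation command
before calling iwl_trans_txq_alloc(). The __le types are flattened to host-order
integers and no command is actually sent; the point is the encoding: cb_size
carries the ring-size exponent minus 3, exactly what TFD_QUEUE_CB_SIZE() in
iwl-fh.h computes.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define IWL_MGMT_TID 15

enum iwl_tx_queue_cfg_actions {
        TX_QUEUE_CFG_ENABLE_QUEUE       = BIT(0),
        TX_QUEUE_CFG_TFD_SHORT_FORMAT   = BIT(1),
};

struct iwl_tx_queue_cfg_cmd {
        uint8_t  sta_id;
        uint8_t  tid;
        uint16_t flags;
        uint32_t cb_size;
        uint64_t byte_cnt_addr;
        uint64_t tfdq_addr;
};

static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        struct iwl_tx_queue_cfg_cmd cmd = {
                .sta_id  = 0,
                .tid     = IWL_MGMT_TID,
                .flags   = TX_QUEUE_CFG_ENABLE_QUEUE,
                .cb_size = ilog2_u32(256) - 3,  /* 256 TFDs -> 5 */
        };

        printf("tid %u cb_size %u\n", (unsigned)cmd.tid, (unsigned)cmd.cb_size);
        return 0;
}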
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
index 7cb68f6ed1b0..75d35f6b041e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -82,9 +84,22 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
int i, ret;
u32 status;
+ int size;
memset(&cmd, 0, sizeof(cmd));
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+ size = sizeof(cmd);
+ if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
+ !iwl_mvm_is_cdb_supported(mvm))
+ cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+ else
+ cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+ } else {
+ size = IWL_BINDING_CMD_SIZE_V1;
+ }
+
cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
phyctxt->color));
cmd.action = cpu_to_le32(action);
@@ -99,7 +114,7 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
- sizeof(cmd), &cmd, &status);
+ size, &cmd, &status);
if (ret) {
IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
action, ret);
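The size-selection idiom above (send either the whole struct or only its V1
prefix, depending on a firmware capability bit) is worth spelling out. A reduced
model with an invented field layout - the real iwl_binding_cmd is not shown in
this diff, so only the pattern is faithful:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct binding_cmd {
        unsigned int id_and_color;
        unsigned int action;
        unsigned int macs[3];
        unsigned int phy;
        /* --- end of the V1 layout --- */
        unsigned int lmac_id;   /* only read by CDB-aware firmware */
};

#define BINDING_CMD_SIZE_V1 offsetof(struct binding_cmd, lmac_id)

int main(void)
{
        bool fw_has_cdb_binding = false;        /* pretend old firmware */
        size_t size = fw_has_cdb_binding ?
                sizeof(struct binding_cmd) : BINDING_CMD_SIZE_V1;

        printf("sending %zu of %zu bytes\n", size,
               sizeof(struct binding_cmd));
        return 0;
}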
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 5bdb6c2c8390..49b4418e6c35 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -756,7 +756,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Rssi update while not associated - can happen since the statistics
* are handled asynchronously
*/
- if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+ if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)
return;
/* No BT - reports should be disabled */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c7eb1983c4f9..119a3bd92c50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -665,6 +665,19 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_binding_cmd binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
u32 status;
+ int size;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+ size = sizeof(binding_cmd);
+ if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
+ !iwl_mvm_is_cdb_supported(mvm))
+ binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+ else
+ binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+ } else {
+ size = IWL_BINDING_CMD_SIZE_V1;
+ }
/* add back the PHY */
if (WARN_ON(!mvmvif->phy_ctxt))
@@ -711,8 +724,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
- sizeof(binding_cmd), &binding_cmd,
- &status);
+ size, &binding_cmd, &status);
if (ret) {
IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
return ret;
@@ -986,7 +998,9 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
goto out;
}
- if (key_data.use_tkip) {
+ if (key_data.use_tkip &&
+ !fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_TKIP_PARAM,
cmd_flags, sizeof(tkip_cmd),
@@ -1194,7 +1208,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
+ if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@@ -2102,6 +2116,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
*/
iwl_mvm_update_changed_regdom(mvm);
+ if (!unified_image)
+ /* Re-configure default SAR profile */
+ iwl_mvm_sar_select_profile(mvm, 1, 1);
+
if (mvm->net_detect) {
/* If this is a non-unified image, we restart the FW,
* so no need to stop the netdetect scan. If that
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index f4d75ffe3d8a..5d475b4850ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -280,7 +280,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
mvmvif->queue_params[i].uapsd);
if (vif->type == NL80211_IFTYPE_STATION &&
- ap_sta_id != IWL_MVM_STATION_COUNT) {
+ ap_sta_id != IWL_MVM_INVALID_STA) {
struct iwl_mvm_sta *mvm_sta;
mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 077bfd8f4c0c..402846650cbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -330,7 +330,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
index 480a54af4534..970b030ed28d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
@@ -73,7 +73,9 @@
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
-#define IWL_MVM_STATION_COUNT 16
+#define IWL_MVM_STATION_COUNT 16
+#define IWL_MVM_INVALID_STA 0xFF
+
#define IWL_MVM_TDLS_STA_COUNT 4
enum iwl_ac {
@@ -155,7 +157,8 @@ enum iwl_tsf_id {
* @bi_reciprocal: 2^32 / bi
* @dtim_interval: dtim transmit time in TU
* @dtim_reciprocal: 2^32 / dtim_interval
- * @mcast_qid: queue ID for multicast traffic
+ * @mcast_qid: queue ID for multicast traffic.
+ * NOTE: obsolete from VER2 and on
* @beacon_template: beacon template ID
*/
struct iwl_mac_data_ap {
@@ -167,7 +170,7 @@ struct iwl_mac_data_ap {
__le32 dtim_reciprocal;
__le32 mcast_qid;
__le32 beacon_template;
-} __packed; /* AP_MAC_DATA_API_S_VER_1 */
+} __packed; /* AP_MAC_DATA_API_S_VER_2 */
/**
* struct iwl_mac_data_ibss - configuration data for IBSS MAC context
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
index 3fa43d1348a2..750510aff70b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -351,6 +351,45 @@ struct iwl_dev_tx_power_cmd {
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
+#define IWL_NUM_GEO_PROFILES 3
+
+/**
+ * enum iwl_geo_per_chain_offset_operation - type of operation
+ * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW.
+ * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
+ */
+enum iwl_geo_per_chain_offset_operation {
+ IWL_PER_CHAIN_OFFSET_SET_TABLES,
+ IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
+}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */
+
+/**
+ * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT.
+ * @max_tx_power: maximum allowed tx power.
+ * @chain_a: tx power offset for chain a.
+ * @chain_b: tx power offset for chain b.
+ */
+struct iwl_per_chain_offset {
+ __le16 max_tx_power;
+ u8 chain_a;
+ u8 chain_b;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
+
+struct iwl_per_chain_offset_group {
+ struct iwl_per_chain_offset lb;
+ struct iwl_per_chain_offset hb;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
+
+/**
+ * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ */
+struct iwl_geo_tx_power_profiles_cmd {
+ __le32 ops;
+ struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+} __packed; /* GEO_TX_POWER_LIMIT */
+
/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
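A sketch of assembling the new GEO_TX_POWER_LIMIT payload: one lb/hb offset pair
per profile, three profiles. The power numbers are invented, the lb/hb naming is
read as low band/high band (an inference, not stated in the header), and in the
driver the real values come from platform tables.

#include <stdint.h>
#include <stdio.h>

#define IWL_NUM_GEO_PROFILES 3

struct iwl_per_chain_offset {
        uint16_t max_tx_power;
        uint8_t  chain_a;
        uint8_t  chain_b;
};

struct iwl_per_chain_offset_group {
        struct iwl_per_chain_offset lb;
        struct iwl_per_chain_offset hb;
};

struct iwl_geo_tx_power_profiles_cmd {
        uint32_t ops;   /* IWL_PER_CHAIN_OFFSET_SET_TABLES == 0 */
        struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
};

int main(void)
{
        struct iwl_geo_tx_power_profiles_cmd cmd = { .ops = 0 };

        for (int i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
                cmd.table[i].lb = (struct iwl_per_chain_offset)
                        { .max_tx_power = 16, .chain_a = 2, .chain_b = 2 };
                cmd.table[i].hb = cmd.table[i].lb;
        }
        printf("payload is %zu bytes\n", sizeof(cmd));
        return 0;
}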
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
index ad9cc03e16c4..1b7d265ffb0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -227,6 +229,9 @@ enum {
*/
#define RATE_LEGACY_RATE_MSK 0xff
+/* Bit 10 - OFDM HE */
+#define RATE_MCS_OFDM_HE_POS 10
+#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS)
/*
* Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -255,18 +260,29 @@ enum {
#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK
#define RATE_MCS_ANT_NUM 3
-/* Bit 17-18: (0) SS, (1) SS*2 */
+/* Bit 17: (0) SS, (1) SS*2 */
#define RATE_MCS_STBC_POS 17
-#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS)
-#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS)
+#define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS)
+
+/* Bit 18: OFDM-HE dual carrier mode */
+#define RATE_HE_DUAL_CARRIER_MODE 18
+#define RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE)
/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
#define RATE_MCS_BF_POS 19
#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
-/* Bit 20: (0) ZLF is off, (1) ZLF is on */
-#define RATE_MCS_ZLF_POS 20
-#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS)
+/*
+ * Bit 20-21: HE guard interval and LTF type.
+ * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
+ * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
+ */
+#define RATE_MCS_HE_GI_LTF_POS 20
+#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
+
+/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define RATE_MCS_HE_TYPE_POS 22
+#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
#define RATE_MCS_DUP_POS 24
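Decoding the new HE fields from a rate_n_flags word, using only the masks defined
above; the example value is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

#define RATE_MCS_OFDM_HE_MSK    BIT(10)
#define RATE_MCS_HE_GI_LTF_POS  20
#define RATE_MCS_HE_GI_LTF_MSK  (3 << RATE_MCS_HE_GI_LTF_POS)
#define RATE_MCS_HE_TYPE_POS    22
#define RATE_MCS_HE_TYPE_MSK    (3 << RATE_MCS_HE_TYPE_POS)

int main(void)
{
        uint32_t rate_n_flags = RATE_MCS_OFDM_HE_MSK |
                                (1u << RATE_MCS_HE_GI_LTF_POS) | /* 2xLTF+0.8us */
                                (2u << RATE_MCS_HE_TYPE_POS);    /* MU */

        if (rate_n_flags & RATE_MCS_OFDM_HE_MSK)
                printf("HE rate: gi_ltf=%u type=%u\n",
                       (rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
                                RATE_MCS_HE_GI_LTF_POS,
                       (rate_n_flags & RATE_MCS_HE_TYPE_MSK) >>
                                RATE_MCS_HE_TYPE_POS);
        return 0;
}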
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
index c78a0c499459..3178eb96e395 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
@@ -516,7 +516,7 @@ struct iwl_scan_dwell {
* scan_config_channel_flag
* @channel_array: default supported channels
*/
-struct iwl_scan_config {
+struct iwl_scan_config_v1 {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@@ -532,7 +532,7 @@ struct iwl_scan_config {
#define SCAN_TWO_LMACS 2
-struct iwl_scan_config_cdb {
+struct iwl_scan_config {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@@ -669,7 +669,7 @@ struct iwl_scan_req_umac {
u8 n_channels;
__le16 reserved;
u8 data[];
- } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+ } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
@@ -679,13 +679,13 @@ struct iwl_scan_req_umac {
u8 n_channels;
__le16 reserved;
u8 data[];
- } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
+ } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
};
} __packed;
-#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
-#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(__le32))
+#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
+ 2 * sizeof(__le32))
/**
* struct iwl_umac_scan_abort
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index 3b5150e9975d..421b9dd1fb66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -179,7 +179,7 @@ enum iwl_sta_key_flag {
* enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
* @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
* @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
- * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs
* @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
* @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
* @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
@@ -214,20 +214,6 @@ enum iwl_sta_sleep_flag {
STA_SLEEP_STATE_MOREDATA = BIT(2),
};
-/* STA ID and color bits definitions */
-#define STA_ID_SEED (0x0f)
-#define STA_ID_POS (0)
-#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS)
-
-#define STA_COLOR_SEED (0x7)
-#define STA_COLOR_POS (4)
-#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS)
-
-#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \
- (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
-#define STA_ID_N_COLOR_GET_ID(id_n_color) \
- (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
-
#define STA_KEY_MAX_NUM (16)
#define STA_KEY_IDX_INVALID (0xff)
#define STA_KEY_MAX_DATA_KEY_NUM (4)
@@ -324,6 +310,24 @@ struct iwl_mvm_add_sta_cmd_v7 {
} __packed; /* ADD_STA_CMD_API_S_VER_7 */
/**
+ * enum iwl_sta_type - FW station types
+ * ( REPLY_ADD_STA = 0x18 )
+ * @IWL_STA_LINK: Link station - normal RX and TX traffic.
+ * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons
+ * and probe responses.
+ * @IWL_STA_MULTICAST: multicast traffic.
+ * @IWL_STA_TDLS_LINK: TDLS link station.
+ * @IWL_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on).
+ */
+enum iwl_sta_type {
+ IWL_STA_LINK,
+ IWL_STA_GENERAL_PURPOSE,
+ IWL_STA_MULTICAST,
+ IWL_STA_TDLS_LINK,
+ IWL_STA_AUX_ACTIVITY,
+};
+
+/**
* struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
@@ -347,14 +351,17 @@ struct iwl_mvm_add_sta_cmd_v7 {
* @sleep_tx_count: number of packets to transmit to station even though it is
* asleep. Used to synchronise PS-poll and u-APSD responses while ucode
* keeps track of STA sleep state.
+ * @station_type: type of this station. See &enum iwl_sta_type.
* @sleep_state_flags: Look at %iwl_sta_sleep_flag.
* @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
* mac-addr.
* @beamform_flags: beam forming controls
- * @tfd_queue_msk: tfd queues used by this station
+ * @tfd_queue_msk: tfd queues used by this station.
+ * Obsolete for new TX API (9 and above).
* @rx_ba_window: aggregation window size
- * @scd_queue_bank: queue bank in used. Each bank contains 32 queues. 0 means
- * that the queues used by this station are in the first 32.
+ * @sp_length: the size of the SP as it appears in the WME IE
+ * @uapsd_acs: the 4 LS bits are the trigger-enabled ACs, the 4 MS bits are
+ *	the delivery-enabled ACs.
*
* The device contains an internal table of per-station information, with info
* on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -379,38 +386,61 @@ struct iwl_mvm_add_sta_cmd {
u8 remove_immediate_ba_tid;
__le16 add_immediate_ba_ssn;
__le16 sleep_tx_count;
- __le16 sleep_state_flags;
+ u8 sleep_state_flags;
+ u8 station_type;
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
__le16 rx_ba_window;
- u8 scd_queue_bank;
- u8 uapsd_trigger_acs;
-} __packed; /* ADD_STA_CMD_API_S_VER_8 */
+ u8 sp_length;
+ u8 uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_10 */
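The uapsd_acs byte packs both AC bitmaps into nibbles; a minimal sketch of
the packing implied by the kerneldoc above (the helper name is hypothetical):

	/* low nibble: trigger-enabled ACs; high nibble: deliver-enabled ACs */
	static u8 iwl_mvm_pack_uapsd_acs(u8 trigger_acs, u8 deliver_acs)
	{
		return (trigger_acs & 0xf) | ((deliver_acs & 0xf) << 4);
	}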
/**
- * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * struct iwl_mvm_add_sta_key_common - add/modify sta key common part
* ( REPLY_ADD_STA_KEY = 0x17 )
* @sta_id: index of station in uCode's station table
* @key_offset: key offset in key storage
* @key_flags: type %iwl_sta_key_flag
* @key: key material data
* @rx_secur_seq_cnt: RX security sequence counter for the key
- * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
- * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
*/
-struct iwl_mvm_add_sta_key_cmd {
+struct iwl_mvm_add_sta_key_common {
u8 sta_id;
u8 key_offset;
__le16 key_flags;
u8 key[32];
u8 rx_secur_seq_cnt[16];
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd_v1 {
+ struct iwl_mvm_add_sta_key_common common;
u8 tkip_rx_tsc_byte2;
u8 reserved;
__le16 tkip_rx_ttak[5];
} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @rx_mic_key: TKIP RX unicast or multicast key
+ * @tx_mic_key: TKIP TX key
+ * @transmit_seq_cnt: TSC, transmit packet number
+ */
+struct iwl_mvm_add_sta_key_cmd {
+ struct iwl_mvm_add_sta_key_common common;
+ __le64 rx_mic_key;
+ __le64 tx_mic_key;
+ __le64 transmit_seq_cnt;
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
+
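Splitting out the common part lets a sender build either layout in one
buffer and transmit only the bytes the firmware expects; a hedged sketch
(the TLV name is an assumption, not part of this hunk):

	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	/* assumed TLV advertising the v2 layout */
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	int size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	/* u.cmd.common overlays u.cmd_v1.common, so it is filled once */
	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, 0, size, &u);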
+/**
* enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
* @ADD_STA_SUCCESS: operation was executed successfully
* @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index b38cc073adcc..81b98915b1a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,6 +125,20 @@ enum iwl_tx_flags {
}; /* TX_FLAGS_BITS_API_S_VER_1 */
/**
+ * enum iwl_tx_cmd_flags - bitmasks for the flags in the TX command for a000
+ * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ * to a secured STA
+ * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ * selection, retry limits and BT kill
+ */
+enum iwl_tx_cmd_flags {
+ IWL_TX_FLAGS_CMD_RATE = BIT(0),
+ IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1),
+ IWL_TX_FLAGS_HIGH_PRI = BIT(2),
+}; /* TX_FLAGS_BITS_API_S_VER_3 */
+
+/**
* enum iwl_tx_pm_timeouts - pm timeout values in TX command
* @PM_FRAME_NONE: no need to suspend sleep mode
* @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
@@ -159,7 +174,7 @@ enum iwl_tx_cmd_sec_ctrl {
TX_CMD_SEC_EXT = 0x04,
TX_CMD_SEC_GCMP = 0x05,
TX_CMD_SEC_KEY128 = 0x08,
- TX_CMD_SEC_KEY_FROM_TABLE = 0x08,
+ TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
};
/* TODO: how do these values fit in a 16-bit variable??? */
@@ -301,6 +316,31 @@ struct iwl_tx_cmd {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_6 */
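+/**
+ * struct iwl_dram_sec_info - security info kept in the FW's DRAM (a000)
+ * @pn_low: low 32 bits of the packet number
+ * @pn_high: high 16 bits of the packet number
+ * @aux_info: auxiliary FW-internal data
+ */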
+struct iwl_dram_sec_info {
+ __le32 pn_low;
+ __le16 pn_high;
+ __le16 aux_info;
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: length in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, used when IWL_TX_FLAGS_CMD_RATE
+ *	is set in @flags. Combination of RATE_MCS_*
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen2 {
+ __le16 len;
+ __le16 offload_assist;
+ __le32 flags;
+ struct iwl_dram_sec_info dram_info;
+ __le32 rate_n_flags;
+ struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_7 */
+
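A hedged sketch of filling the new command (field names come from the struct
above; the device-command payload accessor, the skb, and the rate value are
assumed context, not shown in this patch):

	/* sketch: a000 TX command using the command rate for an EAPOL frame */
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;

	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->flags = cpu_to_le32(IWL_TX_FLAGS_CMD_RATE |
				    IWL_TX_FLAGS_HIGH_PRI);
	tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags); /* RATE_MCS_* combo */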
/*
* TX response related data
*/
@@ -508,9 +548,11 @@ struct agg_tx_status {
* @tlc_info: TLC rate info
* @ra_tid: bits [3:0] = ra, bits [7:4] = tid
* @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
* @status: for non-agg: frame status TX_STATUS_*
* for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
* follow this one, up to frame_count.
+ *	For version 6, a TX response isn't received for aggregation at all.
*
* After the array of statuses comes the SSN of the SCD. Look at
* %iwl_mvm_get_scd_ssn for more details.
@@ -537,9 +579,17 @@ struct iwl_mvm_tx_resp {
u8 tlc_info;
u8 ra_tid;
__le16 frame_ctrl;
-
- struct agg_tx_status status;
-} __packed; /* TX_RSP_API_S_VER_3 */
+ union {
+ struct {
+ struct agg_tx_status status;
+ } v3;/* TX_RSP_API_S_VER_3 */
+ struct {
+ __le16 tx_queue;
+ __le16 reserved2;
+ struct agg_tx_status status;
+ } v6;
+ };
+} __packed; /* TX_RSP_API_S_VER_6 */
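Readers of the status must now pick the right union member; the mac-ctxt.c
hunk later in this patch does so through iwl_mvm_get_agg_status(), which a
sketch could implement as follows (the version predicate reuses
iwl_mvm_has_new_tx_api() from elsewhere in this patch):

	static inline struct agg_tx_status *
	iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
			       struct iwl_mvm_tx_resp *tx_resp)
	{
		if (iwl_mvm_has_new_tx_api(mvm))
			return &tx_resp->v6.status;
		return &tx_resp->v3.status;
	}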
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -579,11 +629,14 @@ struct iwl_mvm_ba_notif {
* struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
* @q_num: TFD queue number
* @tfd_index: Index of first un-acked frame in the TFD queue
+ * @scd_queue: For debug only - the physical queue the TFD queue is bound to
*/
struct iwl_mvm_compressed_ba_tfd {
- u8 q_num;
- u8 reserved;
+ __le16 q_num;
__le16 tfd_index;
+ u8 scd_queue;
+ u8 reserved;
+ __le16 reserved2;
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
/**
@@ -635,6 +688,10 @@ enum iwl_mvm_ba_resp_flags {
* @tx_rate: the rate the aggregation was sent at
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
+ * @ba_tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
+ * for details.
+ * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
+ * &iwl_mvm_compressed_ba_ratid for more details.
*/
struct iwl_mvm_compressed_ba_notif {
__le32 flags;
@@ -646,6 +703,7 @@ struct iwl_mvm_compressed_ba_notif {
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
+ __le16 reserved;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
@@ -754,25 +812,6 @@ struct iwl_tx_path_flush_cmd {
__le16 reserved;
} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
-/**
- * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
- * @tx_resp: the Tx response from the fw (agg or non-agg)
- *
- * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
- * it can't know that everything will go well until the end of the AMPDU, it
- * can't know in advance the number of MPDUs that will be sent in the current
- * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
- * Hence, it can't know in advance what the SSN of the SCD will be at the end
- * of the batch. This is why the SSN of the SCD is written at the end of the
- * whole struct at a variable offset. This function knows how to cope with the
- * variable offset and returns the SSN of the SCD.
- */
-static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & 0xfff;
-}
-
/* Available options for the SCD_QUEUE_CFG HCMD */
enum iwl_scd_cfg_actions {
SCD_CFG_DISABLE_QUEUE = 0x0,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index cf2b836f3888..f545c5f9e4e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -320,12 +320,14 @@ enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
CTDP_CONFIG_CMD = 0x03,
TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+ GEO_TX_POWER_LIMIT = 0x05,
CT_KILL_NOTIFICATION = 0xFE,
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
enum iwl_system_subcmd_ids {
SHARED_MEM_CFG_CMD = 0x0,
+ INIT_EXTENDED_CFG_CMD = 0x03,
};
enum iwl_data_path_subcmd_ids {
@@ -345,9 +347,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
NVM_ACCESS_COMPLETE = 0x0,
};
-enum iwl_fmac_debug_cmds {
+enum iwl_debug_cmds {
LMAC_RD_WR = 0x0,
UMAC_RD_WR = 0x1,
+ MFU_ASSERT_DUMP_NTF = 0xFE,
};
/* command groups */
@@ -673,10 +676,8 @@ struct iwl_error_resp {
/* Common PHY, MAC and Bindings definitions */
-
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
-#define AUX_BINDING_INDEX (3)
/* Used to extract ID and color from the context dword */
#define FW_CTXT_ID_POS (0)
@@ -689,7 +690,7 @@ struct iwl_error_resp {
(_color << FW_CTXT_COLOR_POS))
/* Possible actions on PHYs, MACs and Bindings */
-enum {
+enum iwl_phy_ctxt_action {
FW_CTXT_ACTION_STUB = 0,
FW_CTXT_ACTION_ADD,
FW_CTXT_ACTION_MODIFY,
@@ -960,6 +961,7 @@ struct iwl_time_event_notif {
* @action: action to perform, one of FW_CTXT_ACTION_*
* @macs: array of MAC id and colors which belong to the binding
* @phy: PHY id and color which belongs to the binding
+ * @lmac_id: the lmac id the binding belongs to
*/
struct iwl_binding_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@@ -968,7 +970,13 @@ struct iwl_binding_cmd {
/* BINDING_DATA_API_S_VER_1 */
__le32 macs[MAX_MACS_IN_BINDING];
__le32 phy;
-} __packed; /* BINDING_CMD_API_S_VER_1 */
+ /* BINDING_CMD_API_S_VER_1 */
+ __le32 lmac_id;
+} __packed; /* BINDING_CMD_API_S_VER_2 */
+
+#define IWL_BINDING_CMD_SIZE_V1 offsetof(struct iwl_binding_cmd, lmac_id)
+#define IWL_LMAC_24G_INDEX 0
+#define IWL_LMAC_5G_INDEX 1
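IWL_BINDING_CMD_SIZE_V1 lets the driver keep sending the shorter v1 layout
to firmware that predates the lmac_id field; a hedged sketch of the intended
use (the CDB predicate is an assumed helper):

	struct iwl_binding_cmd cmd = {};
	u32 size = iwl_mvm_is_cdb_supported(mvm) ?
		   sizeof(cmd) : IWL_BINDING_CMD_SIZE_V1;

	cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); /* dropped for v1 size */
	ret = iwl_mvm_send_cmd_pdu(mvm, BINDING_CONTEXT_CMD, 0, size, &cmd);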
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128
@@ -990,6 +998,9 @@ struct iwl_time_quota_data {
* struct iwl_time_quota_cmd - configuration of time quota between bindings
* ( TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
+ *	Note: on non-CDB the fourth one is the auxiliary mac and is
+ * essentially zero.
+ * On CDB the fourth one is a regular binding.
*/
struct iwl_time_quota_cmd {
struct iwl_time_quota_data quotas[MAX_BINDINGS];
@@ -1231,6 +1242,25 @@ struct iwl_mfuart_load_notif {
} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/
/**
+ * struct iwl_mfu_assert_dump_notif - mfuart dump logs
+ * ( MFU_ASSERT_DUMP_NTF = 0xfe )
+ * @assert_id: mfuart assert id that caused the notification
+ * @curr_reset_num: number of asserts since uptime
+ * @index_num: current chunk id
+ * @parts_num: total number of chunks
+ * @data_size: number of data bytes sent
+ * @data: data buffer
+ */
+struct iwl_mfu_assert_dump_notif {
+ __le32 assert_id;
+ __le32 curr_reset_num;
+ __le16 index_num;
+ __le16 parts_num;
+ __le32 data_size;
+ __le32 data[0];
+} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/
+
+/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
* ( SET_CALIB_DEFAULT_CMD = 0x8e )
* @calib_index: the calibration to set value for
@@ -1998,19 +2028,48 @@ struct iwl_shared_mem_cfg_v1 {
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+/**
+ * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
+ *
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
+ * @txfifo_size: size of TX FIFOs
+ * @rxfifo1_addr: RXF1 addr
+ * @rxfifo1_size: RXF1 size
+ */
+struct iwl_shared_mem_lmac_cfg {
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM];
+ __le32 rxfifo1_addr;
+ __le32 rxfifo1_size;
+
+} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_shared_mem_cfg - Shared memory configuration information from the FW
+ *
+ * @shared_mem_addr: shared memory address
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr
+ * @sample_buff_size: internal sample buff size
+ * @rxfifo2_addr: start addr of RXF2
+ * @rxfifo2_size: size of RXF2
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis);
+ *	when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @lmac_num: number of LMACs (1 or 2)
+ * @lmac_smem: per-LMAC smem data
+ */
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 sample_buff_addr;
__le32 sample_buff_size;
- __le32 txfifo_addr;
- __le32 txfifo_size[TX_FIFO_MAX_NUM];
- __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+ __le32 rxfifo2_addr;
+ __le32 rxfifo2_size;
__le32 page_buff_addr;
__le32 page_buff_size;
- __le32 rxfifo_addr;
- __le32 internal_txfifo_addr;
- __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+ __le32 lmac_num;
+ struct iwl_shared_mem_lmac_cfg lmac_smem[2];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
@@ -2178,4 +2237,26 @@ struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+/**
+ * enum iwl_extended_cfg_flags - commands driver may send before
+ * finishing init flow
+ * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
+ * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
+ * @IWL_INIT_PHY: driver is going to send PHY_DB commands
+ */
+enum iwl_extended_cfg_flags {
+ IWL_INIT_DEBUG_CFG,
+ IWL_INIT_NVM,
+ IWL_INIT_PHY,
+};
+
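These enumerators are bit positions rather than masks; the fw.c hunk later
in this patch builds the command accordingly:

	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};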
+/**
+ * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait
+ *	for before finishing init flows
+ * @init_flags: bitmap built from &enum iwl_extended_cfg_flags bit positions
+ */
+struct iwl_init_extended_cfg_cmd {
+ __le32 init_flags;
+} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index a027b11bbdb3..7b86a4f1b574 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -99,10 +99,120 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
iwl_trans_release_nic_access(mvm->trans, &flags);
}
+static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm,
+ struct iwl_fw_error_dump_data **dump_data,
+ int size, u32 offset, int fifo_num)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ u32 *fifo_data;
+ u32 fifo_len;
+ int i;
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = size;
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ return;
+
+ /* Add a TLV for the RXF */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_D_SPACE + offset));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_WR_PTR + offset));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_RD_PTR + offset));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_FENCE_PTR + offset));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_SET_FENCE_MODE + offset));
+
+ /* Lock fence */
+ iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1);
+	/* Set fence pointer to the same place as the WR pointer */
+ iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1);
+ /* Set fence offset */
+ iwl_trans_write_prph(mvm->trans,
+ RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (i = 0; i < fifo_len; i++)
+ fifo_data[i] = iwl_trans_read_prph(mvm->trans,
+ RXF_FIFO_RD_FENCE_INC +
+ offset);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_mvm_dump_txf(struct iwl_mvm *mvm,
+ struct iwl_fw_error_dump_data **dump_data,
+ int size, u32 offset, int fifo_num)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ u32 *fifo_data;
+ u32 fifo_len;
+ int i;
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = size;
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ return;
+
+ /* Add a TLV for the FIFO */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_FIFO_ITEM_CNT + offset));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_WR_PTR + offset));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_RD_PTR + offset));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_FENCE_PTR + offset));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_LOCK_FENCE + offset));
+
+ /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+ iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset,
+ TXF_WR_PTR + offset);
+
+ /* Dummy-read to advance the read pointer to the head */
+ iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (i = 0; i < fifo_len; i++)
+ fifo_data[i] = iwl_trans_read_prph(mvm->trans,
+ TXF_READ_MODIFY_DATA +
+ offset);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
+ struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg;
u32 *fifo_data;
u32 fifo_len;
unsigned long flags;
@@ -111,126 +221,47 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
return;
- /* Pull RXF data from all RXFs */
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
- /*
- * Keep aside the additional offset that might be needed for
- * next RXF
- */
- u32 offset_diff = RXF_DIFF_FROM_PREV * i;
-
- fifo_hdr = (void *)(*dump_data)->data;
- fifo_data = (void *)fifo_hdr->data;
- fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
-
- /* No need to try to read the data if the length is 0 */
- if (fifo_len == 0)
- continue;
-
- /* Add a TLV for the RXF */
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
- (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
- fifo_hdr->fifo_num = cpu_to_le32(i);
- fifo_hdr->available_bytes =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_RD_D_SPACE +
- offset_diff));
- fifo_hdr->wr_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_RD_WR_PTR +
- offset_diff));
- fifo_hdr->rd_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_RD_RD_PTR +
- offset_diff));
- fifo_hdr->fence_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_RD_FENCE_PTR +
- offset_diff));
- fifo_hdr->fence_mode =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- RXF_SET_FENCE_MODE +
- offset_diff));
-
- /* Lock fence */
- iwl_trans_write_prph(mvm->trans,
- RXF_SET_FENCE_MODE + offset_diff, 0x1);
- /* Set fence pointer to the same place like WR pointer */
- iwl_trans_write_prph(mvm->trans,
- RXF_LD_WR2FENCE + offset_diff, 0x1);
- /* Set fence offset */
- iwl_trans_write_prph(mvm->trans,
- RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
- 0x0);
-
- /* Read FIFO */
- fifo_len /= sizeof(u32); /* Size in DWORDS */
- for (j = 0; j < fifo_len; j++)
- fifo_data[j] = iwl_trans_read_prph(mvm->trans,
- RXF_FIFO_RD_FENCE_INC +
- offset_diff);
- *dump_data = iwl_fw_error_next_data(*dump_data);
- }
-
- /* Pull TXF data from all TXFs */
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
+ /* Pull RXF1 */
+ iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
+ /* Pull RXF2 */
+ iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size,
+ RXF_DIFF_FROM_PREV, 1);
+ /* Pull LMAC2 RXF1 */
+ if (mvm->smem_cfg.num_lmacs > 1)
+ iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size,
+ LMAC2_PRPH_OFFSET, 2);
+
+ /* Pull TXF data from LMAC1 */
+ for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
+ iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i],
+ 0, i);
+ }
- fifo_hdr = (void *)(*dump_data)->data;
- fifo_data = (void *)fifo_hdr->data;
- fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
-
- /* No need to try to read the data if the length is 0 */
- if (fifo_len == 0)
- continue;
-
- /* Add a TLV for the FIFO */
- (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
- (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
- fifo_hdr->fifo_num = cpu_to_le32(i);
- fifo_hdr->available_bytes =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_FIFO_ITEM_CNT));
- fifo_hdr->wr_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_WR_PTR));
- fifo_hdr->rd_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_RD_PTR));
- fifo_hdr->fence_ptr =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_FENCE_PTR));
- fifo_hdr->fence_mode =
- cpu_to_le32(iwl_trans_read_prph(mvm->trans,
- TXF_LOCK_FENCE));
-
- /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
- iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
- TXF_WR_PTR);
-
- /* Dummy-read to advance the read pointer to the head */
- iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
-
- /* Read FIFO */
- fifo_len /= sizeof(u32); /* Size in DWORDS */
- for (j = 0; j < fifo_len; j++)
- fifo_data[j] = iwl_trans_read_prph(mvm->trans,
- TXF_READ_MODIFY_DATA);
- *dump_data = iwl_fw_error_next_data(*dump_data);
+ /* Pull TXF data from LMAC2 */
+ if (mvm->smem_cfg.num_lmacs > 1) {
+ for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(mvm->trans,
+ TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
+ i);
+ iwl_mvm_dump_txf(mvm, dump_data,
+ cfg->lmac[1].txfifo_size[i],
+ LMAC2_PRPH_OFFSET,
+ i + cfg->num_txfifo_entries);
+ }
}
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
- i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++) {
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
- fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+ fifo_len = mvm->smem_cfg.internal_txfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
@@ -246,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
- ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));
+ mvm->smem_cfg.num_txfifo_entries);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
@@ -553,31 +584,45 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
- struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
+ struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg;
fifo_data_len = 0;
- /* Count RXF size */
- for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
- if (!mem_cfg->rxfifo_size[i])
- continue;
-
+ /* Count RXF2 size */
+ if (mem_cfg->rxfifo2_size) {
/* Add header info */
- fifo_data_len += mem_cfg->rxfifo_size[i] +
+ fifo_data_len += mem_cfg->rxfifo2_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
- for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
- if (!mem_cfg->txfifo_size[i])
+ /* Count RXF1 sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ if (!mem_cfg->lmac[i].rxfifo1_size)
continue;
/* Add header info */
- fifo_data_len += mem_cfg->txfifo_size[i] +
+ fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
+ /* Count TXF sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
+
+ for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
+ if (!mem_cfg->lmac[i].txfifo_size[j])
+ continue;
+
+ /* Add header info */
+ fifo_data_len +=
+ mem_cfg->lmac[i].txfifo_size[j] +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
+ }
+
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 45cb4f476e76..e6c9528eeeda 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -271,6 +272,27 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
return 0;
}
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+ __le32 *dump_data = mfu_dump_notif->data;
+ int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+ int i;
+
+ if (mfu_dump_notif->index_num == 0)
+ IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+ le32_to_cpu(mfu_dump_notif->assert_id));
+
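+	/* index_num is the chunk number; scaling it by the per-chunk word
+	 * count yields a dword index that runs across the whole dump
+	 */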
+ for (i = 0; i < n_words; i++)
+ IWL_DEBUG_INFO(mvm,
+ "MFUART assert dump, dword %u: 0x%08x\n",
+ le16_to_cpu(mfu_dump_notif->index_num) *
+ n_words + i,
+ le32_to_cpu(dump_data[i]));
+}
+
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
const struct fw_img *image)
{
@@ -617,11 +639,18 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (ret) {
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ struct iwl_trans *trans = mvm->trans;
+
+ if (trans->cfg->gen2)
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
+ iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
+ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
- iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
- iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
+ iwl_read_prph(trans, SB_CPU_1_STATUS),
+ iwl_read_prph(trans, SB_CPU_2_STATUS));
mvm->cur_ucode = old_type;
return ret;
}
@@ -669,6 +698,82 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return 0;
}
+static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_nvm_access_complete_cmd nvm_complete = {};
+ struct iwl_init_extended_cfg_cmd init_cfg = {
+ .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
+ };
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_init_complete,
+ NULL);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ /* Send init config command to mark that we are sending NVM access
+ * commands
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
+ INIT_EXTENDED_CFG_CMD), 0,
+ sizeof(init_cfg), &init_cfg);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run init config command: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (read_nvm) {
+ /* Read nvm */
+ ret = iwl_nvm_init(mvm, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto error;
+ }
+ }
+
+ /* In case we read the NVM from external file, load it to the NIC */
+ if (mvm->nvm_file_name)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
+ ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+ if (WARN_ON(ret))
+ goto error;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ NVM_ACCESS_COMPLETE), 0,
+ sizeof(nvm_complete), &nvm_complete);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* We wait for the INIT complete notification */
+ return iwl_wait_notification(&mvm->notif_wait, &init_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &init_wait);
+ return ret;
+}
+
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
struct iwl_phy_cfg_cmd phy_cfg_cmd;
@@ -697,6 +802,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
};
int ret;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_run_unified_mvm_ucode(mvm, true);
+
lockdep_assert_held(&mvm->mutex);
if (WARN_ON_ONCE(mvm->calibrating))
@@ -803,97 +911,31 @@ out:
return ret;
}
-int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
-{
- struct iwl_notification_wait init_wait;
- struct iwl_nvm_access_complete_cmd nvm_complete = {};
- static const u16 init_complete[] = {
- INIT_COMPLETE_NOTIF,
- };
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- iwl_init_notification_wait(&mvm->notif_wait,
- &init_wait,
- init_complete,
- ARRAY_SIZE(init_complete),
- iwl_wait_init_complete,
- NULL);
-
- /* Will also start the device */
- ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
- if (ret) {
- IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- goto error;
- }
-
- /* TODO: remove when integrating context info */
- ret = iwl_mvm_init_paging(mvm);
- if (ret) {
- IWL_ERR(mvm, "Failed to init paging: %d\n",
- ret);
- goto error;
- }
-
- /* Read the NVM only at driver load time, no need to do this twice */
- if (read_nvm) {
- /* Read nvm */
- ret = iwl_nvm_init(mvm, true);
- if (ret) {
- IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
- goto error;
- }
- }
-
- /* In case we read the NVM from external file, load it to the NIC */
- if (mvm->nvm_file_name)
- iwl_mvm_load_nvm_to_nic(mvm);
-
- ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
- if (WARN_ON(ret))
- goto error;
-
- ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
- NVM_ACCESS_COMPLETE), 0,
- sizeof(nvm_complete), &nvm_complete);
- if (ret) {
- IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
- ret);
- goto error;
- }
-
- /* We wait for the INIT complete notification */
- return iwl_wait_notification(&mvm->notif_wait, &init_wait,
- MVM_UCODE_ALIVE_TIMEOUT);
-
-error:
- iwl_remove_notification(&mvm->notif_wait, &init_wait);
- return ret;
-}
-
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
- int i;
+ int i, lmac;
+ int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
- mvm->shared_mem_cfg.num_txfifo_entries =
- ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
- for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
- mvm->shared_mem_cfg.txfifo_size[i] =
- le32_to_cpu(mem_cfg->txfifo_size[i]);
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
- mvm->shared_mem_cfg.rxfifo_size[i] =
- le32_to_cpu(mem_cfg->rxfifo_size[i]);
+ if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
+ return;
+
+ mvm->smem_cfg.num_lmacs = lmac_num;
+ mvm->smem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
+ mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
- BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
- sizeof(mem_cfg->internal_txfifo_size));
+ for (lmac = 0; lmac < lmac_num; lmac++) {
+ struct iwl_shared_mem_lmac_cfg *lmac_cfg =
+ &mem_cfg->lmac_smem[lmac];
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
- i++)
- mvm->shared_mem_cfg.internal_txfifo_size[i] =
- le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+ for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
+ mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
+ le32_to_cpu(lmac_cfg->txfifo_size[i]);
+ mvm->smem_cfg.lmac[lmac].rxfifo1_size =
+ le32_to_cpu(lmac_cfg->rxfifo1_size);
+ }
}
static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
@@ -902,25 +944,27 @@ static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
int i;
- mvm->shared_mem_cfg.num_txfifo_entries =
- ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+ mvm->smem_cfg.num_lmacs = 1;
+
+ mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
- mvm->shared_mem_cfg.txfifo_size[i] =
+ mvm->smem_cfg.lmac[0].txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
- mvm->shared_mem_cfg.rxfifo_size[i] =
- le32_to_cpu(mem_cfg->rxfifo_size[i]);
+
+ mvm->smem_cfg.lmac[0].rxfifo1_size =
+ le32_to_cpu(mem_cfg->rxfifo_size[0]);
+ mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
/* new API has more data, from rxfifo_addr field and on */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
- BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+ BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
for (i = 0;
- i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++)
- mvm->shared_mem_cfg.internal_txfifo_size[i] =
+ mvm->smem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
}
@@ -969,85 +1013,94 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
-#define ACPI_WRDS_METHOD "WRDS"
-#define ACPI_WRDS_WIFI (0x07)
-#define ACPI_WRDS_TABLE_SIZE 10
+#ifdef CONFIG_ACPI
+#define ACPI_WRDS_METHOD "WRDS"
+#define ACPI_EWRD_METHOD "EWRD"
+#define ACPI_WGDS_METHOD "WGDS"
+#define ACPI_WIFI_DOMAIN (0x07)
+#define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2)
+#define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
+ IWL_MVM_SAR_TABLE_SIZE + 3)
+#define ACPI_WGDS_WIFI_DATA_SIZE 18
+#define ACPI_WGDS_NUM_BANDS 2
+#define ACPI_WGDS_TABLE_SIZE 3
+
+static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
+ union acpi_object *table,
+ struct iwl_mvm_sar_profile *profile,
+ bool enabled)
+{
+ int i;
-struct iwl_mvm_sar_table {
- bool enabled;
- u8 values[ACPI_WRDS_TABLE_SIZE];
-};
+ profile->enabled = enabled;
-#ifdef CONFIG_ACPI
-static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
- struct iwl_mvm_sar_table *sar_table)
+ for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) {
+ if ((table[i].type != ACPI_TYPE_INTEGER) ||
+ (table[i].integer.value > U8_MAX))
+ return -EINVAL;
+
+ profile->table[i] = table[i].integer.value;
+ }
+
+ return 0;
+}
+
+static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
+ union acpi_object *data,
+ int data_size)
{
- union acpi_object *data_pkg;
- u32 i;
+ int i;
+ union acpi_object *wifi_pkg;
- /* We need at least two packages, one for the revision and one
+ /*
+ * We need at least two packages, one for the revision and one
* for the data itself. Also check that the revision is valid
* (i.e. it is an integer set to 0).
- */
- if (wrds->type != ACPI_TYPE_PACKAGE ||
- wrds->package.count < 2 ||
- wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
- wrds->package.elements[0].integer.value != 0) {
- IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
- return -EINVAL;
+ */
+ if (data->type != ACPI_TYPE_PACKAGE ||
+ data->package.count < 2 ||
+ data->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ data->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n");
+ return ERR_PTR(-EINVAL);
}
/* loop through all the packages to find the one for WiFi */
- for (i = 1; i < wrds->package.count; i++) {
+ for (i = 1; i < data->package.count; i++) {
union acpi_object *domain;
- data_pkg = &wrds->package.elements[i];
+ wifi_pkg = &data->package.elements[i];
/* Skip anything that is not a package with the right
* amount of elements (i.e. domain_type,
- * enabled/disabled plus the sar table size.
+		 * enabled/disabled plus the actual data size).
*/
- if (data_pkg->type != ACPI_TYPE_PACKAGE ||
- data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
+ if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
+ wifi_pkg->package.count != data_size)
continue;
- domain = &data_pkg->package.elements[0];
+ domain = &wifi_pkg->package.elements[0];
if (domain->type == ACPI_TYPE_INTEGER &&
- domain->integer.value == ACPI_WRDS_WIFI)
+ domain->integer.value == ACPI_WIFI_DOMAIN)
break;
- data_pkg = NULL;
+ wifi_pkg = NULL;
}
- if (!data_pkg)
- return -ENOENT;
-
- if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
- return -EINVAL;
-
- sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
-
- for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
- union acpi_object *entry;
-
- entry = &data_pkg->package.elements[i + 2];
- if ((entry->type != ACPI_TYPE_INTEGER) ||
- (entry->integer.value > U8_MAX))
- return -EINVAL;
-
- sar_table->values[i] = entry->integer.value;
- }
+ if (!wifi_pkg)
+ return ERR_PTR(-ENOENT);
- return 0;
+ return wifi_pkg;
}
-static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
- struct iwl_mvm_sar_table *sar_table)
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
+ union acpi_object *wifi_pkg, *table;
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
+ bool enabled;
int ret;
root_handle = ACPI_HANDLE(mvm->dev);
@@ -1072,62 +1125,307 @@ static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
return -ENOENT;
}
- ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
+ wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer,
+ ACPI_WRDS_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+ /* position of the actual table */
+ table = &wifi_pkg->package.elements[2];
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
+ enabled);
+
+out_free:
kfree(wrds.pointer);
+ return ret;
+}
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+ union acpi_object *wifi_pkg;
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ bool enabled;
+	int i, n_profiles, pos, ret;
+
+ root_handle = ACPI_HANDLE(mvm->dev);
+ if (!root_handle) {
+ IWL_DEBUG_RADIO(mvm,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "EWRD method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call EWRD with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &ewrd);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status);
+ return -ENOENT;
+ }
+
+ wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer,
+ ACPI_EWRD_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
+ (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+ /* in case of BIOS bug */
+ if (n_profiles <= 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+	/* the tables start at element 3 */
+	pos = 3;
+
+	for (i = 0; i < n_profiles; i++) {
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ ret = iwl_mvm_sar_set_profile(mvm,
+ &wifi_pkg->package.elements[pos],
+ &mvm->sar_profiles[i + 1],
+ enabled);
+ if (ret < 0)
+ break;
+
+ /* go to the next table */
+ pos += IWL_MVM_SAR_TABLE_SIZE;
+ }
+
+out_free:
+ kfree(ewrd.pointer);
return ret;
}
-#else /* CONFIG_ACPI */
-static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
- struct iwl_mvm_sar_table *sar_table)
+
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm,
+ struct iwl_mvm_geo_table *geo_table)
{
- return -ENOENT;
+ union acpi_object *wifi_pkg;
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ int i, ret;
+
+ root_handle = ACPI_HANDLE(mvm->dev);
+ if (!root_handle) {
+ IWL_DEBUG_RADIO(mvm,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "WGDS method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call WGDS with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &wgds);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status);
+ return -ENOENT;
+ }
+
+ wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer,
+ ACPI_WGDS_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ for (i = 0; i < ACPI_WGDS_WIFI_DATA_SIZE; i++) {
+ union acpi_object *entry;
+
+ entry = &wifi_pkg->package.elements[i + 1];
+		if ((entry->type != ACPI_TYPE_INTEGER) ||
+		    (entry->integer.value > U8_MAX)) {
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+ geo_table->values[i] = entry->integer.value;
+ }
+ ret = 0;
+out_free:
+ kfree(wgds.pointer);
+ return ret;
}
-#endif /* CONFIG_ACPI */
-static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
- struct iwl_mvm_sar_table sar_table;
struct iwl_dev_tx_power_cmd cmd = {
.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
- int ret, i, j, idx;
+ int i, j, idx;
+ int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
int len = sizeof(cmd);
+ BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2);
+ BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
+ IWL_MVM_SAR_TABLE_SIZE);
+
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
- ret = iwl_mvm_sar_get_table(mvm, &sar_table);
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ struct iwl_mvm_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
+ if (profs[i] > IWL_MVM_SAR_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &mvm->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /* if one of the profiles is disabled, we fail all */
+ return -ENOENT;
+ }
+
+ IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
+ for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
+ idx = (i * IWL_NUM_SUB_BANDS) + j;
+ cmd.v3.per_chain_restriction[i][j] =
+ cpu_to_le16(prof->table[idx]);
+ IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
+ j, prof->table[idx]);
+ }
+ }
+
+ IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+}
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_geo_table geo_table;
+ struct iwl_geo_tx_power_profiles_cmd cmd = {
+ .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
+ };
+ int ret, i, j, idx;
+ u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
+ ret = iwl_mvm_sar_get_wgds_table(mvm, &geo_table);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
- "SAR BIOS table invalid or unavailable. (%d)\n",
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* we don't fail if the table is not available */
return 0;
}
- if (!sar_table.enabled)
- return 0;
+ IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
- IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+ BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+ ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);
- BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
- ACPI_WRDS_TABLE_SIZE);
+ for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
+ struct iwl_per_chain_offset *chain =
+ (struct iwl_per_chain_offset *)&cmd.table[i];
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
- for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
- idx = (i * IWL_NUM_SUB_BANDS) + j;
- cmd.v3.per_chain_restriction[i][j] =
- cpu_to_le16(sar_table.values[idx]);
- IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
- j, sar_table.values[idx]);
+ for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+ u8 *value;
+
+ idx = i * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE +
+ j * ACPI_WGDS_TABLE_SIZE;
+ value = &geo_table.values[idx];
+ chain[j].max_tx_power = cpu_to_le16(value[0]);
+ chain[j].chain_a = value[1];
+ chain[j].chain_b = value[2];
+ IWL_DEBUG_RADIO(mvm,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j, value[1], value[2], value[0]);
}
}
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+}
- ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
- if (ret)
- IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
+#else /* CONFIG_ACPI */
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+ return -ENOENT;
+}
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI */
+
+static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ ret = iwl_mvm_sar_get_wrds_table(mvm);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* if not available, don't fail and don't bother with EWRD */
+ return 0;
+ }
+
+ ret = iwl_mvm_sar_get_ewrd_table(mvm);
+ /* if EWRD is not available, we can still use WRDS, so don't fail */
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+
+ /* choose profile 1 (WRDS) as default for both chains */
+ ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+ /* if we don't have profile 0 from BIOS, just skip it */
+ if (ret == -ENOENT)
+ return 0;
return ret;
}
@@ -1219,7 +1517,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* Init RSS configuration */
- if (iwl_mvm_has_new_rx_api(mvm)) {
+ /* TODO - remove a000 disablement when we have RXQ config API */
+ if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1229,10 +1528,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* init the fw <-> mac80211 STA mapping */
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
- mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@@ -1313,10 +1612,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- if (iwl_mvm_is_csum_supported(mvm) &&
- mvm->cfg->features & NETIF_F_RXCSUM)
- iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
-
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -1325,6 +1620,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ ret = iwl_mvm_sar_geo_init(mvm);
+ if (ret)
+ goto error;
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
@@ -1362,7 +1661,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;
/* init the fw <-> mac80211 STA mapping */
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
/* Add auxiliary station for scanning */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index c5734e1a02d2..0f1831b41915 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -467,13 +467,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
queue = IWL_MVM_DQA_GCAST_QUEUE;
}
+ /*
+ * For TVQM this will be overwritten later with the FW assigned
+ * queue value (when queue is enabled).
+ */
+ mvmvif->cab_queue = queue;
vif->cab_queue = queue;
} else {
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
- mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
@@ -901,7 +907,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
/* Allocate sniffer station */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk,
- vif->type);
+ vif->type, IWL_STA_GENERAL_PURPOSE);
if (ret)
return ret;
@@ -1222,7 +1228,9 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period));
- ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_STA_TYPE))
+ ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
/*
* Only set the beacon time when the MAC is being added, when we
@@ -1442,6 +1450,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *beacon_notify_hdr;
struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif;
+ struct agg_tx_status *agg_status;
u16 status;
lockdep_assert_held(&mvm->mutex);
@@ -1449,7 +1458,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
beacon_notify_hdr = &beacon->beacon_notify_hdr;
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
- status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
+ agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+ status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
status, beacon_notify_hdr->failure_frame,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 486dcceed17a..a67aa1f5a51c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -6,8 +6,8 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,7 +33,8 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -619,7 +620,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_reqs = 1;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
/* we create the 802.11 header and zero length SSID IE. */
@@ -766,7 +767,7 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
+ if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
mvmsta->sta_id != mvm->d0i3_ap_sta_id)
goto out;
@@ -1010,7 +1011,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->uploaded = false;
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
@@ -1053,7 +1054,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -1351,6 +1352,18 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_release;
}
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ /*
+		 * The only queue for this station is the mcast queue,
+ * which shouldn't be in TFD mask anyway
+ */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
+ 0, vif->type,
+ IWL_STA_MULTICAST);
+ if (ret)
+ goto out_release;
+ }
+
iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
}
@@ -1465,7 +1478,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
* already marked as draining, so to complete the draining, we
* just need to wait until the transport is empty.
*/
- iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
+ iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
}
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
@@ -1516,6 +1529,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->noa_duration = 0;
}
#endif
+ iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
iwl_mvm_dealloc_bcast_sta(mvm, vif);
goto out_release;
}
@@ -1952,7 +1966,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_MVM_SMPS_REQ_PROT,
IEEE80211_SMPS_DYNAMIC);
}
- } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
/*
* If update fails - SF might be running in associated
* mode while disassociated - which is forbidden.
@@ -1966,8 +1980,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to remove AP station\n");
if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
@@ -2098,11 +2112,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_remove;
+ ret = iwl_mvm_add_mcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
if (ret)
- goto out_unbind;
+ goto out_rm_mcast;
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
@@ -2132,6 +2150,8 @@ out_quota_failed:
iwl_mvm_power_update_mac(mvm);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, vif);
+out_rm_mcast:
+ iwl_mvm_rm_mcast_sta(mvm, vif);
out_unbind:
iwl_mvm_binding_remove_vif(mvm, vif);
out_remove:
@@ -2177,7 +2197,20 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
iwl_mvm_update_quotas(mvm, false, NULL);
+
+ /*
+ * This is not very nice, but it is the simplest option:
+ * with older FW, removing the mcast sta before the bcast station may
+ * cause assert 0x2b00.
+ * This is fixed in later FW versions (which stop beaconing when the
+ * bcast station is removed).
+ * So make the order of removal depend on the TLV.
+ */
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_rm_mcast_sta(mvm, vif);
iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_rm_mcast_sta(mvm, vif);
iwl_mvm_binding_remove_vif(mvm, vif);
iwl_mvm_power_update_mac(mvm);
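The hunk above encodes a firmware-dependent teardown order. A minimal standalone C sketch of that logic, with has_sta_type_tlv standing in for the IWL_UCODE_TLV_API_STA_TYPE check and rm_mcast()/rm_bcast() as illustrative stubs for the driver calls:

#include <stdbool.h>
#include <stdio.h>

static void rm_mcast(void) { puts("remove mcast sta"); }
static void rm_bcast(void) { puts("remove bcast sta"); }

/* Older FW asserts (0x2b00) if the mcast STA is removed before the
 * bcast STA; newer FW stops beaconing when the bcast STA goes, so the
 * mcast STA can safely be removed afterwards. */
static void stop_ap(bool has_sta_type_tlv)
{
	if (!has_sta_type_tlv)
		rm_mcast();
	rm_bcast();
	if (has_sta_type_tlv)
		rm_mcast();
}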
@@ -2343,6 +2376,9 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;
+ if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
__set_bit(tid_data->txq_id, &txqs);
if (iwl_mvm_tid_queued(tid_data) == 0)
@@ -2368,7 +2404,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
*/
break;
case STA_NOTIFY_AWAKE:
- if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
break;
if (txqs)
@@ -3711,7 +3747,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
int err;
u32 noa_duration;
- err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
+ err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
+ NULL);
if (err)
return err;
@@ -3938,7 +3975,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* flush the AP-station and all TDLS peers */
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -3964,7 +4001,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
/* this can take a while, and we may need/want other operations
* to succeed while doing this, so do it without the mutex held
*/
- iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
+ iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}
}
@@ -4195,7 +4232,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- if (!iwl_mvm_has_new_rx_api(mvm))
+ /* TODO - remove a000 disablement when we have RXQ config API */
+ if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
return;
notif->cookie = mvm->queue_sync_cookie;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 73a216524af2..4e74a6b90e70 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -380,6 +380,8 @@ struct iwl_mvm_vif {
bool associated;
u8 ap_assoc_sta_count;
+ u16 cab_queue;
+
bool uploaded;
bool ap_ibss_active;
bool pm_enabled;
@@ -407,6 +409,7 @@ struct iwl_mvm_vif {
struct iwl_mvm_time_event_data hs_time_event_data;
struct iwl_mvm_int_sta bcast_sta;
+ struct iwl_mvm_int_sta mcast_sta;
/*
* Assigned while mac80211 has the interface in a channel context,
@@ -603,10 +606,15 @@ enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_ACTIVE,
};
+#define MAX_NUM_LMAC 2
struct iwl_mvm_shared_mem_cfg {
+ int num_lmacs;
int num_txfifo_entries;
- u32 txfifo_size[TX_FIFO_MAX_NUM];
- u32 rxfifo_size[RX_FIFO_MAX_NUM];
+ struct {
+ u32 txfifo_size[TX_FIFO_MAX_NUM];
+ u32 rxfifo1_size;
+ } lmac[MAX_NUM_LMAC];
+ u32 rxfifo2_size;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
@@ -625,6 +633,7 @@ struct iwl_mvm_shared_mem_cfg {
* @reorder_timer: timer for frames that are in the reorder buffer. For AMSDU
* it is the time of last received sub-frame
* @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
* @mvm: mvm pointer, needed for frame timer context
*/
@@ -640,6 +649,7 @@ struct iwl_mvm_reorder_buffer {
unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
struct timer_list reorder_timer;
bool removed;
+ bool valid;
spinlock_t lock;
struct iwl_mvm *mvm;
} ____cacheline_aligned_in_smp;
@@ -707,8 +717,25 @@ enum iwl_mvm_queue_status {
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
+#define IWL_MVM_INVALID_QUEUE 0xFFFF
+
#define IWL_MVM_NUM_CIPHERS 10
+#ifdef CONFIG_ACPI
+#define IWL_MVM_SAR_TABLE_SIZE 10
+#define IWL_MVM_SAR_PROFILE_NUM 4
+#define IWL_MVM_GEO_TABLE_SIZE 18
+
+struct iwl_mvm_sar_profile {
+ bool enabled;
+ u8 table[IWL_MVM_SAR_TABLE_SIZE];
+};
+
+struct iwl_mvm_geo_table {
+ u8 values[IWL_MVM_GEO_TABLE_SIZE];
+};
+#endif
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -761,9 +788,9 @@ struct iwl_mvm {
u64 on_time_scan;
} radio_stats, accu_radio_stats;
+ u8 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
+
struct {
- /* Map to HW queue */
- u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
@@ -975,7 +1002,10 @@ struct iwl_mvm {
#endif
/* Tx queues */
- u8 aux_queue;
+ u16 aux_queue;
+ u16 probe_queue;
+ u16 p2p_dev_queue;
+
u8 first_agg_queue;
u8 last_agg_queue;
@@ -1018,7 +1048,7 @@ struct iwl_mvm {
} peer;
} tdls_cs;
- struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
+ struct iwl_mvm_shared_mem_cfg smem_cfg;
u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
@@ -1035,6 +1065,9 @@ struct iwl_mvm {
bool drop_bcn_ap_mode;
struct delayed_work cs_tx_unblock_dwork;
+#ifdef CONFIG_ACPI
+ struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
+#endif
};
/* Extract MVM priv from op_mode and _hw */
@@ -1222,13 +1255,25 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
{
/*
* TODO:
- * The issue of how to determine CDB support is still not well defined.
- * It may be that it will be for all next HW devices and it may be per
- * FW compilation and it may also differ between different devices.
- * For now take a ride on the new TX API and get back to it when
- * it is well defined.
+ * The issue of how to determine CDB APIs and usage is still not fully
+ * defined.
+ * There are separate compilations for CDB and non-CDB FW, but there
+ * may also be a runtime check.
+ * For now there is a TLV for checking the compilation mode, but a
+ * runtime check will also have to be added here once it is defined.
*/
- return iwl_mvm_has_new_tx_api(mvm);
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
+}
+
+static inline struct agg_tx_status*
+iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
+ struct iwl_mvm_tx_resp *tx_resp)
+{
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return &tx_resp->v6.status;
+ else
+ return &tx_resp->v3.status;
}
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
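iwl_mvm_get_agg_status() above picks a field out of whichever response layout the firmware speaks. A compilable sketch of the same versioned-union pattern, with made-up v3/v6 layouts (only the real driver headers define the actual fields):

#include <stdbool.h>

struct tx_resp_v3 { int status; };
struct tx_resp_v6 { int frame_count; int status; };

/* One packet, two possible layouts: the capability check decides which
 * view of the union is valid for this firmware. */
struct tx_resp {
	union {
		struct tx_resp_v3 v3;
		struct tx_resp_v6 v6;
	};
};

int *get_status(struct tx_resp *r, bool new_tx_api)
{
	return new_tx_api ? &r->v6.status : &r->v3.status;
}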
@@ -1271,7 +1316,6 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
******************/
/* uCode */
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
-int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
@@ -1389,6 +1433,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -1668,6 +1714,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout);
+
/*
* Disable a TXQ.
* Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
@@ -1701,7 +1750,8 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
- iwl_free_fw_paging(mvm);
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_free_fw_paging(mvm);
mvm->ucode_loaded = false;
iwl_trans_stop_device(mvm->trans);
}
@@ -1781,6 +1831,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
u32 size);
void iwl_mvm_reorder_timer_expired(unsigned long data);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
@@ -1797,4 +1848,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
u32 duration, u32 timeout);
bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+#ifdef CONFIG_ACPI
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
+#else
+static inline
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
+
#endif /* __IWL_MVM_H__ */
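The CONFIG_ACPI block above uses the usual kernel stub idiom: when the feature is compiled out, a static inline with the same signature returns an error, so callers never need their own #ifdefs. A generic sketch with illustrative names (CONFIG_FEATURE_X, feature_x_do):

#include <errno.h>

#ifdef CONFIG_FEATURE_X
int feature_x_do(int arg);	/* real implementation elsewhere */
#else
static inline int feature_x_do(int arg)
{
	(void)arg;
	return -ENOENT;		/* feature not built in */
}
#endif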
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index eade099b6dbf..283c41df622c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -817,6 +817,11 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
+ IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
+ return;
+ }
+
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 4cd72d4cdc47..9ffff6ed8133 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -302,6 +302,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_SYNC),
RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
+ iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
@@ -426,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(INIT_EXTENDED_CFG_CMD),
};
/* Please keep this array *SORTED* by hex value.
@@ -444,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+ HCMD_NAME(GEO_TX_POWER_LIMIT),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
@@ -452,6 +456,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
+ HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_PM_NOTIF),
@@ -462,6 +467,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+ HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
HCMD_NAME(STORED_BEACON_NTF),
};
@@ -602,6 +614,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
} else {
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+ mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
@@ -732,10 +746,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
- if (iwl_mvm_has_new_tx_api(mvm))
- err = iwl_run_unified_mvm_ucode(mvm, true);
- else
- err = iwl_run_init_mvm_ucode(mvm, true);
+ err = iwl_run_init_mvm_ucode(mvm, true);
if (!err || !iwlmvm_mod_params.init_dbg)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -1033,7 +1044,7 @@ static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
unsigned long mq;
spin_lock_bh(&mvm->queue_info_lock);
- mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
+ mq = mvm->hw_queue_to_mac80211[hw_queue];
spin_unlock_bh(&mvm->queue_info_lock);
iwl_mvm_stop_mac_queues(mvm, mq);
@@ -1063,7 +1074,7 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
unsigned long mq;
spin_lock_bh(&mvm->queue_info_lock);
- mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
+ mq = mvm->hw_queue_to_mac80211[hw_queue];
spin_unlock_bh(&mvm->queue_info_lock);
iwl_mvm_start_mac_queues(mvm, mq);
@@ -1256,7 +1267,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
u8 tid;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
- mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
return false;
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
@@ -1344,7 +1355,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
- if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
+ if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
return;
rcu_read_lock();
@@ -1414,7 +1425,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
} else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
mvm->d0i3_offloading = false;
}
@@ -1427,7 +1438,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
return ret;
/* configure wowlan configuration only if needed */
- if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
+ if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
/* wake on beacons only if beacon storing isn't supported */
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_STORING))
@@ -1504,7 +1515,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
spin_lock_bh(&mvm->d0i3_tx_lock);
- if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
+ if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
goto out;
IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
@@ -1542,7 +1553,7 @@ out:
}
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
wake_up(&mvm->d0i3_exit_waitq);
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
if (wake_queues)
ieee80211_wake_queues(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 95138830b9f8..d59efe804356 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
+ enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY;
+
lockdep_assert_held(&mvm->mutex);
+ /* In CDB mode we cannot modify PHY context between bands so... */
+ if (iwl_mvm_has_new_tx_api(mvm) &&
+ ctxt->channel->band != chandef->chan->band) {
+ int ret;
+
+ /* ... remove it here ... */
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_REMOVE, 0);
+ if (ret)
+ return ret;
+
+ /* ... and proceed to add it again */
+ action = FW_CTXT_ACTION_ADD;
+ }
+
ctxt->channel = chandef->chan;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
- FW_CTXT_ACTION_MODIFY, 0);
+ action, 0);
}
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
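The phy-ctxt hunk above works around firmware that cannot MODIFY a PHY context across bands. A minimal sketch of the remove-then-add fallback, with apply() as a stand-in for iwl_mvm_phy_ctxt_apply():

#include <stdbool.h>

enum ctxt_action { CTXT_ADD, CTXT_MODIFY, CTXT_REMOVE };

static int apply(enum ctxt_action a) { (void)a; return 0; }

int change_channel(bool new_tx_api, int cur_band, int new_band)
{
	enum ctxt_action action = CTXT_MODIFY;

	/* Cross-band change: tear the context down and recreate it. */
	if (new_tx_api && cur_band != new_band) {
		int ret = apply(CTXT_REMOVE);

		if (ret)
			return ret;
		action = CTXT_ADD;
	}
	return apply(action);
}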
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index ce907c58ebf6..7788eefcd2bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -826,7 +826,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
if (is_siso(rate) && rate->stbc) {
/* To enable STBC we need to set both a flag and ANT_AB */
ucode_rate |= RATE_MCS_ANT_AB_MSK;
- ucode_rate |= RATE_MCS_VHT_STBC_MSK;
+ ucode_rate |= RATE_MCS_STBC_MSK;
}
ucode_rate |= rate->bw;
@@ -873,7 +873,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
rate->ldpc = true;
- if (ucode_rate & RATE_MCS_VHT_STBC_MSK)
+ if (ucode_rate & RATE_MCS_STBC_MSK)
rate->stbc = true;
if (ucode_rate & RATE_MCS_BF_MSK)
rate->bfer = true;
@@ -3641,13 +3641,12 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
bw = "BAD BW";
}
- return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
+ return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
- (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
- (rate & RATE_MCS_BF_MSK) ? "BF " : "",
- (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 20473df79c94..fd1dd06c4f18 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
- unsigned int hdrlen, fraglen;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ unsigned int fraglen;
+
+ /*
+ * The 'hdrlen' (plus the 8 bytes for the SNAP header and the
+ * crypt_len, though those are all multiples of 4) goes away, but
+ * we want the *end* of it, which is going to be the start of the
+ * IP header, to be aligned when it gets pulled in.
+ * The beginning of the skb->data is aligned on at least a 4-byte
+ * boundary after allocation. Everything here is aligned at least
+ * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
+ * the result.
+ */
+ skb_reserve(skb, hdrlen & 3);
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
@@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
* If the latter changes (there are efforts in the standards group
* to do so) we should revisit this and ieee80211_data_to_8023().
*/
- hdrlen = (len <= skb_tailroom(skb)) ? len :
- sizeof(*hdr) + crypt_len + 8;
+ hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
fraglen = len - hdrlen;
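A small worked example of the hdrlen & 3 trick from the comment above; the values assume common 802.11 header lengths (24 bytes without QoS, 26 with QoS, 30 with QoS and HT control):

#include <stdio.h>

int main(void)
{
	unsigned int hdrlens[] = { 24, 26, 30 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int pad = hdrlens[i] & 3;

		/* After skb_reserve(skb, pad), the header ends at
		 * pad + hdrlen, which is always a multiple of 4. */
		printf("hdrlen=%u pad=%u end=%u\n",
		       hdrlens[i], pad, pad + hdrlens[i]);
	}
	return 0;
}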
@@ -339,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
- if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
+ if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -398,7 +410,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
/* set the preamble flag if appropriate */
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
- rx_status->flag |= RX_FLAG_SHORTPRE;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
/*
@@ -415,42 +427,49 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
case RATE_MCS_CHAN_WIDTH_20:
break;
case RATE_MCS_CHAN_WIDTH_40:
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ rx_status->bw = RATE_INFO_BW_80;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
+ rx_status->bw = RATE_INFO_BW_160;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status->flag |= RX_FLAG_SHORT_GI;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status->flag |= RX_FLAG_HT_GF;
+ rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->flag |= RX_FLAG_LDPC;
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
- rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->vht_nss =
+ rx_status->nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
- rx_status->flag |= RX_FLAG_VHT;
- rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->vht_flag |= RX_VHT_FLAG_BF;
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
} else {
- rx_status->rate_idx =
- iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- rx_status->band);
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ if (WARN(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band)) {
+ kfree_skb(skb);
+ return;
+ }
+ rx_status->rate_idx = rate;
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -637,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
.mvm = mvm,
};
int expected_size;
+ int i;
+ u8 *energy;
+ __le32 *bytes, *air_time;
if (iwl_mvm_is_cdb_supported(mvm))
expected_size = sizeof(*stats);
@@ -645,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else
expected_size = sizeof(struct iwl_notif_statistics_v10);
- if (iwl_rx_packet_payload_len(pkt) != expected_size)
- goto invalid;
+ if (iwl_rx_packet_payload_len(pkt) != expected_size) {
+ IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
+ iwl_rx_packet_payload_len(pkt));
+ return;
+ }
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
@@ -662,38 +687,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
le64_to_cpu(stats->general.common.on_time_scan);
data.general = &stats->general;
- if (iwl_mvm_has_new_rx_api(mvm)) {
- int i;
- u8 *energy;
- __le32 *bytes, *air_time;
-
- if (!iwl_mvm_is_cdb_supported(mvm)) {
- struct iwl_notif_statistics_v11 *v11 =
- (void *)&pkt->data;
-
- energy = (void *)&v11->load_stats.avg_energy;
- bytes = (void *)&v11->load_stats.byte_count;
- air_time = (void *)&v11->load_stats.air_time;
- } else {
- energy = (void *)&stats->load_stats.avg_energy;
- bytes = (void *)&stats->load_stats.byte_count;
- air_time = (void *)&stats->load_stats.air_time;
- }
-
- rcu_read_lock();
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
- struct iwl_mvm_sta *sta;
-
- if (!energy[i])
- continue;
-
- sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
- if (!sta)
- continue;
- sta->avg_energy = energy[i];
- }
- rcu_read_unlock();
- }
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
@@ -701,10 +694,36 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
- return;
- invalid:
- IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
- iwl_rx_packet_payload_len(pkt));
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ if (!iwl_mvm_is_cdb_supported(mvm)) {
+ struct iwl_notif_statistics_v11 *v11 =
+ (void *)&pkt->data;
+
+ energy = (void *)&v11->load_stats.avg_energy;
+ bytes = (void *)&v11->load_stats.byte_count;
+ air_time = (void *)&v11->load_stats.air_time;
+ } else {
+ energy = (void *)&stats->load_stats.avg_energy;
+ bytes = (void *)&stats->load_stats.byte_count;
+ air_time = (void *)&stats->load_stats.air_time;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ struct iwl_mvm_sta *sta;
+
+ if (!energy[i])
+ continue;
+
+ sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+ if (!sta)
+ continue;
+ sta->avg_energy = energy[i];
+ }
+ rcu_read_unlock();
}
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
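The statistics rework above turns the goto-based error path into an early return on a size mismatch. A sketch of the versioned size check, with made-up sizes standing in for the sizeofs of the v10/v11/CDB notification structs:

#include <stdbool.h>
#include <stdio.h>

#define STATS_SIZE_V10 480	/* illustrative values only */
#define STATS_SIZE_V11 500
#define STATS_SIZE_CDB 520

int check_stats_size(unsigned int payload_len, bool cdb, bool new_rx)
{
	unsigned int expected = cdb ? STATS_SIZE_CDB :
			new_rx ? STATS_SIZE_V11 : STATS_SIZE_V10;

	if (payload_len != expected) {
		fprintf(stderr, "received invalid statistics size (%u)!\n",
			payload_len);
		return -1;
	}
	return 0;
}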
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index d79e9c2a2654..966cd7543629 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -462,6 +462,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
int i;
u16 sn = 0, index = 0;
bool expired = false;
+ bool cont = false;
spin_lock(&buf->lock);
@@ -473,12 +474,21 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
- if (skb_queue_empty(&buf->entries[index]))
+ if (skb_queue_empty(&buf->entries[index])) {
+ /*
+ * If there is a hole and the next frame didn't expire
+ * we want to break and not advance SN
+ */
+ cont = false;
continue;
- if (!time_after(jiffies, buf->reorder_time[index] +
- RX_REORDER_BUF_TIMEOUT_MQ))
+ }
+ if (!cont && !time_after(jiffies, buf->reorder_time[index] +
+ RX_REORDER_BUF_TIMEOUT_MQ))
break;
+
expired = true;
+ /* continue until the next hole after these expired frames */
+ cont = true;
sn = ieee80211_sn_add(buf->head_sn, i + 1);
}
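The timer hunk above changes the expiry scan so that one expired frame releases the whole contiguous run behind it, stopping only at the next hole. A standalone sketch of that scan (occupied[] and expired_frame[] are illustrative inputs; the driver derives them from skb queues and jiffies):

#include <stdbool.h>

#define BUF_SIZE 64

/* Returns the last buffer index that may be released, or -1. */
int last_expired_index(const bool occupied[BUF_SIZE],
		       const bool expired_frame[BUF_SIZE])
{
	bool cont = false;
	int last = -1;

	for (int i = 0; i < BUF_SIZE; i++) {
		if (!occupied[i]) {
			cont = false;	/* hole: stop extending the run */
			continue;
		}
		if (!cont && !expired_frame[i])
			break;
		cont = true;		/* release up to the next hole */
		last = i;
	}
	return last;
}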
@@ -626,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return false;
baid_data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN(!baid_data,
- "Received baid %d, but no data exists for this BAID\n", baid))
+ if (!baid_data) {
+ WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
+ "Received baid %d, but no data exists for this BAID\n",
+ baid);
return false;
+ }
+
if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
"baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
@@ -643,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
spin_lock_bh(&buffer->lock);
+ if (!buffer->valid) {
+ if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+ buffer->valid = true;
+ }
+
if (ieee80211_is_back_req(hdr->frame_control)) {
iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
goto drop;
@@ -727,7 +749,8 @@ drop:
return true;
}
-static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
+ u32 reorder_data, u8 baid)
{
unsigned long now = jiffies;
unsigned long timeout;
@@ -736,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
rcu_read_lock();
data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN_ON(!data))
+ if (!data) {
+ WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
goto out;
+ }
if (!data->timeout)
goto out;
@@ -799,7 +824,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
/* set the preamble flag if appropriate */
if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
- rx_status->flag |= RX_FLAG_SHORTPRE;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
@@ -831,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
- if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
+ if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -893,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
kfree_skb(skb);
- rcu_read_unlock();
- return;
+ goto out;
}
/*
* Our hardware de-aggregates AMSDUs but copies the mac header
* as is to the de-aggregated MPDUs. We need to turn off the
* AMSDU bit in the QoS control ourselves.
+ * In addition, HW reverses addr3 and addr4 - reverse them back.
*/
if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
!WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
+ int i;
u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u8 mac_addr[ETH_ALEN];
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- if (!(desc->amsdu_info &
- IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
- rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1];
+ ether_addr_copy(hdr->addr3, mac_addr);
+
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] =
+ hdr->addr4[ETH_ALEN - i - 1];
+ ether_addr_copy(hdr->addr4, mac_addr);
+ }
+ }
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
+ u32 reorder_data = le32_to_cpu(desc->reorder_data);
+
+ iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
- if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
- iwl_mvm_agg_rx_received(mvm, baid);
}
/* Set up the HT phy flags */
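A compilable sketch of the A-MSDU address fix above: the hardware hands addr3 (and addr4, when present) back byte-reversed, and the driver reverses them again before passing the frame up:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void reverse_addr(unsigned char addr[ETH_ALEN])
{
	unsigned char tmp[ETH_ALEN];

	for (int i = 0; i < ETH_ALEN; i++)
		tmp[i] = addr[ETH_ALEN - i - 1];
	memcpy(addr, tmp, ETH_ALEN);
}

int main(void)
{
	unsigned char a3[ETH_ALEN] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };

	reverse_addr(a3);	/* now 11:22:33:44:55:66 */
	printf("%02x:...:%02x\n", a3[0], a3[5]);
	return 0;
}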
@@ -920,42 +958,50 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
case RATE_MCS_CHAN_WIDTH_20:
break;
case RATE_MCS_CHAN_WIDTH_40:
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ rx_status->bw = RATE_INFO_BW_80;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
+ rx_status->bw = RATE_INFO_BW_160;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status->flag |= RX_FLAG_SHORT_GI;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status->flag |= RX_FLAG_HT_GF;
+ rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->flag |= RX_FLAG_LDPC;
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
- rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->vht_nss =
+ rx_status->nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
- rx_status->flag |= RX_FLAG_VHT;
- rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->vht_flag |= RX_VHT_FLAG_BF;
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
} else {
- rx_status->rate_idx =
- iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- rx_status->band);
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ if (WARN(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band)) {
+ kfree_skb(skb);
+ goto out;
+ }
+ rx_status->rate_idx = rate;
+
}
/* management stuff on default queue */
@@ -974,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+out:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 0a64efa844b7..8d1b994ae79f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -81,44 +81,30 @@ enum iwl_mvm_traffic_load {
IWL_MVM_TRAFFIC_HIGH,
};
+#define IWL_SCAN_DWELL_ACTIVE 10
+#define IWL_SCAN_DWELL_PASSIVE 110
+#define IWL_SCAN_DWELL_FRAGMENTED 44
+#define IWL_SCAN_DWELL_EXTENDED 90
+
struct iwl_mvm_scan_timing_params {
- u32 dwell_active;
- u32 dwell_passive;
- u32 dwell_fragmented;
- u32 dwell_extended;
u32 suspend_time;
u32 max_out_time;
};
static struct iwl_mvm_scan_timing_params scan_timing[] = {
[IWL_SCAN_TYPE_UNASSOC] = {
- .dwell_active = 10,
- .dwell_passive = 110,
- .dwell_fragmented = 44,
- .dwell_extended = 90,
.suspend_time = 0,
.max_out_time = 0,
},
[IWL_SCAN_TYPE_WILD] = {
- .dwell_active = 10,
- .dwell_passive = 110,
- .dwell_fragmented = 44,
- .dwell_extended = 90,
.suspend_time = 30,
.max_out_time = 120,
},
[IWL_SCAN_TYPE_MILD] = {
- .dwell_active = 10,
- .dwell_passive = 110,
- .dwell_fragmented = 44,
- .dwell_extended = 90,
.suspend_time = 120,
.max_out_time = 120,
},
[IWL_SCAN_TYPE_FRAGMENTED] = {
- .dwell_active = 10,
- .dwell_passive = 110,
- .dwell_fragmented = 44,
.suspend_time = 95,
.max_out_time = 44,
},
@@ -294,34 +280,15 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
return max_ie_len;
}
-static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
- int num_res, u8 *buf, size_t buf_size)
-{
- int i;
- u8 *pos = buf, *end = buf + buf_size;
-
- for (i = 0; pos < end && i < num_res; i++)
- pos += snprintf(pos, end - pos, " %u", res[i].channel);
-
- /* terminate the string in case the buffer was too short */
- *(buf + buf_size - 1) = '\0';
-
- return buf;
-}
-
void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
- u8 buf[256];
IWL_DEBUG_SCAN(mvm,
- "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
- notif->status, notif->scanned_channels,
- iwl_mvm_dump_channel_list(notif->results,
- notif->scanned_channels, buf,
- sizeof(buf)));
+ "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
+ notif->status, notif->scanned_channels);
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
@@ -743,10 +710,10 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_lmac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->active_dwell = scan_timing[params->type].dwell_active;
- cmd->passive_dwell = scan_timing[params->type].dwell_passive;
- cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
- cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+ cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
@@ -944,13 +911,12 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
}
static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
- struct iwl_scan_dwell *dwell,
- struct iwl_mvm_scan_timing_params *timing)
+ struct iwl_scan_dwell *dwell)
{
- dwell->active = timing->dwell_active;
- dwell->passive = timing->dwell_passive;
- dwell->fragmented = timing->dwell_fragmented;
- dwell->extended = timing->dwell_extended;
+ dwell->active = IWL_SCAN_DWELL_ACTIVE;
+ dwell->passive = IWL_SCAN_DWELL_PASSIVE;
+ dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
+ dwell->extended = IWL_SCAN_DWELL_EXTENDED;
}
static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
@@ -966,11 +932,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
channels[j] = band->channels[i].hw_value;
}
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags)
+static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
- struct iwl_scan_config *cfg = config;
+ struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -979,7 +945,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
- iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
@@ -989,11 +955,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
iwl_mvm_fill_channels(mvm, cfg->channel_array);
}
-static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags)
+static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
- struct iwl_scan_config_cdb *cfg = config;
+ struct iwl_scan_config *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1001,12 +967,16 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
cfg->out_of_channel_time[0] =
cpu_to_le32(scan_timing[type].max_out_time);
- cfg->out_of_channel_time[1] =
- cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
- cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
- iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cfg->suspend_time[1] =
+ cpu_to_le32(scan_timing[type].suspend_time);
+ cfg->out_of_channel_time[1] =
+ cpu_to_le32(scan_timing[type].max_out_time);
+ }
+
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
@@ -1033,16 +1003,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
- if (type == mvm->scan_type) {
- IWL_DEBUG_SCAN(mvm,
- "Ignoring UMAC scan config of the same type\n");
+ if (type == mvm->scan_type)
return 0;
- }
- if (iwl_mvm_is_cdb_supported(mvm))
- cmd_size = sizeof(struct iwl_scan_config_cdb);
- else
+ if (iwl_mvm_has_new_tx_api(mvm))
cmd_size = sizeof(struct iwl_scan_config);
+ else
+ cmd_size = sizeof(struct iwl_scan_config_v1);
cmd_size += mvm->fw->ucode_capa.n_scan_channels;
cfg = kzalloc(cmd_size, GFP_KERNEL);
@@ -1068,13 +1035,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
- if (iwl_mvm_is_cdb_supported(mvm)) {
+ if (iwl_mvm_has_new_tx_api(mvm)) {
flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
- iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
- } else {
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+ } else {
+ iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
}
cmd.data[0] = cfg;
@@ -1113,22 +1080,26 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->passive_dwell = params->measurement_dwell;
cmd->extended_dwell = params->measurement_dwell;
} else {
- cmd->active_dwell = timing->dwell_active;
- cmd->passive_dwell = timing->dwell_passive;
- cmd->extended_dwell = timing->dwell_extended;
+ cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
}
- cmd->fragmented_dwell = timing->dwell_fragmented;
-
- if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
- cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
- cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
- cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
- cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v6.max_out_time[1] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[1] =
+ cpu_to_le32(timing->suspend_time);
+ }
} else {
- cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
- cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
- cmd->no_cdb.scan_priority =
+ cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time);
+ cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time);
+ cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
@@ -1207,8 +1178,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
- (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
+ void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
+ (void *)&cmd->v6.data : (void *)&cmd->v1.data;
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@@ -1245,12 +1216,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->cdb.channel_flags = channel_flags;
- cmd->cdb.n_channels = params->n_channels;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ cmd->v6.channel_flags = channel_flags;
+ cmd->v6.n_channels = params->n_channels;
} else {
- cmd->no_cdb.channel_flags = channel_flags;
- cmd->no_cdb.n_channels = params->n_channels;
+ cmd->v1.channel_flags = channel_flags;
+ cmd->v1.n_channels = params->n_channels;
}
iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
@@ -1607,16 +1578,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
- u8 buf[256];
mvm->scan_start = le64_to_cpu(notif->start_tsf);
IWL_DEBUG_SCAN(mvm,
- "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
- notif->status, notif->scanned_channels,
- iwl_mvm_dump_channel_list(notif->results,
- notif->scanned_channels, buf,
- sizeof(buf)));
+ "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+ notif->status, notif->scanned_channels);
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
@@ -1692,10 +1659,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
- int base_size = IWL_SCAN_REQ_UMAC_SIZE;
+ int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- if (iwl_mvm_is_cdb_supported(mvm))
- base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 101fb04a8573..539b06bf0803 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break;
case SF_FULL_ON:
- if (sta_id == IWL_MVM_STATION_COUNT) {
+ if (sta_id == IWL_MVM_INVALID_STA) {
IWL_ERR(mvm,
"No station: Cannot switch SF to FULL_ON\n");
return -EINVAL;
@@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
bool remove_vif)
{
enum iwl_sf_state new_state;
- u8 sta_id = IWL_MVM_STATION_COUNT;
+ u8 sta_id = IWL_MVM_INVALID_STA;
struct iwl_mvm_vif *mvmvif = NULL;
struct iwl_mvm_active_iface_iterator_data data = {
.ignore_vif = changed_vif,
.sta_vif_state = SF_UNINIT,
- .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
+ .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9d28db7f56aa..f5c786ddc526 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,9 +77,11 @@
*/
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
- return iwl_mvm_has_new_rx_api(mvm) ?
- sizeof(struct iwl_mvm_add_sta_cmd) :
- sizeof(struct iwl_mvm_add_sta_cmd_v7);
+ if (iwl_mvm_has_new_rx_api(mvm) ||
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ return sizeof(struct iwl_mvm_add_sta_cmd);
+ else
+ return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
@@ -98,7 +100,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
reserved_ids = BIT(0);
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
- for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
+ for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
if (BIT(sta_id) & reserved_ids)
continue;
@@ -106,7 +108,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
lockdep_is_held(&mvm->mutex)))
return sta_id;
}
- return IWL_MVM_STATION_COUNT;
+ return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
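Much of this patch replaces IWL_MVM_STATION_COUNT, which previously doubled as a "no station" marker, with the explicit IWL_MVM_INVALID_STA sentinel. A generic sketch of why: once the table can grow, "index == table size" stops being a safe out-of-band value (STATION_COUNT and 0xFF here are illustrative):

#define STATION_COUNT 16
#define INVALID_STA 0xFF	/* out-of-band, independent of table size */

int find_free_slot(const int used[STATION_COUNT])
{
	for (int i = 0; i < STATION_COUNT; i++)
		if (!used[i])
			return i;
	return INVALID_STA;	/* never a valid index, even if the
				 * table is later enlarged */
}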
@@ -126,12 +128,21 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ add_sta_cmd.station_type = mvm_sta->sta_type;
+
if (!update || (flags & STA_MODIFY_QUEUES)) {
- add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
- if (flags & STA_MODIFY_QUEUES)
- add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ add_sta_cmd.tfd_queue_msk =
+ cpu_to_le32(mvm_sta->tfd_queue_msk);
+
+ if (flags & STA_MODIFY_QUEUES)
+ add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+ } else {
+ WARN_ON(flags & STA_MODIFY_QUEUES);
+ }
}
switch (sta->bandwidth) {
@@ -209,13 +220,15 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
- add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
+ add_sta_cmd.uapsd_acs |= BIT(AC_BK);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
- add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
+ add_sta_cmd.uapsd_acs |= BIT(AC_BE);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
- add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
+ add_sta_cmd.uapsd_acs |= BIT(AC_VI);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
- add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
+ add_sta_cmd.uapsd_acs |= BIT(AC_VO);
+ add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
+ add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
}
status = ADD_STA_SUCCESS;
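A worked example of the U-APSD encoding introduced above, assuming the low nibble carries the trigger-enabled ACs and the high nibble the delivery-enabled ACs (set identically here), and mac80211's max_sp convention of two frames per unit with 0 meaning all buffered frames (encoded as 128):

#include <stdio.h>

int main(void)
{
	unsigned char acs = 0x05;	/* e.g. two ACs enabled */
	unsigned char uapsd_acs = acs | (acs << 4);
	unsigned int max_sp = 2;	/* 2 units -> 4 frames per SP */
	unsigned int sp_length = max_sp ? max_sp * 2 : 128;

	printf("uapsd_acs=0x%02x sp_length=%u\n", uapsd_acs, sp_length);
	return 0;
}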
@@ -337,6 +350,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
u8 sta_id;
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -387,6 +403,9 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -426,6 +445,9 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -447,7 +469,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
disable_agg_tids |= BIT(tid);
- mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -468,6 +490,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
@@ -475,6 +500,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
spin_unlock_bh(&mvm->queue_info_lock);
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (WARN_ON(!mvmsta))
+ return -EINVAL;
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
/* Disable the queue */
@@ -512,6 +539,8 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
int i;
lockdep_assert_held(&mvm->queue_info_lock);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
@@ -596,6 +625,9 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
unsigned long mq;
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
/*
* If the AC is lower than current one - FIFO needs to be redirected to
* the lowest one of the streams in the queue. Check if this is needed
@@ -617,7 +649,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
- mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+ mq = mvm->hw_queue_to_mac80211[queue];
shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
spin_unlock_bh(&mvm->queue_info_lock);
@@ -626,7 +658,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
/* Stop MAC queues and wait for this queue to empty */
iwl_mvm_stop_mac_queues(mvm, mq);
- ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
if (ret) {
IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
queue);
@@ -677,6 +709,37 @@ out:
return ret;
}
+static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac,
+ int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ u8 mac_queue = mvmsta->vif->hw_queue[ac];
+ int queue = -1;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Allocating queue for sta %d on tid %d\n",
+ mvmsta->sta_id, tid);
+ queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
+ wdg_timeout);
+ if (queue < 0)
+ return queue;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
+
+ spin_lock_bh(&mvmsta->lock);
+ mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tid_data[tid].is_tid_active = true;
+ mvmsta->tfd_queue_msk |= BIT(queue);
+ spin_unlock_bh(&mvmsta->lock);
+
+ return 0;
+}
+
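On the new TX path, iwl_mvm_sta_alloc_queue_tvqm() gives each (station, TID) pair its own queue the first time traffic appears on that TID, with IWL_MVM_INVALID_QUEUE marking TIDs that have no queue yet. A reduced model of that lazy allocation, assuming a stub allocator in place of iwl_mvm_tvqm_enable_txq():

#include <stdint.h>
#include <stdio.h>

#define MAX_TID		8
#define INVALID_QUEUE	0xffff	/* stands in for IWL_MVM_INVALID_QUEUE */

struct tid_data {
	uint16_t txq_id;
	int is_active;
};

/* pretend firmware allocator: hands out queue ids sequentially */
static int fw_alloc_queue(void)
{
	static int next = 10;
	return next++;
}

/* allocate a queue for a TID on first use, like the TVQM path above */
static int alloc_queue_for_tid(struct tid_data *tids, int tid)
{
	int queue;

	if (tids[tid].txq_id != INVALID_QUEUE)
		return tids[tid].txq_id;	/* already mapped */

	queue = fw_alloc_queue();
	if (queue < 0)
		return queue;

	tids[tid].txq_id = queue;
	tids[tid].is_active = 1;
	return queue;
}

int main(void)
{
	struct tid_data tids[MAX_TID];
	int i;

	for (i = 0; i < MAX_TID; i++)
		tids[i] = (struct tid_data){ .txq_id = INVALID_QUEUE };

	printf("tid 3 -> queue %d\n", alloc_queue_for_tid(tids, 3));
	printf("tid 3 -> queue %d (cached)\n", alloc_queue_for_tid(tids, 3));
	return 0;
}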
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@@ -702,6 +765,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+
spin_lock_bh(&mvmsta->lock);
tfd_queue_mask = mvmsta->tfd_queue_msk;
spin_unlock_bh(&mvmsta->lock);
@@ -880,6 +946,9 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
spin_lock_bh(&mvm->queue_info_lock);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -917,6 +986,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
int ssn;
int ret = true;
+ /* queue sharing is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
@@ -1014,7 +1087,7 @@ static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
ac = iwl_mvm_tid_to_ac_queue(tid);
mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
- if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+ if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
IWL_ERR(mvm,
"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
@@ -1059,8 +1132,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
mutex_lock(&mvm->mutex);
+ /* No queue reconfiguration in TVQM mode */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ goto alloc_queues;
+
/* Reconfigure queues requiring reconfiguration */
- for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+ for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
bool reconfig;
bool change_owner;
@@ -1088,6 +1165,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
iwl_mvm_change_queue_owner(mvm, queue);
}
+alloc_queues:
/* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) {
@@ -1116,6 +1194,10 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
int queue;
bool using_inactive_queue = false, same_sta = false;
+ /* queue reserving is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return 0;
+
/*
* Check for inactive queues, so we don't reach a situation where we
* can't add a STA due to a shortage in queues that doesn't really exist
@@ -1191,7 +1273,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
int ac;
u8 mac_queue;
- if (txq_id == IEEE80211_INVAL_HW_QUEUE)
+ if (txq_id == IWL_MVM_INVALID_QUEUE)
continue;
skb_queue_head_init(&tid_data->deferred_tx_frames);
@@ -1199,20 +1281,31 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
ac = tid_to_mac80211_ac[i];
mac_queue = mvm_sta->vif->hw_queue[ac];
- cfg.tid = i;
- cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
- cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
- txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d\n",
+ mvm_sta->sta_id, i);
+ txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
+ mvm_sta->sta_id,
+ i, wdg_timeout);
+ tid_data->txq_id = txq_id;
+ } else {
+ u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
- IWL_DEBUG_TX_QUEUES(mvm,
- "Re-mapping sta %d tid %d to queue %d\n",
- mvm_sta->sta_id, i, txq_id);
+ cfg.tid = i;
+ cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ txq_id ==
+ IWL_MVM_DQA_BSS_CLIENT_QUEUE);
- iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
- IEEE80211_SEQ_TO_SN(tid_data->seq_number),
- &cfg, wdg_timeout);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d to queue %d\n",
+ mvm_sta->sta_id, i, txq_id);
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
+ wdg_timeout);
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ }
}
atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
@@ -1235,7 +1328,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
else
sta_id = mvm_sta->sta_id;
- if (sta_id == IWL_MVM_STATION_COUNT)
+ if (sta_id == IWL_MVM_INVALID_STA)
return -ENOSPC;
spin_lock_init(&mvm_sta->lock);
@@ -1254,6 +1347,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
mvm_sta->tx_protection = 0;
mvm_sta->tt_tx_protection = false;
+ mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
/* HW restart, don't assume the memory has been zeroed */
atomic_set(&mvm->pending_frames[sta_id], 0);
@@ -1287,7 +1381,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
* Mark all queues for this STA as unallocated and defer TX
* frames until the queue is allocated
*/
- mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
}
mvm_sta->deferred_traffic_tid_map = 0;
@@ -1303,7 +1397,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->dup_data = dup_data;
}
- if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_mvm_reserve_sta_stream(mvm, sta,
ieee80211_vif_type_p2p(vif));
if (ret)
@@ -1317,10 +1411,10 @@ update_fw:
if (vif->type == NL80211_IFTYPE_STATION) {
if (!sta->tdls) {
- WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
+ WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
mvmvif->ap_sta_id = sta_id;
} else {
- WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
+ WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
}
}
@@ -1486,13 +1580,13 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
- if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+ if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
ac = iwl_mvm_tid_to_ac_queue(i);
iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
vif->hw_queue[ac], i, 0);
- mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
}
@@ -1520,8 +1614,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
if (ret)
return ret;
- ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
- mvm_sta->tfd_queue_msk);
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ mvm_sta->tfd_queue_msk);
if (ret)
return ret;
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
@@ -1571,11 +1665,11 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;
/* unassoc - go ahead - remove the AP STA now */
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == sta_id)
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
}
}
@@ -1584,7 +1678,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* before the STA is removed.
*/
if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
- mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
@@ -1637,27 +1731,28 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
- u32 qmask, enum nl80211_iftype iftype)
+ u32 qmask, enum nl80211_iftype iftype,
+ enum iwl_sta_type type)
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
- if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
return -ENOSPC;
}
sta->tfd_queue_msk = qmask;
+ sta->type = type;
/* put a non-NULL value so iterating over the stations won't stop */
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
return 0;
}
-static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
- struct iwl_mvm_int_sta *sta)
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
- sta->sta_id = IWL_MVM_STATION_COUNT;
+ sta->sta_id = IWL_MVM_INVALID_STA;
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
@@ -1675,8 +1770,11 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ cmd.station_type = sta->type;
- cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(0xffff);
if (addr)
@@ -1701,27 +1799,19 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Map Aux queue to fifo - needs to happen before adding Aux station */
- if (!iwl_mvm_is_dqa_supported(mvm))
- iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
- IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
-
- /* Allocate aux station and assign to it the aux queue */
- ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
- NL80211_IFTYPE_UNSPECIFIED);
- if (ret)
- return ret;
- if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
+ mvm->aux_sta.sta_id,
+ IWL_MAX_TID_COUNT,
+ wdg_timeout);
+ mvm->aux_queue = queue;
+ } else if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvm->aux_sta.sta_id,
@@ -1732,14 +1822,44 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
wdg_timeout);
+ } else {
+ iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+ IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
}
+}
- ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
- MAC_INDEX_AUX, 0);
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+ /* Allocate aux station and assign to it the aux queue */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
+ NL80211_IFTYPE_UNSPECIFIED,
+ IWL_STA_AUX_ACTIVITY);
if (ret)
+ return ret;
+
+ /* Map Aux queue to fifo - needs to happen before adding Aux station */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_queue(mvm);
+
+ ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
+ MAC_INDEX_AUX, 0);
+ if (ret) {
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
- return ret;
+ return ret;
+ }
+
+ /*
+ * For a000 firmware and on, we cannot add a queue to a station unknown
+ * to the firmware, so enable the queue here - after the station was added
+ */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_queue(mvm);
+
+ return 0;
}
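The reshuffled iwl_mvm_add_aux_sta() exists because the ordering constraint flipped: older firmware wants the aux queue mapped before ADD_STA (which validates the queue mask), while a000-style firmware rejects queue configuration for a station it does not know yet. A schematic of the branch, with the two driver calls reduced to stubs:

#include <stdbool.h>
#include <stdio.h>

/* stubs standing in for the real driver calls in this diff */
static void enable_aux_queue(void) { puts("enable aux queue"); }
static int add_sta_to_fw(void)     { puts("ADD_STA"); return 0; }

static int add_aux_sta(bool has_new_tx_api)
{
	int ret;

	/* old firmware: map the queue first, ADD_STA validates the mask */
	if (!has_new_tx_api)
		enable_aux_queue();

	ret = add_sta_to_fw();
	if (ret)
		return ret;

	/* a000+: the firmware must know the station before queue config */
	if (has_new_tx_api)
		enable_aux_queue();

	return 0;
}

int main(void)
{
	puts("-- legacy --");
	add_aux_sta(false);
	puts("-- new API --");
	add_aux_sta(true);
	return 0;
}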
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1790,39 +1910,39 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
const u8 *baddr = _baddr;
+ int queue;
int ret;
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_VO,
+ .sta_id = mvmvif->bcast_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
lockdep_assert_held(&mvm->mutex);
- if (iwl_mvm_is_dqa_supported(mvm)) {
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = IWL_MVM_TX_FIFO_VO,
- .sta_id = mvmvif->bcast_sta.sta_id,
- .tid = IWL_MAX_TID_COUNT,
- .aggregate = false,
- .frame_limit = IWL_FRAME_LIMIT,
- };
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
- int queue;
-
+ if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
- queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ queue = mvm->probe_queue;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ queue = mvm->p2p_dev_queue;
else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
return -EINVAL;
- iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
- wdg_timeout);
bsta->tfd_queue_msk |= BIT(queue);
+
+ iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
+ &cfg, wdg_timeout);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
baddr = vif->bss_conf.bssid;
- if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
return -ENOSPC;
ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
@@ -1831,27 +1951,21 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
/*
- * In AP vif type, we also need to enable the cab_queue. However, we
- * have to enable it after the ADD_STA command is sent, otherwise the
- * FW will throw an assert once we send the ADD_STA command (it'll
- * detect a mismatch in the tfd_queue_msk, as we can't add the
- * enabled-cab_queue to the mask)
+ * For a000 firmware and on, we cannot add a queue to a station unknown
+ * to the firmware, so enable the queue here - after the station was added
*/
- if (iwl_mvm_is_dqa_supported(mvm) &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = IWL_MVM_TX_FIFO_MCAST,
- .sta_id = mvmvif->bcast_sta.sta_id,
- .tid = IWL_MAX_TID_COUNT,
- .aggregate = false,
- .frame_limit = IWL_FRAME_LIMIT,
- };
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
+ bsta->sta_id,
+ IWL_MAX_TID_COUNT,
+ wdg_timeout);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ mvm->probe_queue = queue;
+ else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_dev_queue = queue;
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
- 0, &cfg, wdg_timeout);
+ bsta->tfd_queue_msk |= BIT(queue);
}
return 0;
@@ -1869,24 +1983,18 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
- if (mvmvif->bcast_sta.tfd_queue_msk &
- BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
- iwl_mvm_disable_txq(mvm,
- IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+ if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
+ iwl_mvm_disable_txq(mvm, mvm->probe_queue,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
- mvmvif->bcast_sta.tfd_queue_msk &=
- ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+ mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
}
- if (mvmvif->bcast_sta.tfd_queue_msk &
- BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
- iwl_mvm_disable_txq(mvm,
- IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+ if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
+ iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
- mvmvif->bcast_sta.tfd_queue_msk &=
- ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+ mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
}
}
@@ -1928,7 +2036,8 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
- ieee80211_vif_type_p2p(vif));
+ ieee80211_vif_type_p2p(vif),
+ IWL_STA_GENERAL_PURPOSE);
}
/* Allocate a new station entry for the broadcast station to the given vif,
@@ -1982,6 +2091,101 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
+/*
+ * Allocate a new station entry for the multicast station to the given vif,
+ * and send it to the FW.
+ * Note that each AP/GO mac should have its own multicast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the multicast station is added
+ */
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
+ static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+ const u8 *maddr = _maddr;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_MCAST,
+ .sta_id = msta->sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ return 0;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+ return -ENOTSUPP;
+
+ /*
+ * While in previous FWs we had to exclude the cab queue from the TFD
+ * queue mask, now it is needed like any other queue.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) &&
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+ iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+ &cfg, timeout);
+ msta->tfd_queue_msk |= BIT(vif->cab_queue);
+ }
+ ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
+ mvmvif->id, mvmvif->color);
+ if (ret) {
+ iwl_mvm_dealloc_int_sta(mvm, msta);
+ return ret;
+ }
+
+ /*
+ * Enable cab queue after the ADD_STA command is sent.
+ * This is needed for a000 firmware, which won't accept an SCD_QUEUE_CFG
+ * command with an unknown station id, and for firmware without the
+ * station type API, where the cab queue is not included in the
+ * tfd_queue_mask.
+ */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
+ msta->sta_id,
+ IWL_MAX_TID_COUNT,
+ timeout);
+ mvmvif->cab_queue = queue;
+ } else if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_STA_TYPE)) {
+ iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+ &cfg, timeout);
+ }
+
+ return 0;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ return 0;
+
+ iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
+ IWL_MAX_TID_COUNT, 0);
+
+ ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+
+ return ret;
+}
+
#define IWL_MAX_RX_BA_SESSIONS 16
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
@@ -2059,6 +2263,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
reorder_buf->mvm = mvm;
reorder_buf->queue = i;
reorder_buf->sta_id = sta_id;
+ reorder_buf->valid = false;
for (j = 0; j < reorder_buf->buf_size; j++)
__skb_queue_head_init(&reorder_buf->entries[j]);
}
@@ -2226,7 +2431,9 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.modify_mask = STA_MODIFY_QUEUES;
+ cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
@@ -2310,10 +2517,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* one and mark it as reserved
* 3. In DQA mode, but no traffic yet on this TID: same treatment as in
* non-DQA mode, since the TXQ hasn't yet been allocated
+ * Case 3 is not supported on the new TX path, as it is not expected
+ * to happen and aggregation will be offloaded soon anyway
*/
txq_id = mvmsta->tid_data[tid].txq_id;
- if (iwl_mvm_is_dqa_supported(mvm) &&
- unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
+ ret = -ENXIO;
+ goto release_locks;
+ }
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED)) {
ret = -ENXIO;
IWL_DEBUG_TX_QUEUES(mvm,
"Can't start tid %d agg on shared queue!\n",
@@ -2409,6 +2624,20 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->amsdu_in_ampdu_allowed = amsdu;
spin_unlock_bh(&mvmsta->lock);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ /*
+ * If there is no queue, iwl_mvm_sta_tx_agg_start() would have
+ * failed, so there is no need to check the queue's status
+ */
+ if (buf_size < mvmsta->max_agg_bufsize)
+ return -ENOTSUPP;
+
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+ goto out;
+ }
+
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
spin_lock_bh(&mvm->queue_info_lock);
@@ -2430,8 +2659,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* If reconfiguring an existing queue, it first must be
* drained
*/
- ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
- BIT(queue));
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ BIT(queue));
if (ret) {
IWL_ERR(mvm,
"Error draining queue before reconfig\n");
@@ -2466,6 +2695,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
+out:
/*
* Even though in theory the peer could have different
* aggregation reorder buffer sizes for different sessions,
@@ -2483,6 +2713,27 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
+static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ u16 txq_id)
+{
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ /*
+ * The TXQ is marked as reserved only if no traffic came through yet.
+ * This means no traffic has been sent on this TID (agg'd or not), so
+ * we no longer have use for the queue. Since it hasn't even been
+ * allocated through iwl_mvm_enable_txq, we can just mark it back as
+ * free.
+ */
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
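iwl_mvm_unreserve_agg_queue() above factors the duplicated release logic out of the agg stop and flush paths: under the queue_info lock, a queue that was reserved but never activated goes straight back to FREE, and the whole helper is a no-op on the new TX path. A small pthread model of that guarded transition, with the states assumed from this diff (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

enum queue_status { QUEUE_FREE, QUEUE_RESERVED, QUEUE_READY };

static enum queue_status queue_status[32];
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* release the reservation only if no traffic ever activated the queue */
static void unreserve_agg_queue(int txq_id)
{
	pthread_mutex_lock(&queue_lock);
	if (queue_status[txq_id] == QUEUE_RESERVED)
		queue_status[txq_id] = QUEUE_FREE;
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	queue_status[5] = QUEUE_RESERVED;  /* reserved at agg start */
	queue_status[6] = QUEUE_READY;     /* traffic already flowed */

	unreserve_agg_queue(5);
	unreserve_agg_queue(6);

	printf("q5=%d (FREE) q6=%d (READY, untouched)\n",
	       queue_status[5], queue_status[6]);
	return 0;
}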
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
@@ -2509,18 +2760,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
- spin_lock_bh(&mvm->queue_info_lock);
- /*
- * The TXQ is marked as reserved only if no traffic came through yet
- * This means no traffic has been sent on this TID (agg'd or not), so
- * we no longer have use for the queue. Since it hasn't even been
- * allocated through iwl_mvm_enable_txq, so we can just mark it back as
- * free.
- */
- if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
-
- spin_unlock_bh(&mvm->queue_info_lock);
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
switch (tid_data->state) {
case IWL_AGG_ON:
@@ -2600,24 +2840,14 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
spin_unlock_bh(&mvmsta->lock);
- spin_lock_bh(&mvm->queue_info_lock);
- /*
- * The TXQ is marked as reserved only if no traffic came through yet
- * This means no traffic has been sent on this TID (agg'd or not), so
- * we no longer have use for the queue. Since it hasn't even been
- * allocated through iwl_mvm_enable_txq, so we can just mark it back as
- * free.
- */
- if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
- spin_unlock_bh(&mvm->queue_info_lock);
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
if (old_state >= IWL_AGG_ON) {
iwl_mvm_drain_sta(mvm, mvmsta, true);
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
- iwl_trans_wait_tx_queue_empty(mvm->trans,
- mvmsta->tfd_queue_msk);
+ iwl_trans_wait_tx_queues_empty(mvm->trans,
+ mvmsta->tfd_queue_msk);
iwl_mvm_drain_sta(mvm, mvmsta, false);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
@@ -2675,7 +2905,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
* station ID, then use AP's station ID.
*/
if (vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
@@ -2697,68 +2927,97 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
- struct ieee80211_key_conf *keyconf, bool mcast,
+ struct ieee80211_key_conf *key, bool mcast,
u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
u8 key_offset)
{
- struct iwl_mvm_add_sta_key_cmd cmd = {};
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
__le16 key_flags;
int ret;
u32 status;
u16 keyidx;
- int i;
- u8 sta_id = mvm_sta->sta_id;
+ u64 pn = 0;
+ int i, size;
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
- keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
- switch (keyconf->cipher) {
+ switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
- cmd.tkip_rx_tsc_byte2 = tkip_iv32;
- for (i = 0; i < 5; i++)
- cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
- memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ if (new_api) {
+ memcpy((void *)&u.cmd.tx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ memcpy((void *)&u.cmd.rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+ pn = atomic64_read(&key->tx_pn);
+
+ } else {
+ u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ u.cmd_v1.tkip_rx_ttak[i] =
+ cpu_to_le16(tkip_p1k[i]);
+ }
+ memcpy(u.cmd.common.key, key->key, key->keylen);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
- memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ if (new_api)
+ pn = atomic64_read(&key->tx_pn);
break;
case WLAN_CIPHER_SUITE_WEP104:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_WEP40:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
- memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
+ memcpy(u.cmd.common.key + 3, key->key, key->keylen);
break;
case WLAN_CIPHER_SUITE_GCMP_256:
key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_GCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
- memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ if (new_api)
+ pn = atomic64_read(&key->tx_pn);
break;
default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
- memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
}
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.key_offset = key_offset;
- cmd.key_flags = key_flags;
- cmd.sta_id = sta_id;
+ u.cmd.common.key_offset = key_offset;
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.sta_id = mvm_sta->sta_id;
+
+ if (new_api) {
+ u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
+ size = sizeof(u.cmd);
+ } else {
+ size = sizeof(u.cmd_v1);
+ }
status = ADD_STA_SUCCESS;
if (cmd_flags & CMD_ASYNC)
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
+ &u.cmd);
else
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
+ &u.cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -2858,7 +3117,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
return sta->addr;
if (vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+ mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
@@ -2911,9 +3170,14 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
struct ieee80211_key_conf *keyconf,
bool mcast)
{
- struct iwl_mvm_add_sta_key_cmd cmd = {};
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
__le16 key_flags;
- int ret;
+ int ret, size;
u32 status;
key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
@@ -2924,13 +3188,19 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
- cmd.key_flags = key_flags;
- cmd.key_offset = keyconf->hw_key_idx;
- cmd.sta_id = sta_id;
+ /*
+ * The fields assigned here are in the same location at the start
+ * of the command, so we can do this union trick.
+ */
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.key_offset = keyconf->hw_key_idx;
+ u.cmd.common.sta_id = sta_id;
+
+ size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
- &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
+ &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -3044,7 +3314,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
struct iwl_mvm_sta *mvm_sta;
- u8 sta_id = IWL_MVM_STATION_COUNT;
+ u8 sta_id = IWL_MVM_INVALID_STA;
int ret, i;
lockdep_assert_held(&mvm->mutex);
@@ -3207,13 +3477,13 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
/* Note: this is ignored by firmware not supporting GO uAPSD */
if (more_data)
- cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
mvmsta->next_status_eosp = true;
- cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
} else {
- cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
}
/* block the Tx queues until the FW updated the sleep Tx count */
@@ -3290,6 +3560,27 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
spin_unlock_bh(&mvm_sta->lock);
}
+static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_int_sta *sta,
+ bool disable)
+{
+ u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = sta->sta_id,
+ .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+ .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+ .mac_id_n_color = cpu_to_le32(id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
bool disable)
@@ -3301,7 +3592,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* Block/unblock all the stations of the given mvmvif */
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -3314,6 +3605,22 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
}
+
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ return;
+
+ /* Also need to block/unblock the multicast station */
+ if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
+ iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+ &mvmvif->mcast_sta, disable);
+
+ /*
+ * Only unblock the broadcast station (FW blocks it for immediate
+ * quiet, not the driver)
+ */
+ if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
+ iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+ &mvmvif->bcast_sta, disable);
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
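The ADD_STA_KEY rework above builds one of two wire formats in a single buffer: the old and new commands share a common prefix, so the code fills the shared fields through u.cmd.common and sends sizeof(u.cmd) or sizeof(u.cmd_v1) depending on the TKIP_MIC_KEYS capability. A self-contained model of that union trick; the struct layouts here are invented, only the pattern matches the diff:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* fields shared by both command versions, at the same offsets */
struct key_cmd_common {
	uint8_t  sta_id;
	uint8_t  key_offset;
	uint16_t key_flags;
	uint8_t  key[32];
};

struct key_cmd_v1 {			/* older firmware format */
	struct key_cmd_common common;
	uint16_t tkip_rx_ttak[5];
};

struct key_cmd {			/* newer format: MIC keys and PN */
	struct key_cmd_common common;
	uint64_t tx_mic_key;
	uint64_t rx_mic_key;
	uint64_t transmit_seq_cnt;
};

/* build the command in a union and send only the bytes the fw expects */
static size_t build_key_cmd(uint8_t *buf, int new_api, uint8_t sta_id)
{
	union {
		struct key_cmd_v1 cmd_v1;
		struct key_cmd cmd;
	} u;
	size_t size;

	memset(&u, 0, sizeof(u));
	u.cmd.common.sta_id = sta_id;	/* common prefix: valid in both views */
	u.cmd.common.key_offset = 1;

	if (new_api)
		u.cmd.transmit_seq_cnt = 0x123456;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
	memcpy(buf, &u, size);
	return size;
}

int main(void)
{
	uint8_t buf[sizeof(struct key_cmd)];

	printf("v1 size:  %zu\n", build_key_cmd(buf, 0, 3));
	printf("new size: %zu\n", build_key_cmd(buf, 1, 3));
	return 0;
}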
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1927ce607798..2716cb5483bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -380,6 +380,7 @@ struct iwl_mvm_rxq_dup_data {
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @sta_type: station type
* @bt_reduced_txpower: is reduced tx power enabled for this station
* @next_status_eosp: the next reclaimed packet is a PS-Poll response and
* we need to signal the EOSP
@@ -416,6 +417,7 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
+ enum iwl_sta_type sta_type;
bool bt_reduced_txpower;
bool next_status_eosp;
spinlock_t lock;
@@ -453,10 +455,12 @@ iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
* struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
* broadcast)
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @type: station type
* @tfd_queue_msk: the tfd queues used by the station
*/
struct iwl_mvm_int_sta {
u32 sta_id;
+ enum iwl_sta_type type;
u32 tfd_queue_msk;
};
@@ -532,10 +536,14 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
- u32 qmask, enum nl80211_iftype iftype);
+ u32 qmask, enum nl80211_iftype iftype,
+ enum iwl_sta_type type);
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 9f160fc58cd0..df7cd87199ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* populate TDLS peer data */
cnt = 0;
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta) || !sta->tdls)
@@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
if (state == IWL_MVM_TDLS_SW_IDLE)
- mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
@@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
/* get the existing peer if it's there */
if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
- mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
lockdep_is_held(&mvm->mutex));
@@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */
- if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
goto out;
sta = rcu_dereference_protected(
@@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
sta->addr, chandef->chan->center_freq, chandef->width);
/* we only support a single peer for channel switching */
- if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
+ if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
@@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
/* we only support a single peer for channel switching */
- if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
+ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
goto out;
}
@@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
wait_for_phy = true;
- mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
+ mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
dev_kfree_skb(mvm->tdls_cs.peer.skb);
mvm->tdls_cs.peer.skb = NULL;
@@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
params->status != 0 &&
mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
- mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
+ mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *cur_sta;
/* make sure it's the same peer */
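The loops in tdls.c (and tt.c below) switch from the IWL_MVM_STATION_COUNT constant to ARRAY_SIZE(mvm->fw_id_to_mac_id), so the bound can never drift from the array if the supported station count grows on newer hardware. The idiom in runnable form:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct sta { int used; };

int main(void)
{
	struct sta fw_id_to_mac_id[21];	/* size owned by the declaration */
	size_t i, used = 0;

	for (i = 0; i < ARRAY_SIZE(fw_id_to_mac_id); i++)
		fw_id_to_mac_id[i].used = (i % 3 == 0);

	/* the loop bound tracks the array, not a separate constant */
	for (i = 0; i < ARRAY_SIZE(fw_id_to_mac_id); i++)
		used += fw_id_to_mac_id[i].used;

	printf("%zu of %zu entries used\n", used, ARRAY_SIZE(fw_id_to_mac_id));
	return 0;
}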
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
index a1947d6f3a2c..16ce8a56b5b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
@@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm)
if (IWL_MVM_TOF_IS_RESPONDER) {
tof_data->responder_cfg.sub_grp_cmd_id =
cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
- tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+ tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index bec7d9c46087..f9cbd197246f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
struct iwl_mvm_sta *mvmsta;
int i, err;
- for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
if (!mvmsta)
continue;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 1ba0a6f55503..bcaceb64a6e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -475,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = TX_CMD;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+ u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);
+
+ /* padding is inserted later in transport */
+ /* FIXME - check for AMSDU may need to be removed */
+ if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
+ !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+ cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+
+ if (!info->control.hw_key)
+ cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+
+ /* For data packets rate info comes from the fw */
+ if (ieee80211_is_data(hdr->frame_control) && sta)
+ goto out;
+
+ cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
+ cmd->rate_n_flags =
+ cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+
+ goto out;
+ }
+
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
if (info->control.hw_key)
@@ -484,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdrlen);
+
+out:
return dev_cmd;
}
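In the gen2 TX command path added above, the driver no longer pads the MAC header itself: when the header length is not a multiple of 4 and the frame is not an A-MSDU, it sets a pad bit in offload_assist and lets the transport insert the padding. A sketch of that computation; the bit positions are assumed here for illustration, not taken from the firmware definitions:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
/* assumed bit positions, for illustration only */
#define TX_CMD_OFFLD_AMSDU	BIT(12)
#define TX_CMD_OFFLD_PAD	BIT(13)

static uint16_t pad_assist(unsigned int hdrlen, uint16_t offload_assist)
{
	/*
	 * The hardware wants the payload 4-byte aligned; A-MSDU frames
	 * are handled by their own offload machinery.
	 */
	if (hdrlen % 4 && !(offload_assist & TX_CMD_OFFLD_AMSDU))
		offload_assist |= TX_CMD_OFFLD_PAD;
	return offload_assist;
}

int main(void)
{
	/* QoS data header is 26 bytes -> needs the pad bit */
	printf("hdrlen 26 -> 0x%04x\n", pad_assist(26, 0));
	/* plain data header is 24 bytes -> already aligned */
	printf("hdrlen 24 -> 0x%04x\n", pad_assist(24, 0));
	return 0;
}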
@@ -514,21 +551,21 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
*/
if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
ieee80211_is_deauth(fc))
- return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ return mvm->probe_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
"fc=0x%02x", le16_to_cpu(fc));
- return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ return mvm->probe_queue;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
- return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ return mvm->p2p_dev_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
WARN_ON_ONCE(1);
- return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ return mvm->p2p_dev_queue;
default:
WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
return -1;
@@ -541,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct iwl_device_cmd *dev_cmd;
- struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue;
@@ -594,11 +630,13 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (queue < 0)
return -1;
+ if (queue == info.control.vif->cab_queue)
+ queue = mvmvif->cab_queue;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
- if (ap_sta_id != IWL_MVM_STATION_COUNT)
+ if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
info.control.vif->type == NL80211_IFTYPE_STATION &&
@@ -616,11 +654,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);
- tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
-
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1;
@@ -713,7 +746,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* fifo to be able to send bursts.
*/
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
- mvm->shared_mem_cfg.txfifo_size[txf] - 256);
+ mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);
if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@@ -862,6 +895,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
unsigned long now = jiffies;
int tid;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
@@ -881,11 +917,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
struct iwl_device_cmd *dev_cmd;
- struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
- u8 txq_id = info->hw_queue;
+ u16 txq_id = info->hw_queue;
bool is_ampdu = false;
int hdrlen;
@@ -896,7 +931,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta))
return -1;
- if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
@@ -904,8 +939,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!dev_cmd)
goto drop;
- tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-
/*
* we handle that entirely ourselves -- for uAPSD the firmware
* will always send a notification, and for PS-Poll responses
@@ -926,18 +959,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
goto drop_unlock_sta;
- seq_number = mvmsta->tid_data[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
if (WARN_ON_ONCE(is_ampdu &&
mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta;
+
+ seq_number = mvmsta->tid_data[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ /* update the tx_cmd hdr as it was already copied */
+ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
+ }
}
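On the pre-a000 path the driver still stamps the sequence number itself: it keeps the fragment bits of seq_ctrl, ORs in the per-TID counter masked to the 12-bit sequence field, and mirrors the result into the already-copied TX command header. The masking, shown in host byte order for simplicity:

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG	0x000f	/* fragment number */
#define SCTL_SEQ	0xfff0	/* sequence number, in bits 4..15 */

static uint16_t stamp_seq(uint16_t seq_ctrl, uint16_t tid_counter)
{
	seq_ctrl &= SCTL_FRAG;			/* keep fragment bits */
	seq_ctrl |= tid_counter & SCTL_SEQ;	/* add the driver's counter */
	return seq_ctrl;
}

int main(void)
{
	/* counter kept in "shifted" form, like tid_data.seq_number */
	uint16_t counter = 7 << 4;

	printf("seq_ctrl = 0x%04x\n", stamp_seq(0x0002, counter));
	return 0;
}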
if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
txq_id = mvmsta->tid_data[tid].txq_id;
+
if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -945,17 +987,14 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
}
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
-
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
- if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
+ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
!mvmsta->tid_data[tid].is_tid_active) &&
iwl_mvm_is_dqa_supported(mvm)) {
/* If TXQ needs to be allocated... */
- if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
/*
@@ -967,6 +1006,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
return 0;
}
+ /* queue should always be active in new TX path */
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
/* If we are here - TXQ exists and needs to be re-activated */
spin_lock(&mvm->queue_info_lock);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
@@ -977,7 +1019,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id);
}
- if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
/* Keep track of the time of the last frame for this RA/TID */
mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
@@ -1036,7 +1078,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta))
return -1;
- if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
memcpy(&info, skb->cb, sizeof(info));
@@ -1245,6 +1287,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
}
}
+/**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
+ struct iwl_mvm_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count) & 0xfff;
+}
+
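iwl_mvm_get_scd_ssn() reads a field at a variable offset: the firmware appends one agg_tx_status entry per fetched MPDU and only then writes the scheduler SSN, so the SSN word sits frame_count entries past the start of the status array. A runnable model of that pointer arithmetic, with the struct shape assumed (each status entry is 4 bytes, as in the driver):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct agg_tx_status {		/* one per transmitted MPDU */
	uint16_t status;
	uint16_t sequence;
};

/* the SSN word sits right after frame_count status entries */
static uint32_t get_scd_ssn(const struct agg_tx_status *status,
			    unsigned int frame_count)
{
	uint32_t ssn;

	memcpy(&ssn, (const uint32_t *)status + frame_count, sizeof(ssn));
	return ssn & 0xfff;	/* SSN is 12 bits */
}

int main(void)
{
	/* fake response: 3 status entries, then the SSN word */
	uint32_t buf[4] = { 0, 0, 0, 0x5123 };

	printf("ssn = 0x%03x\n",
	       get_scd_ssn((struct agg_tx_status *)buf, 3));
	return 0;
}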
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -1254,8 +1316,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
- u32 status = le16_to_cpu(tx_resp->status.status);
- u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
+ struct agg_tx_status *agg_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
+ u32 status = le16_to_cpu(agg_status->status);
+ u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
struct iwl_mvm_sta *mvmsta;
struct sk_buff_head skbs;
u8 skb_freed = 0;
@@ -1264,6 +1328,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
__skb_queue_head_init(&skbs);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ txq_id = le16_to_cpu(tx_resp->v6.tx_queue);
+
seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
/* we can free until ssn % q.n_bd not inclusive */
@@ -1388,7 +1455,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (tid != IWL_TID_NON_QOS) {
+ if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
bool send_eosp_ndp = false;
@@ -1520,7 +1587,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
- struct agg_tx_status *frame_status = &tx_resp->status;
+ struct agg_tx_status *frame_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
int i;
for (i = 0; i < tx_resp->frame_count; i++) {
@@ -1722,6 +1790,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_res->reduced_txp;
+ if (!le16_to_cpu(ba_res->tfd_cnt))
+ goto out;
+
/*
* TODO:
* When supporting multi TID aggregations - we need to move
@@ -1730,12 +1801,16 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* This will go together with SN and AddBA offload and cannot
* be handled properly for now.
*/
- WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
- iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
- (int)ba_res->tfd[0].q_num,
+ WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
+ tid = ba_res->ra_tid[0].tid;
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid,
+ (int)(le16_to_cpu(ba_res->tfd[0].q_num)),
le16_to_cpu(ba_res->tfd[0].tfd_index),
&ba_info, le32_to_cpu(ba_res->tx_rate));
+out:
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index dedea96a8e0f..8f4f176e204e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -591,6 +592,10 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
lockdep_assert_held(&mvm->queue_info_lock);
+ /* This should not be hit with new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -ENOSPC;
+
/* Start by looking for a free queue */
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
@@ -627,6 +632,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
};
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
@@ -644,50 +652,94 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return ret;
}
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout)
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 sta_id, u8 tid)
{
bool enable_queue = true;
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure this TID isn't already enabled */
- if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
- queue, cfg->tid);
- return;
+ queue, tid);
+ return false;
}
/* Update mappings and refcounts */
if (mvm->queue_info[queue].hw_queue_refcount > 0)
enable_queue = false;
- mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+
mvm->queue_info[queue].hw_queue_refcount++;
- mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
- mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+ mvm->queue_info[queue].ra_sta_id = sta_id;
if (enable_queue) {
- if (cfg->tid != IWL_MAX_TID_COUNT)
+ if (tid != IWL_MAX_TID_COUNT)
mvm->queue_info[queue].mac80211_ac =
- tid_to_mac80211_ac[cfg->tid];
+ tid_to_mac80211_ac[tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
- mvm->queue_info[queue].txq_tid = cfg->tid;
+ mvm->queue_info[queue].txq_tid = tid;
}
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->queue_info[queue].hw_queue_to_mac80211);
+ mvm->hw_queue_to_mac80211[queue]);
spin_unlock_bh(&mvm->queue_info_lock);
+ return enable_queue;
+}
+
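+/*
+ * On the new (TVQM) TX path the firmware allocates the queue itself -
+ * the driver only sends the config command and records the mac80211
+ * queue mapping.
+ */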
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout)
+{
+ struct iwl_tx_queue_cfg_cmd cmd = {
+ .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+ .sta_id = sta_id,
+ .tid = tid,
+ };
+ int queue;
+
+ if (cmd.tid == IWL_MAX_TID_COUNT)
+ cmd.tid = IWL_MGMT_TID;
+ queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
+ SCD_QUEUE_CFG, timeout);
+
+ if (queue < 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+ sta_id, tid, queue);
+ return queue;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+ queue, sta_id, tid);
+
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+ queue, mvm->hw_queue_to_mac80211[queue]);
+
+ return queue;
+}
+
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
/* Send the enabling command if we need to */
- if (enable_queue) {
+ if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid)) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_ENABLE_QUEUE,
@@ -701,7 +753,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
+ sizeof(struct iwl_scd_txq_cfg_cmd),
&cmd),
"Failed to configure queue %d on FIFO %d\n", queue,
cfg->fifo);
@@ -718,6 +771,16 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
bool remove_mac_queue = true;
int ret;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_free(mvm->trans, queue);
+
+ return 0;
+ }
+
spin_lock_bh(&mvm->queue_info_lock);
if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
@@ -744,7 +807,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
}
if (remove_mac_queue)
- mvm->queue_info[queue].hw_queue_to_mac80211 &=
+ mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount--;
@@ -757,7 +820,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
"Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
queue,
mvm->queue_info[queue].hw_queue_refcount,
- mvm->queue_info[queue].hw_queue_to_mac80211);
+ mvm->hw_queue_to_mac80211[queue]);
/* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
@@ -771,16 +834,16 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount ||
mvm->queue_info[queue].tid_bitmap ||
- mvm->queue_info[queue].hw_queue_to_mac80211,
+ mvm->hw_queue_to_mac80211[queue],
"TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->queue_info[queue].hw_queue_to_mac80211,
+ mvm->hw_queue_to_mac80211[queue],
mvm->queue_info[queue].tid_bitmap);
/* If we are here - the queue is freed and we can zero out these vals */
mvm->queue_info[queue].hw_queue_refcount = 0;
mvm->queue_info[queue].tid_bitmap = 0;
- mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+ mvm->hw_queue_to_mac80211[queue] = 0;
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
@@ -789,11 +852,11 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
- sizeof(cmd), &cmd);
+ sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
-
return ret;
}
@@ -816,7 +879,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
.data = { lq, },
};
- if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
+ if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
return iwl_mvm_send_cmd(mvm, &cmd);
@@ -1006,6 +1069,35 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
return bss_iter_data.vif;
}
+struct iwl_sta_iter_data {
+ bool assoc;
+};
+
+static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_sta_iter_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (vif->bss_conf.assoc)
+ data->assoc = true;
+}
+
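+/* returns true if any station interface is currently associated */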
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
+{
+ struct iwl_sta_iter_data data = {
+ .assoc = false,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_sta_iface_iterator,
+ &data);
+ return data.assoc;
+}
+
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool tdls, bool cmd_q)
@@ -1088,6 +1180,9 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
@@ -1114,8 +1209,8 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
- mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
- mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue);
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
mvm->queue_info[queue].hw_queue_refcount--;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
mvmsta->tid_data[tid].is_tid_active = false;
@@ -1135,7 +1230,7 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
*/
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- mvm->queue_info[queue].hw_queue_to_mac80211 |=
+ mvm->hw_queue_to_mac80211[queue] |=
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}
@@ -1154,6 +1249,9 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
unsigned long now = jiffies;
int i;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
spin_lock_bh(&mvm->queue_info_lock);
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
if (mvm->queue_info[i].hw_queue_refcount > 0)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
new file mode 100644
index 000000000000..b1f43397bb59
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -0,0 +1,281 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
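+/*
+ * Count the consecutive fw sections starting at @start, stopping at the
+ * CPU1/CPU2 or paging separator (or at the end of the image).
+ */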
+static int iwl_pcie_get_num_sections(const struct fw_img *fw,
+ int start)
+{
+ int i = 0;
+
+ while (start < fw->num_sec &&
+ fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+ fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
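+/* allocate a coherent DMA block and copy the section payload into it */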
+static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = dma_alloc_coherent(trans->dev, sec->len,
+ &dram->physical,
+ GFP_KERNEL);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
+
+static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ int i;
+
+ if (!dram->fw) {
+ WARN_ON(dram->fw_cnt);
+ return;
+ }
+
+ for (i = 0; i < dram->fw_cnt; i++)
+ dma_free_coherent(trans->dev, dram->fw[i].size,
+ dram->fw[i].block, dram->fw[i].physical);
+
+ kfree(dram->fw);
+ dram->fw_cnt = 0;
+}
+
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ int i;
+
+ if (!dram->paging) {
+ WARN_ON(dram->paging_cnt);
+ return;
+ }
+
+ /* free paging */
+ for (i = 0; i < dram->paging_cnt; i++)
+ dma_free_coherent(trans->dev, dram->paging[i].size,
+ dram->paging[i].block,
+ dram->paging[i].physical);
+
+ kfree(dram->paging);
+ dram->paging_cnt = 0;
+}
+
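+/*
+ * The fw image layout is: lmac sections, separator, umac sections,
+ * separator, paging sections. Map each group into DRAM and record the
+ * physical addresses in the context info for the firmware self init.
+ */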
+static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info *ctxt_info)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
+ int i, ret, lmac_cnt, umac_cnt, paging_cnt;
+
+ lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
+ /* add 1 due to separator */
+ umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
+ /* add 2 due to separators */
+ paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);
+
+ dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
+ if (!dram->fw)
+ return -ENOMEM;
+ dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
+ if (!dram->paging)
+ return -ENOMEM;
+
+ /* initialize lmac sections */
+ for (i = 0; i < lmac_cnt; i++) {
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
+ &dram->fw[dram->fw_cnt]);
+ if (ret)
+ return ret;
+ ctxt_dram->lmac_img[i] =
+ cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+ dram->fw_cnt++;
+ }
+
+ /* initialize umac sections */
+ for (i = 0; i < umac_cnt; i++) {
+ /* access FW with +1 to make up for lmac separator */
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans,
+ &fw->sec[dram->fw_cnt + 1],
+ &dram->fw[dram->fw_cnt]);
+ if (ret)
+ return ret;
+ ctxt_dram->umac_img[i] =
+ cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+ dram->fw_cnt++;
+ }
+
+ /*
+ * Initialize paging.
+ * Paging memory isn't stored in dram->fw like the umac and lmac
+ * sections - it is stored separately.
+ * This is because the timing of its release is different - while fw
+ * memory can be released on alive, the paging memory can be freed
+ * only when the device goes down.
+ * Given that, the logic here for accessing the fw image is a bit
+ * different - fw_cnt doesn't change, so the loop counter is added to it.
+ */
+ for (i = 0; i < paging_cnt; i++) {
+ /* access FW with +2 to make up for lmac & umac separators */
+ int fw_idx = dram->fw_cnt + i + 2;
+
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
+ &dram->paging[i]);
+ if (ret)
+ return ret;
+
+ ctxt_dram->virtual_img[i] =
+ cpu_to_le64(dram->paging[i].physical);
+ dram->paging_cnt++;
+ }
+
+ return 0;
+}
+
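+/*
+ * Build the context info that the gen2 firmware reads during self init:
+ * control flags, the RX/TX queue addresses and the ucode section
+ * addresses, then kick the self load by writing the context info DMA
+ * address to the CSR.
+ */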
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
+ const struct fw_img *fw)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_context_info *ctxt_info;
+ struct iwl_context_info_rbd_cfg *rx_cfg;
+ u32 control_flags = 0;
+ int ret;
+
+ ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info)
+ return -ENOMEM;
+
+ ctxt_info->version.version = 0;
+ ctxt_info->version.mac_id =
+ cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ /* size is in DWs */
+ ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
+
+ BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
+ control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
+ IWL_CTXT_INFO_TFD_FORMAT_LONG |
+ RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
+ IWL_CTXT_INFO_RB_CB_SIZE_POS;
+ ctxt_info->control.control_flags = cpu_to_le32(control_flags);
+
+ /* initialize RX default queue */
+ rx_cfg = &ctxt_info->rbd_cfg;
+ rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
+ rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+ rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+
+ /* initialize TX command queue */
+ ctxt_info->hcmd_cfg.cmd_queue_addr =
+ cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ ctxt_info->hcmd_cfg.cmd_queue_size =
+ TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX);
+
+ /* allocate ucode sections in dram and set addresses */
+ ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+ if (ret) {
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
+ ctxt_info, trans_pcie->ctxt_info_dma_addr);
+ return ret;
+ }
+
+ trans_pcie->ctxt_info = ctxt_info;
+
+ iwl_enable_interrupts(trans);
+
+ /* Configure debug, if it exists */
+ if (trans->dbg_dest_tlv)
+ iwl_pcie_apply_destination(trans);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
+ iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
+ /* Context info will be released upon alive or failure to get one */
+
+ return 0;
+}
+
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->ctxt_info)
+ return;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
+ trans_pcie->ctxt_info,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_dma_addr = 0;
+ trans_pcie->ctxt_info = NULL;
+
+ iwl_pcie_ctxt_info_free_fw_img(trans);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ba8a81cb0e2b..e51760e752d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -501,6 +501,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
/* 9000 Series */
@@ -533,7 +537,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -667,18 +672,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = cfg_7265d;
}
- if (iwl_trans->cfg->rf_id) {
- if (cfg == &iwl9460_2ac_cfg &&
- iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
- cfg = &iwl9000lc_2ac_cfg;
- iwl_trans->cfg = cfg;
- }
-
- if (cfg == &iwla000_2ac_cfg_hr &&
- iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
- cfg = &iwla000_2ac_cfg_jf;
- iwl_trans->cfg = cfg;
- }
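+ /* a000 HR devices that carry a JF RF module must use the JF config */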
+ if (iwl_trans->cfg->rf_id &&
+ (cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) &&
+ iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
+ cfg = &iwla000_2ac_cfg_jf;
+ iwl_trans->cfg = cfg;
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 10937309641a..fd4faaaa1484 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -2,7 +2,7 @@
*
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -205,11 +205,11 @@ struct iwl_cmd_meta {
* into the buffer regardless of whether it should be mapped or not.
* This indicates how big the first TB must be to include the scratch buffer
* and the assigned PN.
- * Since PN location is 16 bytes at offset 24, it's 40 now.
+ * Since PN location is 8 bytes at offset 12, it's 20 now.
* If we make it bigger then allocations will be bigger and copy slower, so
* that's probably not useful.
*/
-#define IWL_FIRST_TB_SIZE 40
+#define IWL_FIRST_TB_SIZE 20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
@@ -237,11 +237,11 @@ struct iwl_pcie_first_tb_buf {
* @stuck_timer: timer that fires if queue gets stuck
* @trans_pcie: pointer back to transport (for timer)
* @need_update: indicates need to update read/write index
- * @active: stores if queue is active
* @ampdu: true if this queue is an ampdu queue for an specific RA/TID
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
* @dma_addr: physical addr for BD's
@@ -277,11 +277,11 @@ struct iwl_txq {
struct iwl_trans_pcie *trans_pcie;
bool need_update;
bool frozen;
- u8 active;
bool ampdu;
int block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
+ struct iwl_dma_ptr bc_tbl;
int write_ptr;
int read_ptr;
@@ -315,11 +315,43 @@ enum iwl_shared_irq_flags {
};
/**
+ * struct iwl_dram_data
+ * @physical: page phy pointer
+ * @block: pointer to the allocated block/page
+ * @size: size of the block/page
+ */
+struct iwl_dram_data {
+ dma_addr_t physical;
+ void *block;
+ int size;
+};
+
+/**
+ * struct iwl_self_init_dram - dram data used by self init process
+ * @fw: lmac and umac dram data
+ * @fw_cnt: total number of items in array
+ * @paging: paging dram data
+ * @paging_cnt: total number of items in array
+ */
+struct iwl_self_init_dram {
+ struct iwl_dram_data *fw;
+ int fw_cnt;
+ struct iwl_dram_data *paging;
+ int paging_cnt;
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
+ * @ctxt_info: context information for FW self init
+ * @ctxt_info_dma_addr: dma addr of context information
+ * @init_dram: DRAM data of firmware image (including paging).
+ * Context information addresses will be taken from here.
+ * This is driver's local copy for keeping track of size and
+ * count for allocating and freeing the memory.
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
@@ -357,6 +389,9 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
+ struct iwl_context_info *ctxt_info;
+ dma_addr_t ctxt_info_dma_addr;
+ struct iwl_self_init_dram init_dram;
struct iwl_trans *trans;
struct net_device napi_dev;
@@ -378,9 +413,10 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
- struct iwl_txq *txq;
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+ struct iwl_txq *txq_memory;
+ struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
/* PCI bus related data */
struct pci_dev *pci_dev;
@@ -454,6 +490,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
@@ -474,6 +511,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -484,7 +522,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
-dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -616,6 +653,12 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
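+/* return the TFD (descriptor) at index @idx, using the configured tfd_size */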
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, int idx)
+{
+ return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -719,4 +762,41 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
+/* common functions that are used by gen2 transport */
+void iwl_pcie_apm_config(struct iwl_trans *trans);
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
+bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_queue_space(const struct iwl_txq *q);
+int iwl_pcie_apm_stop_master(struct iwl_trans *trans);
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
+int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+ struct iwl_txq *txq, int slots_num, bool cmd_queue);
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size);
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
+void iwl_pcie_apply_destination(struct iwl_trans *trans);
+
+/* transport gen 2 exported functions */
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill);
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue_cfg_cmd *cmd,
+ int cmd_id,
+ unsigned int timeout);
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
+int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id);
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
+ bool low_power);
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
+void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index de94dfdf2ec9..1da2de205cdf 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -2,7 +2,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -880,7 +880,7 @@ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-int iwl_pcie_rx_init(struct iwl_trans *trans)
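+/* common RX allocation/reset; HW configuration is left to the callers */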
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *def_rxq;
@@ -958,20 +958,40 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
+ return 0;
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = _iwl_pcie_rx_init(trans);
+
+ if (ret)
+ return ret;
+
if (trans->cfg->mq_rx_supported)
iwl_pcie_rx_mq_hw_init(trans);
else
- iwl_pcie_rx_hw_init(trans, def_rxq);
+ iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
- iwl_pcie_rxq_restock(trans, def_rxq);
+ iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
- spin_lock(&def_rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
- spin_unlock(&def_rxq->lock);
+ spin_lock(&trans_pcie->rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
+ spin_unlock(&trans_pcie->rxq->lock);
return 0;
}
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
+{
+ /*
+ * We don't configure the RFH.
+ * Restock will be done at alive, after firmware configured the RFH.
+ */
+ return _iwl_pcie_rx_init(trans);
+}
+
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1074,7 +1094,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool emergency)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
@@ -1127,7 +1147,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* Ucode should set SEQ_RX_FRAME bit if ucode-originated,
* but apparently a few don't get set; catch them here. */
reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
- if (reclaim) {
+ if (reclaim && !pkt->hdr.group_id) {
int i;
for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
@@ -1393,17 +1413,17 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
return;
}
- iwl_pcie_dump_csr(trans);
- iwl_dump_fh(trans, NULL);
-
local_bh_disable();
/* The STATUS_FW_ERROR bit is set in this function. This must happen
* before we wake up the command caller, to ensure a proper cleanup. */
iwl_trans_fw_error(trans);
local_bh_enable();
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- del_timer(&trans_pcie->txq[i].stuck_timer);
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ if (!trans_pcie->txq[i])
+ continue;
+ del_timer(&trans_pcie->txq[i]->stuck_timer);
+ }
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans_pcie->wait_command_queue);
@@ -1597,6 +1617,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
if (inta & CSR_INT_BIT_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
+ if (trans->cfg->gen2) {
+ /*
+ * We can restock, since firmware configured
+ * the RFH
+ */
+ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+ }
}
}
@@ -1933,6 +1960,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
+ if (trans->cfg->gen2) {
+ /* We can restock, since firmware configured the RFH */
+ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+ }
}
/* uCode wakes up after power-down sleep */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
new file mode 100644
index 000000000000..ac60a282d6de
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -0,0 +1,374 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-trans.h"
+#include "iwl-context-info.h"
+#include "internal.h"
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
+{
+ int ret = 0;
+
+ IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ iwl_pcie_apm_config(trans);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+ return ret;
+ }
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ return 0;
+}
+
+static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+ if (op_mode_leave) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_pcie_gen2_apm_init(trans);
+
+ /* inform ME that we are leaving */
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE |
+ CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ mdelay(1);
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ mdelay(5);
+ }
+
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ /* Stop device's DMA activity */
+ iwl_pcie_apm_stop_master(trans);
+
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ usleep_range(1000, 2000);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill, was_hw_rfkill;
+
+ lockdep_assert_held(&trans_pcie->mutex);
+
+ if (trans_pcie->is_down)
+ return;
+
+ trans_pcie->is_down = true;
+
+ was_hw_rfkill = iwl_is_rfkill_set(trans);
+
+ /* tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+ /* device going down, Stop using ICT table */
+ iwl_pcie_disable_ict(trans);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
+ iwl_pcie_gen2_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
+ }
+
+ iwl_pcie_ctxt_info_free_paging(trans);
+ iwl_pcie_ctxt_info_free(trans);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_pcie_gen2_apm_stop(trans, false);
+
+ /* stop and reset the on-board processor */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ usleep_range(1000, 2000);
+
+ /*
+ * Upon stop, the IVAR table gets erased, so msi-x won't
+ * work. This causes a bug in RF-KILL flows, since the interrupt
+ * that enables radio won't fire on the correct irq, and the
+ * driver won't be able to handle the interrupt.
+ * Configure the IVAR table again after reset.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * This is a bug in certain versions of the hardware.
+ * Certain devices also keep sending HW RF kill interrupt all
+ * the time, unless the interrupt is ACKed even if the interrupt
+ * should be masked. Re-ACK all the interrupts here.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* clear all status bits */
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+ * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ if (hw_rfkill != was_hw_rfkill)
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ /* re-take ownership to prevent other users from stealing the device */
+ iwl_pcie_prepare_card_hw(trans);
+}
+
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ mutex_lock(&trans_pcie->mutex);
+ _iwl_trans_pcie_gen2_stop_device(trans, low_power);
+ mutex_unlock(&trans_pcie->mutex);
+}
+
+static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* TODO: most of the logic can be removed in A0 - but not in Z0 */
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_gen2_apm_init(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ iwl_op_mode_nic_config(trans->op_mode);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (iwl_pcie_gen2_rx_init(trans))
+ return -ENOMEM;
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_pcie_gen2_tx_init(trans))
+ return -ENOMEM;
+
+ /* enable shadow regs in HW */
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+
+ return 0;
+}
+
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_pcie_reset_ict(trans);
+
+ /* make sure all queues are not stopped/used */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* now that we got alive we can free the fw image & the context info.
+ * paging memory can't be freed yet, since the FW will still use it
+ */
+ iwl_pcie_ctxt_info_free(trans);
+}
+
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
+ int ret;
+
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ iwl_enable_rfkill_int(trans);
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ /*
+ * We enabled the RF-Kill interrupt and the handler may very
+ * well be running. Disable the interrupts to make sure no other
+ * interrupt can be fired.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* Make sure it finished running */
+ iwl_pcie_synchronize_irqs(trans);
+
+ mutex_lock(&trans_pcie->mutex);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
+ if (hw_rfkill && !run_in_rfkill) {
+ ret = -ERFKILL;
+ goto out;
+ }
+
+ /* Someone called stop_device, don't try to start_fw */
+ if (trans_pcie->is_down) {
+ IWL_WARN(trans,
+ "Can't start_fw since the HW hasn't been started\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_pcie_gen2_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ goto out;
+ }
+
+ ret = iwl_pcie_ctxt_info_init(trans, fw);
+ if (ret)
+ goto out;
+
+ /* re-check RF-Kill state since we may have missed the interrupt */
+ hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
+ if (hw_rfkill && !run_in_rfkill)
+ ret = -ERFKILL;
+
+out:
+ mutex_unlock(&trans_pcie->mutex);
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7f05fc56587a..70acf850a9f1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -201,7 +201,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-static void iwl_pcie_apm_config(struct iwl_trans *trans)
+void iwl_pcie_apm_config(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 lctl;
@@ -448,7 +448,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
-static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
@@ -567,7 +567,7 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
}
/* Note: returns standard 0/-ERROR code */
-static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
int t = 0;
@@ -636,29 +636,6 @@ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}
-static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans,
- u32 dst_addr, dma_addr_t phy_addr,
- u32 byte_cnt)
-{
- /* Stop DMA channel */
- iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0);
-
- /* Configure SRAM address */
- iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR,
- dst_addr);
-
- /* Configure DRAM address - 64 bit */
- iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr);
-
- /* Configure byte count to transfer */
- iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);
-
- /* Enable the DRAM2SRAM to start */
- iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP |
- TFH_SRV_DMA_TO_DRIVER |
- TFH_SRV_DMA_START);
-}
-
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
u32 dst_addr, dma_addr_t phy_addr,
u32 byte_cnt)
@@ -672,12 +649,8 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
if (!iwl_trans_grab_nic_access(trans, &flags))
return -EIO;
- if (trans->cfg->use_tfh)
- iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr,
- byte_cnt);
- else
- iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
- byte_cnt);
+ iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
+ byte_cnt);
iwl_trans_release_nic_access(trans, &flags);
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
@@ -747,47 +720,6 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-/*
- * Driver Takes the ownership on secure machine before FW load
- * and prevent race with the BT load.
- * W/A for ROM bug. (should be remove in the next Si step)
- */
-static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
-{
- u32 val, loop = 1000;
-
- /*
- * Check the RSA semaphore is accessible.
- * If the HW isn't locked and the rsa semaphore isn't accessible,
- * we are in trouble.
- */
- val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
- if (val & (BIT(1) | BIT(17))) {
- IWL_DEBUG_INFO(trans,
- "can't access the RSA semaphore it is write protected\n");
- return 0;
- }
-
- /* take ownership on the AUX IF */
- iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
- iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
-
- do {
- iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
- val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
- if (val == 0x1) {
- iwl_write_prph(trans, RSA_ENABLE, 0);
- return 0;
- }
-
- udelay(10);
- loop--;
- } while (loop > 0);
-
- IWL_ERR(trans, "Failed to take ownership on secure machine\n");
- return -EIO;
-}
-
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
const struct fw_img *image,
int cpu,
@@ -828,15 +760,10 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
return ret;
/* Notify ucode of loaded section number and status */
- if (trans->cfg->use_tfh) {
- val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
- val = val | (sec_num << shift_param);
- iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
- } else {
- val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
- val = val | (sec_num << shift_param);
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
- }
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+
sec_num = (sec_num << 1) | 0x1;
}
@@ -904,7 +831,7 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
-static void iwl_pcie_apply_destination(struct iwl_trans *trans)
+void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
@@ -1042,10 +969,15 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
if (trans->dbg_dest_tlv)
iwl_pcie_apply_destination(trans);
- /* TODO: remove in the next Si step */
- ret = iwl_pcie_rsa_race_bug_wa(trans);
- if (ret)
- return ret;
+ IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
+ iwl_read_prph(trans, WFPM_GP2));
+
+ /*
+ * Set default value. On resume reading the values that were
+ * zeroed can provide debug data on the resume flow.
+ * This is for debugging only and has no functional impact.
+ */
+ iwl_write_prph(trans, WFPM_GP2, 0x01010101);
/* configure the ucode to be ready to get the secured image */
/* release CPU reset */
@@ -1062,7 +994,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
&first_ucode_section);
}
-static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
+bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
{
bool hw_rfkill = iwl_is_rfkill_set(trans);
@@ -1147,7 +1079,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
-static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
@@ -1299,7 +1231,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_prepare_card_hw(trans);
}
-static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1423,8 +1355,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
lockdep_assert_held(&trans_pcie->mutex);
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
- _iwl_trans_pcie_stop_device(trans, true);
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
+ if (trans->cfg->gen2)
+ _iwl_trans_pcie_gen2_stop_device(trans, true);
+ else
+ _iwl_trans_pcie_stop_device(trans, true);
+ }
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
@@ -1527,6 +1463,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
}
}
+ IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
+ iwl_read_prph(trans, WFPM_GP2));
+
val = iwl_read32(trans, CSR_RESET);
if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
*status = IWL_D3_STATUS_RESET;
@@ -1828,7 +1767,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
- iwl_pcie_tx_free(trans);
+ if (trans->cfg->gen2)
+ iwl_pcie_gen2_tx_free(trans);
+ else
+ iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
if (trans_pcie->msix_enabled) {
@@ -1998,7 +1940,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
int queue;
for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = &trans_pcie->txq[queue];
+ struct iwl_txq *txq = trans_pcie->txq[queue];
unsigned long now;
spin_lock_bh(&txq->lock);
@@ -2050,7 +1992,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
int i;
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = &trans_pcie->txq[i];
+ struct iwl_txq *txq = trans_pcie->txq[i];
if (i == trans_pcie->cmd_queue)
continue;
@@ -2075,48 +2017,32 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 scd_sram_addr;
- u8 buf[16];
- int cnt;
+ u32 txq_id = txq->id;
+ u32 status;
+ bool active;
+ u8 fifo;
- IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->read_ptr, txq->write_ptr);
-
- if (trans->cfg->use_tfh)
+ if (trans->cfg->use_tfh) {
+ IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+ txq->read_ptr, txq->write_ptr);
/* TODO: access new SCD registers and dump them */
return;
+ }
- scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq->id);
- iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
-
- iwl_print_hex_error(trans, buf, sizeof(buf));
-
- for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
- IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
- iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
-
- for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
- u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
- u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
- u32 tbl_dw =
- iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
-
- if (cnt & 0x1)
- tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
- else
- tbl_dw = tbl_dw & 0x0000FFFF;
+ status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+ fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
- IWL_ERR(trans,
- "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
- cnt, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
- (TFD_QUEUE_SIZE_MAX - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
- }
+ IWL_ERR(trans,
+ "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+ txq_id, active ? "" : "in", fifo,
+ jiffies_to_msecs(txq->wd_timeout),
+ txq->read_ptr, txq->write_ptr,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
@@ -2139,7 +2065,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
continue;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
- txq = &trans_pcie->txq[cnt];
+ txq = trans_pcie->txq[cnt];
wr_ptr = ACCESS_ONCE(txq->write_ptr);
while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
@@ -2330,7 +2256,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
- if (!trans_pcie->txq)
+ if (!trans_pcie->txq_memory)
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2338,7 +2264,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
return -ENOMEM;
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
- txq = &trans_pcie->txq[cnt];
+ txq = trans_pcie->txq[cnt];
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
cnt, txq->read_ptr, txq->write_ptr,
@@ -2755,7 +2681,7 @@ static struct iwl_trans_dump_data
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs;
@@ -2890,21 +2816,43 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
}
#endif /* CONFIG_PM_SLEEP */
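+/* transport ops shared by the gen1 and gen2 PCIe transports */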
+#define IWL_TRANS_COMMON_OPS \
+ .op_mode_leave = iwl_trans_pcie_op_mode_leave, \
+ .write8 = iwl_trans_pcie_write8, \
+ .write32 = iwl_trans_pcie_write32, \
+ .read32 = iwl_trans_pcie_read32, \
+ .read_prph = iwl_trans_pcie_read_prph, \
+ .write_prph = iwl_trans_pcie_write_prph, \
+ .read_mem = iwl_trans_pcie_read_mem, \
+ .write_mem = iwl_trans_pcie_write_mem, \
+ .configure = iwl_trans_pcie_configure, \
+ .set_pmi = iwl_trans_pcie_set_pmi, \
+ .grab_nic_access = iwl_trans_pcie_grab_nic_access, \
+ .release_nic_access = iwl_trans_pcie_release_nic_access, \
+ .set_bits_mask = iwl_trans_pcie_set_bits_mask, \
+ .ref = iwl_trans_pcie_ref, \
+ .unref = iwl_trans_pcie_unref, \
+ .dump_data = iwl_trans_pcie_dump_data, \
+ .wait_tx_queues_empty = iwl_trans_pcie_wait_txq_empty, \
+ .d3_suspend = iwl_trans_pcie_d3_suspend, \
+ .d3_resume = iwl_trans_pcie_d3_resume
+
+#ifdef CONFIG_PM_SLEEP
+#define IWL_TRANS_PM_OPS \
+ .suspend = iwl_trans_pcie_suspend, \
+ .resume = iwl_trans_pcie_resume,
+#else
+#define IWL_TRANS_PM_OPS
+#endif /* CONFIG_PM_SLEEP */
+
static const struct iwl_trans_ops trans_ops_pcie = {
+ IWL_TRANS_COMMON_OPS,
+ IWL_TRANS_PM_OPS
.start_hw = iwl_trans_pcie_start_hw,
- .op_mode_leave = iwl_trans_pcie_op_mode_leave,
.fw_alive = iwl_trans_pcie_fw_alive,
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .d3_suspend = iwl_trans_pcie_d3_suspend,
- .d3_resume = iwl_trans_pcie_d3_resume,
-
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwl_trans_pcie_suspend,
- .resume = iwl_trans_pcie_resume,
-#endif /* CONFIG_PM_SLEEP */
-
.send_cmd = iwl_trans_pcie_send_hcmd,
.tx = iwl_trans_pcie_tx,
@@ -2913,31 +2861,27 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
- .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,
-
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
- .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
+};
+
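+/*
+ * The gen2 ops share the common set above but take the new Tx path
+ * with dynamically allocated queues (txq_alloc/txq_free).
+ */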
+static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
+ IWL_TRANS_COMMON_OPS,
+ IWL_TRANS_PM_OPS
+ .start_hw = iwl_trans_pcie_start_hw,
+ .fw_alive = iwl_trans_pcie_gen2_fw_alive,
+ .start_fw = iwl_trans_pcie_gen2_start_fw,
+ .stop_device = iwl_trans_pcie_gen2_stop_device,
- .write8 = iwl_trans_pcie_write8,
- .write32 = iwl_trans_pcie_write32,
- .read32 = iwl_trans_pcie_read32,
- .read_prph = iwl_trans_pcie_read_prph,
- .write_prph = iwl_trans_pcie_write_prph,
- .read_mem = iwl_trans_pcie_read_mem,
- .write_mem = iwl_trans_pcie_write_mem,
- .configure = iwl_trans_pcie_configure,
- .set_pmi = iwl_trans_pcie_set_pmi,
- .grab_nic_access = iwl_trans_pcie_grab_nic_access,
- .release_nic_access = iwl_trans_pcie_release_nic_access,
- .set_bits_mask = iwl_trans_pcie_set_bits_mask,
-
- .ref = iwl_trans_pcie_ref,
- .unref = iwl_trans_pcie_unref,
-
- .dump_data = iwl_trans_pcie_dump_data,
+ .send_cmd = iwl_trans_pcie_gen2_send_hcmd,
+
+ .tx = iwl_trans_pcie_gen2_tx,
+ .reclaim = iwl_trans_pcie_reclaim,
+
+ .txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
+ .txq_free = iwl_trans_pcie_dyn_txq_free,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -2952,8 +2896,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, cfg, &trans_ops_pcie, 0);
+ if (cfg->gen2)
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+ &pdev->dev, cfg, &trans_ops_pcie_gen2);
+ else
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+ &pdev->dev, cfg, &trans_ops_pcie);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3028,7 +2976,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- trans->dev = &pdev->dev;
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
new file mode 100644
index 000000000000..9fb46a6f47cf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -0,0 +1,1018 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pm_runtime.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "mvm/fw-api.h"
+
+/*
+ * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
+ */
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
+ if (!trans_pcie->txq[txq_id])
+ continue;
+ iwl_pcie_gen2_txq_unmap(trans, txq_id);
+ }
+}
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+ int write_ptr = txq->write_ptr;
+ u8 filled_tfd_size, num_fetch_chunks;
+ u16 len = byte_cnt;
+ __le16 bc_ent;
+
+ len = DIV_ROUND_UP(len, 4);
+
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
+ filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the TFD.
+ * Dividing it by 64 will give the number of chunks to fetch
+	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
+ * If, for example, TFD contains only 3 TBs then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched
+ */
+ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ scd_bc_tbl->tfd_offset[write_ptr] = bc_ent;
+}
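+
+/*
+ * Worked example (illustrative only): with 3 TBs, 32 bytes of the TFD
+ * are filled, so num_fetch_chunks = DIV_ROUND_UP(32, 64) - 1 = 0, i.e.
+ * a single 64-byte chunk; a byte_cnt of 200 gives len =
+ * DIV_ROUND_UP(200, 4) = 50 dwords, so bc_ent = 50 | (0 << 12). The
+ * dword count sits in bits 0-11 and the chunk count in bits 12-15.
+ */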
+
+/*
+ * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
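+	/* the HW takes the queue id in the high 16 bits of HBUS_TARG_WRPTR */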
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
+
+static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+
+ if (num_tbs >= trans_pcie->max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ return;
+ }
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ }
+
+ tfd->num_tbs = 0;
+}
+
+static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int rd_ptr = txq->read_ptr;
+ int idx = get_cmd_index(txq, rd_ptr);
+
+ lockdep_assert_held(&txq->lock);
+
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
+ iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr));
+
+ /* free SKB */
+ if (txq->entries) {
+ struct sk_buff *skb;
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+ }
+}
+
+static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+ u16 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans_pcie->max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
+
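+/*
+ * Build a gen2 TFD for a frame - a sketch of the layout assembled below:
+ * TB0 - bi-directional DMA copy of the first IWL_FIRST_TB_SIZE bytes of
+ * the device command, TB1 - remainder of the TX command plus the 802.11
+ * header (dword aligned), TB2 - remainder of the skb head, and one TB
+ * per non-empty skb fragment.
+ */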
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_tfh_tfd *tfd =
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+ dma_addr_t tb_phys;
+ int i, len, tb1_len, tb2_len, hdr_len;
+ void *tb1_addr;
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE);
+
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /* there must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
+ ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+
+ tb1_len = ALIGN(len, 4);
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+
+ /* set up TFD's third entry to point to remainder of skb's head */
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ tb2_len = skb_headlen(skb) - hdr_len;
+
+ if (tb2_len > 0) {
+ tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
+ }
+
+ /* set up the remaining entries to point to the data */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int tb_idx;
+
+ if (!skb_frag_size(frag))
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
+ skb_frag_size(frag));
+
+ out_meta->tbs |= BIT(tb_idx);
+ }
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len,
+ skb->data + hdr_len, tb2_len);
+ trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len,
+ skb->len - hdr_len);
+
+ return tfd;
+
+out_err:
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ void *tfd;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ spin_lock(&txq->lock);
+
+ /* Set up driver data for this TFD */
+ txq->entries[txq->write_ptr].skb = skb;
+ txq->entries[txq->write_ptr].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(txq->write_ptr)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[txq->write_ptr].meta;
+ out_meta->flags = 0;
+
+ tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+ if (!tfd) {
+ spin_unlock(&txq->lock);
+ return -1;
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_gen2_get_num_tbs(trans, tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr) {
+ if (txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
+ iwl_trans_ref(trans);
+ }
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+ if (iwl_queue_space(txq) < txq->high_mark)
+ iwl_stop_queue(trans, txq);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
+ * @trans: the transport layer pointer
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed. On success, it returns the index (>= 0) of the command in the
+ * command queue.
+ */
+static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ unsigned long flags;
+ void *dup_buf = NULL;
+ dma_addr_t phys_addr;
+ int idx, i, cmd_pos;
+ u16 copy_size, cmd_size, tb0_size;
+ bool had_nocopy = false;
+ u8 group_id = iwl_cmd_groupid(cmd->id);
+ const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ struct iwl_tfh_tfd *tfd =
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+ cmd_size = sizeof(struct iwl_cmd_header_wide);
+
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ cmddata[i] = cmd->data[i];
+ cmdlen[i] = cmd->len[i];
+
+ if (!cmd->len[i])
+ continue;
+
+ /* need at least IWL_FIRST_TB_SIZE copied */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ int copy = IWL_FIRST_TB_SIZE - copy_size;
+
+ if (copy > cmdlen[i])
+ copy = cmdlen[i];
+ cmdlen[i] -= copy;
+ cmddata[i] += copy;
+ copy_size += copy;
+ }
+
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+ had_nocopy = true;
+ if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+ /*
+ * This is also a chunk that isn't copied
+ * to the static buffer so set had_nocopy.
+ */
+ had_nocopy = true;
+
+ /* only allowed once */
+ if (WARN_ON(dup_buf)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ dup_buf = kmemdup(cmddata[i], cmdlen[i],
+ GFP_ATOMIC);
+ if (!dup_buf)
+ return -ENOMEM;
+ } else {
+ /* NOCOPY must not be followed by normal! */
+ if (WARN_ON(had_nocopy)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ copy_size += cmdlen[i];
+ }
+ cmd_size += cmd->len[i];
+ }
+
+ /*
+ * If any of the command structures end up being larger than the
+ * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
+ * separate TFDs, then we will need to increase the size of the buffers
+ */
+ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+ "Command %s (%#x) is too large (%d bytes)\n",
+ iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ spin_lock_bh(&txq->lock);
+
+ if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_bh(&txq->lock);
+
+ IWL_ERR(trans, "No space in command queue\n");
+ iwl_op_mode_cmd_queue_full(trans->op_mode);
+ idx = -ENOSPC;
+ goto free_dup_buf;
+ }
+
+ idx = get_cmd_index(txq, txq->write_ptr);
+ out_cmd = txq->entries[idx].cmd;
+ out_meta = &txq->entries[idx].meta;
+
+ /* re-initialize to NULL */
+ memset(out_meta, 0, sizeof(*out_meta));
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+
+ /* set up the header */
+ out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+ out_cmd->hdr_wide.group_id = group_id;
+ out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+ out_cmd->hdr_wide.length =
+ cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
+ out_cmd->hdr_wide.reserved = 0;
+ out_cmd->hdr_wide.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ INDEX_TO_SEQ(txq->write_ptr));
+
+ cmd_pos = sizeof(struct iwl_cmd_header_wide);
+ copy_size = sizeof(struct iwl_cmd_header_wide);
+
+ /* and copy the data that needs to be copied */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ int copy;
+
+ if (!cmd->len[i])
+ continue;
+
+ /* copy everything if not nocopy/dup */
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))) {
+ copy = cmd->len[i];
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+ copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_FIRST_TB_SIZE copied
+ * in total (for bi-directional DMA), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+ /* However, treat copy_size the proper way, we need it below */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ copy = IWL_FIRST_TB_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
+ }
+ }
+
+ IWL_DEBUG_HC(trans,
+ "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ iwl_get_cmd_string(trans, cmd->id), group_id,
+ out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+ cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+
+ /* start the TFD with the minimum copy bytes */
+ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
+ iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
+ tb0_size);
+
+ /* map first command fragment, if any remains */
+ if (copy_size > tb0_size) {
+ phys_addr = dma_map_single(trans->dev,
+ ((u8 *)&out_cmd->hdr) + tb0_size,
+ copy_size - tb0_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ idx = -ENOMEM;
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ goto out;
+ }
+ iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
+ copy_size - tb0_size);
+ }
+
+ /* map the remaining (adjusted) nocopy/dup fragments */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ const void *data = cmddata[i];
+
+ if (!cmdlen[i])
+ continue;
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
+ continue;
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+ data = dup_buf;
+ phys_addr = dma_map_single(trans->dev, (void *)data,
+ cmdlen[i], DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ idx = -ENOMEM;
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ goto out;
+ }
+ iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
+ }
+
+ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
+ out_meta->flags = cmd->flags;
+ if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+ kzfree(txq->entries[idx].free_buf);
+ txq->entries[idx].free_buf = dup_buf;
+
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
+ !trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = true;
+ IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
+ iwl_trans_ref(trans);
+ }
+ /* Increment and update queue's write index */
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
+out:
+ spin_unlock_bh(&txq->lock);
+free_dup_buf:
+ if (idx < 0)
+ kfree(dup_buf);
+ return idx;
+}
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ int cmd_idx;
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n", cmd_str))
+ return -EIO;
+
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+ if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
+ ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+ pm_runtime_active(&trans_pcie->pci_dev->dev),
+ msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+ if (!ret) {
+ IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ cmd_str, ret);
+ return ret;
+ }
+
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ txq->read_ptr, txq->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ cmd_str);
+ ret = -ETIMEDOUT;
+
+ iwl_force_nmi(trans);
+ iwl_trans_fw_error(trans);
+
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+ dump_stack();
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ }
+
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
+ }
+
+ return ret;
+}
+
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
+ return -ERFKILL;
+ }
+
+ if (cmd->flags & CMD_ASYNC) {
+ int ret;
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_get_cmd_string(trans, cmd->id), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
+}
+
+/*
+ * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ iwl_pcie_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+
+ if (txq->read_ptr == txq->write_ptr) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ if (txq_id != trans_pcie->cmd_queue) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+ txq->id);
+ iwl_trans_unref(trans);
+ } else if (trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = false;
+ IWL_DEBUG_RPM(trans,
+ "clear ref_cmd_in_flight\n");
+ iwl_trans_unref(trans);
+ }
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
+ }
+ spin_unlock_bh(&txq->lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_wake_queue(trans, txq);
+}
+
+static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+ kfree(txq);
+}
+
+/*
+ * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ int i;
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_pcie_gen2_txq_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < txq->n_window; i++) {
+ kzfree(txq->entries[i].cmd);
+ kzfree(txq->entries[i].free_buf);
+ }
+ del_timer_sync(&txq->stuck_timer);
+
+ iwl_pcie_gen2_txq_free_memory(trans, txq);
+
+ trans_pcie->txq[txq_id] = NULL;
+
+ clear_bit(txq_id, trans_pcie->queue_used);
+}
+
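+/*
+ * iwl_trans_pcie_dyn_txq_alloc - allocate a Tx queue via firmware command
+ *
+ * A sketch of the flow below: allocate the queue and its byte-count
+ * table, send the queue configuration command, and register the queue
+ * under the qid the firmware chose in its response. Returns that qid
+ * (>= 0) on success, or a negative errno on failure.
+ */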
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+ struct iwl_tx_queue_cfg_cmd *cmd,
+ int cmd_id,
+ unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ struct iwl_txq *txq;
+ struct iwl_host_cmd hcmd = {
+ .id = cmd_id,
+ .len = { sizeof(*cmd) },
+ .data = { cmd, },
+ .flags = CMD_WANT_SKB,
+ };
+ int ret, qid;
+
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ return -ENOMEM;
+ ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+ sizeof(struct iwlagn_scd_bc_tbl));
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ kfree(txq);
+ return -ENOMEM;
+ }
+
+ ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue alloc failed\n");
+ goto error;
+ }
+ ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue init failed\n");
+ goto error;
+ }
+
+ txq->wd_timeout = msecs_to_jiffies(timeout);
+
+ cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ qid = le16_to_cpu(rsp->queue_number);
+
+	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
+ WARN_ONCE(1, "queue index %d unsupported", qid);
+ ret = -EIO;
+ goto error;
+ }
+
+ if (test_and_set_bit(qid, trans_pcie->queue_used)) {
+ WARN_ONCE(1, "queue %d already used", qid);
+ ret = -EIO;
+ goto error;
+ }
+
+ txq->id = qid;
+ trans_pcie->txq[qid] = txq;
+
+ /* Place first TFD at index corresponding to start sequence number */
+ txq->read_ptr = le16_to_cpu(rsp->write_pointer);
+ txq->write_ptr = le16_to_cpu(rsp->write_pointer);
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+ (txq->write_ptr) | (qid << 16));
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+ return qid;
+
+error:
+ iwl_pcie_gen2_txq_free_memory(trans, txq);
+ return ret;
+}
+
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
+ if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", queue);
+ return;
+ }
+
+ iwl_pcie_gen2_txq_unmap(trans, queue);
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* Free all TX queues */
+ for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
+ if (!trans_pcie->txq[i])
+ continue;
+
+ iwl_pcie_gen2_txq_free(trans, i);
+ }
+}
+
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *cmd_queue;
+ int txq_id = trans_pcie->cmd_queue, ret;
+
+ /* alloc and init the command queue */
+ if (!trans_pcie->txq[txq_id]) {
+ cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
+ if (!cmd_queue) {
+ IWL_ERR(trans, "Not enough memory for command queue\n");
+ return -ENOMEM;
+ }
+ trans_pcie->txq[txq_id] = cmd_queue;
+ ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
+ if (ret) {
+			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ } else {
+ cmd_queue = trans_pcie->txq[txq_id];
+ }
+
+ ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
+ if (ret) {
+		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ trans_pcie->txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans_pcie->queue_used);
+
+ return 0;
+
+error:
+ iwl_pcie_gen2_tx_free(trans);
+ return ret;
+}
+
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 911cf9868107..386950a2d616 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -2,7 +2,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -71,7 +71,7 @@
*
***************************************************/
-static int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
@@ -102,10 +102,9 @@ static int iwl_queue_space(const struct iwl_txq *q)
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
q->n_window = slots_num;
- q->id = id;
/* slots_num must be power-of-two size, otherwise
* get_cmd_index is broken. */
@@ -126,8 +125,8 @@ static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
return 0;
}
-static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr, size_t size)
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
{
if (WARN_ON(ptr->addr))
return -EINVAL;
@@ -140,8 +139,7 @@ static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
return 0;
}
-static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr)
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
if (unlikely(!ptr->addr))
return;
@@ -164,9 +162,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
}
spin_unlock(&txq->lock);
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
- jiffies_to_msecs(txq->wd_timeout));
-
iwl_trans_pcie_log_scd_error(trans, txq);
iwl_force_nmi(trans);
@@ -188,6 +183,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
(void *)txq->entries[txq->write_ptr].cmd->payload;
+ u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -210,26 +206,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
return;
- if (trans->cfg->use_tfh) {
- u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
- /*
- * filled_tfd_size contains the number of filled bytes in the
- * TFD.
- * Dividing it by 64 will give the number of chunks to fetch
- * to SRAM- 0 for one chunk, 1 for 2 and so on.
- * If, for example, TFD contains only 3 TBs then 32 bytes
- * of the TFD are used, and only one chunk of 64 bytes should
- * be fetched
- */
- u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- } else {
- u8 sta_id = tx_cmd->sta_id;
-
- bc_ent = cpu_to_le16(len | (sta_id << 12));
- }
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -319,23 +296,17 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
int i;
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = &trans_pcie->txq[i];
+ struct iwl_txq *txq = trans_pcie->txq[i];
spin_lock_bh(&txq->lock);
- if (trans_pcie->txq[i].need_update) {
+ if (txq->need_update) {
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- trans_pcie->txq[i].need_update = false;
+ txq->need_update = false;
}
spin_unlock_bh(&txq->lock);
}
}
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, int idx)
-{
- return txq->tfds + trans_pcie->tfd_size * idx;
-}
-
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
void *_tfd, u8 idx)
{
@@ -368,28 +339,17 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
u8 idx, dma_addr_t addr, u16 len)
{
- if (trans->cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
- struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
+ u16 hi_n_len = len << 4;
- tfd_fh->num_tbs = cpu_to_le16(idx + 1);
- } else {
- struct iwl_tfd *tfd_fh = (void *)tfd;
- struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
-
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+ put_unaligned_le32(addr, &tb->lo);
+ hi_n_len |= iwl_get_dma_hi_addr(addr);
- tb->hi_n_len = cpu_to_le16(hi_n_len);
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
- tfd_fh->num_tbs = idx + 1;
- }
+ tfd_fh->num_tbs = idx + 1;
}
static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
@@ -460,7 +420,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
@@ -522,9 +482,8 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return num_tbs;
}
-static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
- struct iwl_txq *txq, int slots_num,
- u32 txq_id)
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
@@ -547,7 +506,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->entries)
goto error;
- if (txq_id == trans_pcie->cmd_queue)
+ if (cmd_queue)
for (i = 0; i < slots_num; i++) {
txq->entries[i].cmd =
kmalloc(sizeof(struct iwl_device_cmd),
@@ -573,13 +532,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->first_tb_bufs)
goto err_free_tfds;
- txq->id = txq_id;
-
return 0;
err_free_tfds:
dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
- if (txq->entries && txq_id == trans_pcie->cmd_queue)
+ if (txq->entries && cmd_queue)
for (i = 0; i < slots_num; i++)
kfree(txq->entries[i].cmd);
kfree(txq->entries);
@@ -589,10 +546,9 @@ error:
}
-static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, u32 txq_id)
+int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
txq->need_update = false;
@@ -602,13 +558,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(txq, slots_num, txq_id);
+ ret = iwl_queue_init(txq, slots_num);
if (ret)
return ret;
spin_lock_init(&txq->lock);
- if (txq_id == trans_pcie->cmd_queue) {
+ if (cmd_queue) {
static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
@@ -616,18 +572,6 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
__skb_queue_head_init(&txq->overflow_q);
- /*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- * Circular buffer (TFD queue in DRAM) physical base address */
- if (trans->cfg->use_tfh)
- iwl_write_direct64(trans,
- FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->dma_addr);
- else
- iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->dma_addr >> 8);
-
return 0;
}
@@ -672,7 +616,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
@@ -704,7 +648,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
}
- txq->active = false;
while (!skb_queue_empty(&txq->overflow_q)) {
struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
@@ -729,7 +672,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -780,9 +723,6 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
- if (trans->cfg->use_tfh)
- return;
-
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -832,9 +772,16 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id;
+ /*
+	 * we should never get here in gen2 trans mode; return early to avoid
+	 * invalid accesses
+ */
+ if (WARN_ON_ONCE(trans->cfg->gen2))
+ return;
+
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -914,7 +861,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
/* This can happen: start_hw, stop_device */
- if (!trans_pcie->txq)
+ if (!trans_pcie->txq_memory)
return 0;
/* Unmap DMA from host system and free skb's */
@@ -935,15 +882,20 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
/* Tx queues */
- if (trans_pcie->txq) {
+ if (trans_pcie->txq_memory) {
for (txq_id = 0;
- txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+ txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
+ trans_pcie->txq[txq_id] = NULL;
+ }
}
- kfree(trans_pcie->txq);
- trans_pcie->txq = NULL;
+ kfree(trans_pcie->txq_memory);
+ trans_pcie->txq_memory = NULL;
iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
@@ -965,7 +917,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/*It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
- if (WARN_ON(trans_pcie->txq)) {
+ if (WARN_ON(trans_pcie->txq_memory)) {
ret = -EINVAL;
goto error;
}
@@ -984,9 +936,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
- sizeof(struct iwl_txq), GFP_KERNEL);
- if (!trans_pcie->txq) {
+ trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
+ if (!trans_pcie->txq_memory) {
IWL_ERR(trans, "Not enough memory for txq\n");
ret = -ENOMEM;
goto error;
@@ -995,14 +947,17 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
- slots_num = (txq_id == trans_pcie->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
+ bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+
+ slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+ slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
+ trans_pcie->txq[txq_id]->id = txq_id;
}
return 0;
@@ -1012,6 +967,7 @@ error:
return ret;
}
+
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1019,7 +975,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
int txq_id, slots_num;
bool alloc = false;
- if (!trans_pcie->txq) {
+ if (!trans_pcie->txq_memory) {
ret = iwl_pcie_tx_alloc(trans);
if (ret)
goto error;
@@ -1040,22 +996,24 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
- slots_num = (txq_id == trans_pcie->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
+ bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+
+ slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+ slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
}
- }
- if (trans->cfg->use_tfh) {
- iwl_write_direct32(trans, TFH_TRANSFER_MODE,
- TFH_TRANSFER_MAX_PENDING_REQ |
- TFH_CHUNK_SIZE_128 |
- TFH_CHUNK_SPLIT_MODE);
- return 0;
+ /*
+ * Tell nic where to find circular buffer of TFDs for a
+ * given Tx queue, and enable the DMA channel used for that
+ * queue.
+ * Circular buffer (TFD queue in DRAM) physical base address
+ */
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
+ trans_pcie->txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
@@ -1100,7 +1058,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
int last_to_free;
@@ -1110,7 +1068,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
spin_lock_bh(&txq->lock);
- if (!txq->active) {
+ if (!test_bit(txq_id, trans_pcie->queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
txq_id, ssn);
goto out;
@@ -1257,7 +1215,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
unsigned long flags;
int nfreed = 0;
@@ -1324,15 +1282,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
int fifo = -1;
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
- if (cfg && trans->cfg->use_tfh)
- WARN_ONCE(1, "Expected no calls to SCD configuration");
-
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
if (cfg) {
@@ -1414,27 +1369,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
"Activate queue %d WrPtr: %d\n",
txq_id, ssn & 0xff);
}
-
- txq->active = true;
}
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
txq->ampdu = !shared_mode;
}
-dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- return trans_pcie->scd_bc_tbls.dma +
- txq * sizeof(struct iwlagn_scd_bc_tbl);
-}
-
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
bool configure_scd)
{
@@ -1443,8 +1388,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
- trans_pcie->txq[txq_id].frozen = false;
+ trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
+ trans_pcie->txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
@@ -1458,9 +1403,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
return;
}
- if (configure_scd && trans->cfg->use_tfh)
- WARN_ONCE(1, "Expected no calls to SCD configuration");
-
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
@@ -1469,7 +1411,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans_pcie->txq[txq_id].ampdu = false;
+ trans_pcie->txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -1489,7 +1431,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -1774,16 +1716,15 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
* in the queue management code. */
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
+ txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+ txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
@@ -1867,6 +1808,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
int cmd_idx;
int ret;
@@ -1907,8 +1849,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
&trans->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
iwl_get_cmd_string(trans, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
@@ -1959,8 +1899,7 @@ cancel:
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
- trans_pcie->txq[trans_pcie->cmd_queue].
- entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
}
if (cmd->resp_pkt) {
@@ -2314,7 +2253,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u16 wifi_seq;
bool amsdu;
- txq = &trans_pcie->txq[txq_id];
+ txq = trans_pcie->txq[txq_id];
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
diff --git a/drivers/net/wireless/intersil/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c
index 7aa47069af0a..b2d5ec8634b5 100644
--- a/drivers/net/wireless/intersil/orinoco/cfg.c
+++ b/drivers/net/wireless/intersil/orinoco/cfg.c
@@ -97,7 +97,7 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
}
static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct orinoco_private *priv = wiphy_priv(wiphy);
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 28cf97489001..d9128bb25e85 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -2283,7 +2283,7 @@ int orinoco_if_add(struct orinoco_private *priv,
priv->ndev = dev;
/* Report what we've done */
- dev_dbg(priv->dev, "Registerred interface %s.\n", dev->name);
+ dev_dbg(priv->dev, "Registered interface %s.\n", dev->name);
return 0;
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 98e1380b9917..132f5fbda58b 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -769,18 +769,31 @@ static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
{
- u8 res_val = reset; /* avoid argument promotion */
+ int ret;
+ u8 *res_val = NULL;
if (!upriv->udev) {
err("%s: !upriv->udev", __func__);
return -EFAULT;
}
- return usb_control_msg(upriv->udev,
+
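+	/* usb_control_msg() needs a DMA-able buffer, so it must not live on the stack */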
+ res_val = kmalloc(sizeof(*res_val), GFP_KERNEL);
+
+ if (!res_val)
+ return -ENOMEM;
+
+ *res_val = reset; /* avoid argument promotion */
+
+ ret = usb_control_msg(upriv->udev,
usb_sndctrlpipe(upriv->udev, 0),
EZUSB_REQUEST_FW_TRANS,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
- USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val,
- sizeof(res_val), DEF_TIMEOUT);
+ USB_DIR_OUT, EZUSB_CPUCS_REG, 0, res_val,
+ sizeof(*res_val), DEF_TIMEOUT);
+
+ kfree(res_val);
+
+ return ret;
}
static int ezusb_firmware_download(struct ezusb_priv *upriv,
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 1af7da0b386e..5e1c91a80c58 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -352,7 +352,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
if (hdr->rate & 0x10)
- rx_status->flag |= RX_FLAG_SHORTPRE;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
else
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 50c219fb1a52..87444af20fc5 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -350,6 +350,7 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = {
CHAN5G(5785), /* Channel 157 */
CHAN5G(5805), /* Channel 161 */
CHAN5G(5825), /* Channel 165 */
+ CHAN5G(5845), /* Channel 169 */
};
static const struct ieee80211_rate hwsim_rates[] = {
@@ -389,7 +390,7 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy,
u32 val;
err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len,
- hwsim_vendor_test_policy);
+ hwsim_vendor_test_policy, NULL);
if (err)
return err;
if (!tb[QCA_WLAN_VENDOR_ATTR_TEST])
@@ -525,6 +526,11 @@ struct mac80211_hwsim_data {
struct ieee80211_vif *hw_scan_vif;
int scan_chan_idx;
u8 scan_addr[ETH_ALEN];
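+	/* per-channel active-time bookkeeping (jiffies), updated while scanning */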
+ struct {
+ struct ieee80211_channel *channel;
+ unsigned long next_start, start, end;
+ } survey_data[ARRAY_SIZE(hwsim_channels_2ghz) +
+ ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_channel *channel;
u64 beacon_int /* beacon interval in us */;
@@ -552,8 +558,6 @@ struct mac80211_hwsim_data {
/* wmediumd portid responsible for netgroup of this radio */
u32 wmediumd;
- int power_level;
-
/* difference between this hw's clock and the real clock, in usecs */
s64 tsf_offset;
s64 bcn_delta;
@@ -1188,20 +1192,22 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
rx_status.rate_idx =
ieee80211_rate_get_vht_mcs(&info->control.rates[0]);
- rx_status.vht_nss =
+ rx_status.nss =
ieee80211_rate_get_vht_nss(&info->control.rates[0]);
- rx_status.flag |= RX_FLAG_VHT;
+ rx_status.encoding = RX_ENC_VHT;
} else {
rx_status.rate_idx = info->control.rates[0].idx;
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
- rx_status.flag |= RX_FLAG_HT;
+ rx_status.encoding = RX_ENC_HT;
}
if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- rx_status.flag |= RX_FLAG_40MHZ;
+ rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
- rx_status.flag |= RX_FLAG_SHORT_GI;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
/* TODO: simulate real signal strength (and optional packet loss) */
- rx_status.signal = data->power_level - 50;
+ rx_status.signal = -50;
+ if (info->control.vif)
+ rx_status.signal += info->control.vif->bss_conf.txpower;
if (data->ps != PS_DISABLED)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -1576,6 +1582,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
[IEEE80211_SMPS_STATIC] = "static",
[IEEE80211_SMPS_DYNAMIC] = "dynamic",
};
+ int idx;
if (conf->chandef.chan)
wiphy_debug(hw->wiphy,
@@ -1598,11 +1605,34 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- data->channel = conf->chandef.chan;
+ WARN_ON(conf->chandef.chan && data->use_chanctx);
+
+ mutex_lock(&data->mutex);
+ if (data->scanning && conf->chandef.chan) {
+ for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
+ if (data->survey_data[idx].channel == data->channel) {
+ data->survey_data[idx].start =
+ data->survey_data[idx].next_start;
+ data->survey_data[idx].end = jiffies;
+ break;
+ }
+ }
- WARN_ON(data->channel && data->use_chanctx);
+ data->channel = conf->chandef.chan;
+
+ for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
+ if (data->survey_data[idx].channel &&
+ data->survey_data[idx].channel != data->channel)
+ continue;
+ data->survey_data[idx].channel = data->channel;
+ data->survey_data[idx].next_start = jiffies;
+ break;
+ }
+ } else {
+ data->channel = conf->chandef.chan;
+ }
+ mutex_unlock(&data->mutex);
- data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
tasklet_hrtimer_cancel(&data->beacon_timer);
else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
@@ -1787,28 +1817,37 @@ static int mac80211_hwsim_conf_tx(
return 0;
}
-static int mac80211_hwsim_get_survey(
- struct ieee80211_hw *hw, int idx,
- struct survey_info *survey)
+static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
{
- struct ieee80211_conf *conf = &hw->conf;
-
- wiphy_debug(hw->wiphy, "%s (idx=%d)\n", __func__, idx);
+ struct mac80211_hwsim_data *hwsim = hw->priv;
- if (idx != 0)
+ if (idx < 0 || idx >= ARRAY_SIZE(hwsim->survey_data))
return -ENOENT;
- /* Current channel */
- survey->channel = conf->chandef.chan;
+ mutex_lock(&hwsim->mutex);
+ survey->channel = hwsim->survey_data[idx].channel;
+ if (!survey->channel) {
+ mutex_unlock(&hwsim->mutex);
+ return -ENOENT;
+ }
/*
- * Magically conjured noise level --- this is only ok for simulated hardware.
+ * Magically conjured dummy values --- this is only ok for simulated hardware.
*
- * A real driver which cannot determine the real channel noise MUST NOT
- * report any noise, especially not a magically conjured one :-)
+ * A real driver which cannot determine real values MUST NOT
+ * report any, especially not magically conjured ones :-)
*/
- survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
survey->noise = -92;
+ survey->time =
+ jiffies_to_msecs(hwsim->survey_data[idx].end -
+ hwsim->survey_data[idx].start);
+ /* report 12.5% of channel time is used */
+ survey->time_busy = survey->time/8;
+ mutex_unlock(&hwsim->mutex);
return 0;
}
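The survey numbers above derive directly from the jiffies interval that the scan paths record per channel, with a fixed one-eighth busy fraction. A worked example, assuming HZ=1000 (one jiffy per millisecond) and a 100 ms dwell:

	unsigned long start = 1000, end = 1100;      /* recorded in survey_data */
	u32 time_ms = jiffies_to_msecs(end - start); /* 100 ms on the channel */
	u32 busy_ms = time_ms / 8;                   /* 12 ms, i.e. ~12.5% busy */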
@@ -1852,7 +1891,7 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
int err, ps;
err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len,
- hwsim_testmode_policy);
+ hwsim_testmode_policy, NULL);
if (err)
return err;
@@ -1986,6 +2025,10 @@ static void hw_scan_work(struct work_struct *work)
}
ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan,
msecs_to_jiffies(dwell));
+ hwsim->survey_data[hwsim->scan_chan_idx].channel = hwsim->tmp_chan;
+ hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies;
+ hwsim->survey_data[hwsim->scan_chan_idx].end =
+ jiffies + msecs_to_jiffies(dwell);
hwsim->scan_chan_idx++;
mutex_unlock(&hwsim->mutex);
}
@@ -2011,6 +2054,7 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
hw_req->req.mac_addr_mask);
else
memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN);
+ memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
mutex_unlock(&hwsim->mutex);
wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
@@ -2057,6 +2101,7 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw,
memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN);
hwsim->scanning = true;
+ memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
out:
mutex_unlock(&hwsim->mutex);
@@ -2207,7 +2252,6 @@ static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
"d_tx_failed",
"d_ps_mode",
"d_group",
- "d_tx_power",
};
#define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats)
@@ -2244,7 +2288,6 @@ static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw,
data[i++] = ar->tx_failed;
data[i++] = ar->ps;
data[i++] = ar->group;
- data[i++] = ar->power_level;
WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN);
}
@@ -2438,6 +2481,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
goto failed;
}
+ /* ieee80211_alloc_hw_nm may have used a default name */
+ param->hwname = wiphy_name(hw->wiphy);
+
if (info)
net = genl_info_net(info);
else
@@ -2645,6 +2691,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
if (param->no_vif)
ieee80211_hw_set(hw, NO_AUTO_VIF);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
err = ieee80211_register_hw(hw);
if (err < 0) {
printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 39f22467ca2a..3f5eda591dba 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -57,12 +57,12 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_REGISTER: request to register and receive all broadcasted
* frames by any mac80211_hwsim radio device.
* @HWSIM_CMD_FRAME: send/receive a broadcasted frame from/to kernel/user
- * space, uses:
+ * space, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER,
* %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE,
* %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional)
* @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
- * kernel, uses:
+ * kernel, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
* %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
* @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters,
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 3f97acb57e66..a0463fef79b0 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1657,7 +1657,7 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
*/
static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct lbs_private *priv = wiphy_priv(wiphy);
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index c3a53cd6988e..7b4955cc38db 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1181,6 +1181,10 @@ static int if_spi_probe(struct spi_device *spi)
/* Initialize interrupt handling stuff. */
card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
+ if (!card->workqueue) {
+ err = -ENOMEM;
+ goto remove_card;
+ }
INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
INIT_WORK(&card->resume_work, if_spi_resume_worker);
@@ -1209,6 +1213,7 @@ release_irq:
free_irq(spi->irq, card);
terminate_workqueue:
destroy_workqueue(card->workqueue);
+remove_card:
lbs_remove_card(priv); /* will call free_netdev */
free_card:
free_if_spi_card(card);
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 54e426c1e405..d80333117989 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -641,6 +641,8 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
BIT(NL80211_IFTYPE_ADHOC);
skb_queue_head_init(&priv->bc_ps_buf);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
SET_IEEE80211_DEV(hw, dmdev);
INIT_WORK(&priv->cmd_work, lbtf_cmd_work);
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 43dccd5b0291..366eb4991a7d 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -153,7 +153,8 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REPORT_REQUEST);
cmd->size = cpu_to_le16(S_DS_GEN);
- le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_chan_rpt_req));
+ le16_unaligned_add_cpu(&cmd->size,
+ sizeof(struct host_cmd_ds_chan_rpt_req));
cr_req->chan_desc.start_freq = cpu_to_le16(MWIFIEX_A_BAND_START_FREQ);
cr_req->chan_desc.chan_num = radar_params->chandef->chan->hw_value;
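le16_unaligned_add_cpu() is the unaligned-safe replacement for le16_add_cpu() used throughout these mwifiex hunks, needed because the command structures are packed and their __le16 fields may be misaligned. Presumably it is defined along these lines (a sketch, not quoted from the patch):

	static inline void le16_unaligned_add_cpu(__le16 *var, u16 val)
	{
		put_unaligned_le16(get_unaligned_le16(var) + val, var);
	}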
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 1e3bd435a694..7ec06bf13413 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -594,6 +594,24 @@ int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
return 0;
}
+static void mwifiex_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ unsigned int i;
+
+ if (!wiphy->bands[NL80211_BAND_5GHZ])
+ return;
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ if ((!(chan->flags & IEEE80211_CHAN_DISABLED)) &&
+ (chan->flags & IEEE80211_CHAN_RADAR))
+ chan->flags |= IEEE80211_CHAN_NO_IR;
+ }
+}
+
/*
* CFG802.11 regulatory domain callback function.
*
@@ -613,6 +631,7 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
mwifiex_dbg(adapter, INFO,
"info: cfg80211 regulatory domain callback for %c%c\n",
request->alpha2[0], request->alpha2[1]);
+ mwifiex_reg_apply_radar_flags(wiphy);
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
@@ -916,7 +935,7 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
static int
mwifiex_change_vif_to_p2p(struct net_device *dev,
enum nl80211_iftype curr_iftype,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct mwifiex_private *priv;
@@ -988,7 +1007,7 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
static int
mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
enum nl80211_iftype curr_iftype,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct mwifiex_private *priv;
@@ -1047,7 +1066,7 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
static int
mwifiex_change_vif_to_ap(struct net_device *dev,
enum nl80211_iftype curr_iftype,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct mwifiex_private *priv;
@@ -1103,7 +1122,7 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
static int
mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -1124,10 +1143,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
- type, flags, params);
+ type, params);
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
- flags, params);
+ params);
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as IBSS\n", dev->name);
@@ -1154,10 +1173,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
- type, flags, params);
+ type, params);
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
- flags, params);
+ params);
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as STA\n", dev->name);
@@ -1175,13 +1194,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
- type, flags,
- params);
+ type, params);
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
- type, flags, params);
+ type, params);
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as AP\n", dev->name);
@@ -1214,14 +1232,13 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
if (mwifiex_cfg80211_deinit_p2p(priv))
return -EFAULT;
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
- type, flags,
- params);
+ type, params);
break;
case NL80211_IFTYPE_AP:
if (mwifiex_cfg80211_deinit_p2p(priv))
return -EFAULT;
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
- flags, params);
+ params);
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as P2P\n", dev->name);
@@ -2036,7 +2053,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+ cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
@@ -2304,7 +2321,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
(int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+ cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
@@ -2513,7 +2530,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->scan_block = false;
if (!mwifiex_stop_bg_scan(priv))
- cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+ cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0);
user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
if (!user_scan_cfg)
@@ -2528,9 +2545,11 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->random_mac[i] |= get_random_int() &
~(request->mac_addr_mask[i]);
}
+ ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
+ } else {
+ eth_zero_addr(priv->random_mac);
}
- ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
user_scan_cfg->num_ssids = request->n_ssids;
user_scan_cfg->ssid_list = request->ssids;
@@ -2701,7 +2720,7 @@ mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy,
* previous bgscan configuration in the firmware
*/
static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy,
- struct net_device *dev)
+ struct net_device *dev, u64 reqid)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -2822,7 +2841,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
@@ -3997,8 +4015,8 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!priv)
return -EINVAL;
- err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len,
- mwifiex_tm_policy);
+ err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len, mwifiex_tm_policy,
+ NULL);
if (err)
return err;
@@ -4279,7 +4297,6 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
- WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -4298,6 +4315,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+ wiphy->max_sched_scan_reqs = 1;
wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH;
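These cfg80211 hunks track the multi-scheduled-scan API change: the WIPHY_FLAG_SUPPORTS_SCHED_SCAN capability bit is replaced by a request count, and the stop paths now carry a request id. In sketch form (mwifiex supports a single request, hence reqid 0):

	wiphy->max_sched_scan_reqs = 1;         /* replaces the wiphy flag */
	cfg80211_sched_scan_stopped(wiphy, 0);  /* reqid names the request */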
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 25a7475702f7..0c3b217247b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -242,7 +242,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
mwifiex_dbg(adapter, CMD,
"cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
cmd_code,
- le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)),
+ get_unaligned_le16((u8 *)host_cmd + S_DS_GEN),
cmd_size, le16_to_cpu(host_cmd->seq_num));
mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size);
@@ -286,7 +286,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
(adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM;
adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code;
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
- le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN));
+ get_unaligned_le16((u8 *)host_cmd + S_DS_GEN);
/* Clear BSS_NO_BITS from HostCmd */
cmd_code &= HostCmd_CMD_ID_MASK;
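get_unaligned_le16() replaces the cast-and-dereference here because host_cmd payloads are byte buffers with no alignment guarantee, and a misaligned 16-bit load traps on strict-alignment CPUs. An open-coded equivalent of the accessor, for illustration:

	/* Byte-wise little-endian read; safe at any alignment. */
	static inline u16 read_le16(const u8 *p)
	{
		return p[0] | ((u16)p[1] << 8);
	}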
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index cb6a1a81d44e..6cf9ab9133ea 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -31,17 +31,35 @@ struct rfc_1042_hdr {
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
-};
+} __packed;
struct rx_packet_hdr {
struct ethhdr eth803_hdr;
struct rfc_1042_hdr rfc1042_hdr;
-};
+} __packed;
struct tx_packet_hdr {
struct ethhdr eth803_hdr;
struct rfc_1042_hdr rfc1042_hdr;
-};
+} __packed;
+
+struct mwifiex_fw_header {
+ __le32 dnld_cmd;
+ __le32 base_addr;
+ __le32 data_length;
+ __le32 crc;
+} __packed;
+
+struct mwifiex_fw_data {
+ struct mwifiex_fw_header header;
+ __le32 seq_num;
+ u8 data[1];
+} __packed;
+
+#define MWIFIEX_FW_DNLD_CMD_1 0x1
+#define MWIFIEX_FW_DNLD_CMD_5 0x5
+#define MWIFIEX_FW_DNLD_CMD_6 0x6
+#define MWIFIEX_FW_DNLD_CMD_7 0x7
#define B_SUPPORTED_RATES 5
#define G_SUPPORTED_RATES 9
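The new header plus the MWIFIEX_FW_DNLD_CMD_* values describe the records that mwifiex_extract_wifi_fw() (added in pcie.c later in this patch) walks: each record is a struct mwifiex_fw_header followed by data_length payload bytes. Reading one record at a byte offset might look like this sketch (bounds checks omitted, variable names illustrative):

	const struct mwifiex_fw_data *rec = (const void *)(fw_image + offset);
	u32 cmd = le32_to_cpu(rec->header.dnld_cmd);    /* MWIFIEX_FW_DNLD_CMD_* */
	u32 len = le32_to_cpu(rec->header.data_length); /* bytes after the header */

	offset += sizeof(rec->header) + len;            /* step to the next record */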
@@ -707,7 +725,7 @@ struct uap_txpd {
u8 reserved1[2];
u8 tx_token_id;
u8 reserved[2];
-};
+} __packed;
struct uap_rxpd {
u8 bss_type;
@@ -723,7 +741,7 @@ struct uap_rxpd {
u8 ht_info;
u8 reserved[3];
u8 flags;
-};
+} __packed;
struct mwifiex_fw_chan_stats {
u8 chan_num;
@@ -987,7 +1005,7 @@ struct mwifiex_ps_param {
__le16 adhoc_wake_period;
__le16 mode;
__le16 delay_to_ps;
-};
+} __packed;
#define HS_DEF_WAKE_INTERVAL 100
#define HS_DEF_INACTIVITY_TIMEOUT 50
@@ -996,7 +1014,7 @@ struct mwifiex_ps_param_in_hs {
struct mwifiex_ie_types_header header;
__le32 hs_wake_int;
__le32 hs_inact_timeout;
-};
+} __packed;
#define BITMAP_AUTO_DS 0x01
#define BITMAP_STA_PS 0x10
@@ -1062,7 +1080,7 @@ struct host_cmd_ds_802_11_rssi_info {
__le16 nbcn;
__le16 reserved[9];
long long reserved_1;
-};
+} __packed;
struct host_cmd_ds_802_11_rssi_info_rsp {
__le16 action;
@@ -1077,12 +1095,12 @@ struct host_cmd_ds_802_11_rssi_info_rsp {
__le16 bcn_rssi_avg;
__le16 bcn_nf_avg;
long long tsf_bcn;
-};
+} __packed;
struct host_cmd_ds_802_11_mac_address {
__le16 action;
u8 mac_addr[ETH_ALEN];
-};
+} __packed;
struct host_cmd_ds_mac_control {
__le32 action;
@@ -1230,7 +1248,7 @@ struct host_cmd_ds_802_11_get_log {
__le32 wep_icv_err_cnt[4];
__le32 bcn_rcv_cnt;
__le32 bcn_miss_cnt;
-};
+} __packed;
/* Enumeration for rate format */
enum _mwifiex_rate_format {
@@ -1368,12 +1386,12 @@ struct host_cmd_ds_rf_ant_mimo {
__le16 tx_ant_mode;
__le16 action_rx;
__le16 rx_ant_mode;
-};
+} __packed;
struct host_cmd_ds_rf_ant_siso {
__le16 action;
__le16 ant_mode;
-};
+} __packed;
struct host_cmd_ds_tdls_oper {
__le16 tdls_action;
@@ -1383,13 +1401,13 @@ struct host_cmd_ds_tdls_oper {
struct mwifiex_tdls_config {
__le16 enable;
-};
+} __packed;
struct mwifiex_tdls_config_cs_params {
u8 unit_time;
u8 thr_otherlink;
u8 thr_directlink;
-};
+} __packed;
struct mwifiex_tdls_init_cs_params {
u8 peer_mac[ETH_ALEN];
@@ -1404,7 +1422,7 @@ struct mwifiex_tdls_init_cs_params {
struct mwifiex_tdls_stop_cs_params {
u8 peer_mac[ETH_ALEN];
-};
+} __packed;
struct host_cmd_ds_tdls_config {
__le16 tdls_action;
@@ -1709,7 +1727,7 @@ struct mwifiex_ie_types_local_pwr_constraint {
struct mwifiex_ie_types_wmm_param_set {
struct mwifiex_ie_types_header header;
u8 wmm_ie[1];
-};
+} __packed;
struct mwifiex_ie_types_mgmt_frame {
struct mwifiex_ie_types_header header;
@@ -1834,7 +1852,7 @@ struct host_cmd_ds_mem_access {
__le16 reserved;
__le32 addr;
__le32 value;
-};
+} __packed;
struct mwifiex_ie_types_qos_info {
struct mwifiex_ie_types_header header;
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index c488c3068abc..922e3d69fd84 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -131,9 +131,10 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie));
}
- le16_add_cpu(&ie_list->len,
- le16_to_cpu(priv->mgmt_ie[index].ie_length) +
- MWIFIEX_IE_HDR_SIZE);
+ le16_unaligned_add_cpu(&ie_list->len,
+ le16_to_cpu(
+ priv->mgmt_ie[index].ie_length) +
+ MWIFIEX_IE_HDR_SIZE);
input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE;
}
@@ -172,21 +173,21 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
le16_to_cpu(beacon_ie->ie_length);
memcpy(pos, beacon_ie, len);
pos += len;
- le16_add_cpu(&ap_custom_ie->len, len);
+ le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
if (pr_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(pr_ie->ie_length);
memcpy(pos, pr_ie, len);
pos += len;
- le16_add_cpu(&ap_custom_ie->len, len);
+ le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
if (ar_ie) {
len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
le16_to_cpu(ar_ie->ie_length);
memcpy(pos, ar_ie, len);
pos += len;
- le16_add_cpu(&ap_custom_ie->len, len);
+ le16_unaligned_add_cpu(&ap_custom_ie->len, len);
}
ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie);
@@ -242,7 +243,7 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
vs_ie = (struct ieee_types_header *)vendor_ie;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
- le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
+ le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
ie->mgmt_subtype_mask = cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 536ab834b126..48e154e1865d 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -91,6 +91,8 @@ struct wep_key {
#define MWIFIEX_TDLS_DEF_QOS_CAPAB 0xf
#define MWIFIEX_PRIO_BK 2
#define MWIFIEX_PRIO_VI 5
+#define MWIFIEX_SUPPORTED_CHANNELS 2
+#define MWIFIEX_OPERATING_CLASSES 16
struct mwifiex_uap_bss_param {
u8 channel;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index b62e03d11c2e..dd87b9ff64c3 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -17,6 +17,8 @@
* this warranty disclaimer.
*/
+#include <linux/suspend.h>
+
#include "main.h"
#include "wmm.h"
#include "cfg80211.h"
@@ -147,7 +149,6 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
kfree(adapter->regd);
- vfree(adapter->chan_stats);
kfree(adapter);
return 0;
}
@@ -511,7 +512,7 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
* - Download the correct firmware to card
* - Issue the init commands to firmware
*/
-static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
+static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
{
int ret;
char fmt[64];
@@ -594,7 +595,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
rtnl_lock();
/* Create station interface by default */
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
- NL80211_IFTYPE_STATION, NULL, NULL);
+ NL80211_IFTYPE_STATION, NULL);
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create default STA interface\n");
@@ -604,7 +605,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) {
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM,
- NL80211_IFTYPE_AP, NULL, NULL);
+ NL80211_IFTYPE_AP, NULL);
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create AP interface\n");
@@ -615,8 +616,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) {
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d", NET_NAME_ENUM,
- NL80211_IFTYPE_P2P_CLIENT, NULL,
- NULL);
+ NL80211_IFTYPE_P2P_CLIENT, NULL);
if (IS_ERR(wdev)) {
mwifiex_dbg(adapter, ERROR,
"cannot create p2p client interface\n");
@@ -631,6 +631,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
+ vfree(adapter->chan_stats);
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
err_init_fw:
@@ -664,11 +665,18 @@ done:
mwifiex_free_adapter(adapter);
/* Tell all current and future waiters we're finished */
complete_all(fw_done);
- return;
+
+ return init_failed ? -EIO : 0;
+}
+
+static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
+{
+ _mwifiex_fw_dpc(firmware, context);
}
/*
- * This function initializes the hardware and gets firmware.
+ * This function gets the firmware and (if called asynchronously) kicks off the
+ * HW init when done.
*/
static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter,
bool req_fw_nowait)
@@ -691,20 +699,15 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter,
ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
adapter->dev, GFP_KERNEL, adapter,
mwifiex_fw_dpc);
- if (ret < 0)
- mwifiex_dbg(adapter, ERROR,
- "request_firmware_nowait error %d\n", ret);
} else {
ret = request_firmware(&adapter->firmware,
adapter->fw_name,
adapter->dev);
- if (ret < 0)
- mwifiex_dbg(adapter, ERROR,
- "request_firmware error %d\n", ret);
- else
- mwifiex_fw_dpc(adapter->firmware, (void *)adapter);
}
+ if (ret < 0)
+ mwifiex_dbg(adapter, ERROR, "request_firmware%s error %d\n",
+ req_fw_nowait ? "_nowait" : "", ret);
return ret;
}
@@ -745,7 +748,7 @@ mwifiex_close(struct net_device *dev)
mwifiex_dbg(priv->adapter, INFO,
"aborting bgscan on ndo_stop\n");
mwifiex_stop_bg_scan(priv);
- cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+ cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0);
}
return 0;
@@ -1413,6 +1416,7 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
rtnl_unlock();
}
+ vfree(adapter->chan_stats);
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
exit_return:
@@ -1426,6 +1430,8 @@ EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw);
int
mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
{
+ int ret;
+
mwifiex_init_lock_list(adapter);
if (adapter->if_ops.up_dev)
adapter->if_ops.up_dev(adapter);
@@ -1435,6 +1441,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
init_waitqueue_head(&adapter->init_wait_q);
adapter->is_suspended = false;
adapter->hs_activated = false;
+ adapter->is_cmd_timedout = 0;
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
adapter->cmd_wait_q.status = 0;
@@ -1472,9 +1479,15 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
"%s: firmware init failed\n", __func__);
goto err_init_fw;
}
+
+ /* _mwifiex_fw_dpc() does its own cleanup */
+ ret = _mwifiex_fw_dpc(adapter->firmware, adapter);
+ if (ret) {
+ pr_err("Failed to bring up adapter: %d\n", ret);
+ return ret;
+ }
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
- complete_all(adapter->fw_done);
return 0;
err_init_fw:
@@ -1502,14 +1515,13 @@ static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv)
{
struct mwifiex_adapter *adapter = priv;
- if (adapter->irq_wakeup >= 0) {
- dev_dbg(adapter->dev, "%s: wake by wifi", __func__);
- adapter->wake_by_wifi = true;
- disable_irq_nosync(irq);
- }
+ dev_dbg(adapter->dev, "%s: wake by wifi", __func__);
+ adapter->wake_by_wifi = true;
+ disable_irq_nosync(irq);
/* Notify PM core we are wakeup source */
pm_wakeup_event(adapter->dev, 0);
+ pm_system_wakeup();
return IRQ_HANDLED;
}
@@ -1714,6 +1726,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
rtnl_unlock();
}
+ vfree(adapter->chan_stats);
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
@@ -1742,7 +1755,7 @@ void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
struct va_format vaf;
va_list args;
- if (!adapter->dev || !(adapter->debug_mask & mask))
+ if (!(adapter->debug_mask & mask))
return;
va_start(args, fmt);
@@ -1750,7 +1763,10 @@ void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
vaf.fmt = fmt;
vaf.va = &args;
- dev_info(adapter->dev, "%pV", &vaf);
+ if (adapter->dev)
+ dev_info(adapter->dev, "%pV", &vaf);
+ else
+ pr_info("%pV", &vaf);
va_end(args);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 5c8297207f33..bb2a467d8b13 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1359,7 +1359,7 @@ mwifiex_netdev_get_priv(struct net_device *dev)
*/
static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
{
- return (le32_to_cpu(*(__le32 *)skb->data) == PKT_TYPE_MGMT);
+ return (get_unaligned_le32(skb->data) == PKT_TYPE_MGMT);
}
/* This function retrieves channel closed for operation by Channel
@@ -1529,7 +1529,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params);
int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index b8c990d10d6e..ac62bce50e96 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -119,7 +119,7 @@ static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter,
*/
static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
{
- u32 *cookie_addr;
+ u32 cookie_value;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -127,11 +127,11 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
return true;
if (card->sleep_cookie_vbase) {
- cookie_addr = (u32 *)card->sleep_cookie_vbase;
+ cookie_value = get_unaligned_le32(card->sleep_cookie_vbase);
mwifiex_dbg(adapter, INFO,
"info: ACCESS_HW: sleep cookie=0x%x\n",
- *cookie_addr);
- if (*cookie_addr == FW_AWAKE_COOKIE)
+ cookie_value);
+ if (cookie_value == FW_AWAKE_COOKIE)
return true;
}
@@ -294,8 +294,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
if (!adapter || !adapter->priv_num)
return;
- cancel_work_sync(&card->work);
-
reg = card->pcie.reg;
if (reg)
ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
@@ -350,22 +348,16 @@ MODULE_DEVICE_TABLE(pci, mwifiex_ids);
static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
{
- struct mwifiex_adapter *adapter;
- struct pcie_service_card *card;
-
- if (!pdev) {
- pr_err("%s: PCIe device is not specified\n", __func__);
- return;
- }
+ struct pcie_service_card *card = pci_get_drvdata(pdev);
+ struct mwifiex_adapter *adapter = card->adapter;
+ int ret;
- card = (struct pcie_service_card *)pci_get_drvdata(pdev);
- if (!card || !card->adapter) {
- pr_err("%s: Card or adapter structure is not valid (%ld)\n",
- __func__, (long)card);
+ if (!adapter) {
+ dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
+ __func__);
return;
}
- adapter = card->adapter;
mwifiex_dbg(adapter, INFO,
"%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n",
__func__, pdev->vendor, pdev->device,
@@ -379,13 +371,19 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
*/
mwifiex_shutdown_sw(adapter);
adapter->surprise_removed = true;
+ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
} else {
/* Kernel stores and restores PCIe function context before and
* after performing FLR respectively. Reconfigure the software
* and firmware including firmware redownload
*/
adapter->surprise_removed = false;
- mwifiex_reinit_sw(adapter);
+ ret = mwifiex_reinit_sw(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "reinit failed: %d\n", ret);
+ return;
+ }
}
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
}
@@ -447,7 +445,7 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
sizeof(sleep_cookie),
PCI_DMA_FROMDEVICE);
buffer = cmdrsp->data;
- sleep_cookie = READ_ONCE(*(u32 *)buffer);
+ sleep_cookie = get_unaligned_le32(buffer);
if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
mwifiex_dbg(adapter, INFO,
@@ -1039,6 +1037,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
if (card && card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(card->cmd_buf);
}
return 0;
}
@@ -1049,6 +1048,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
+ u32 tmp;
card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
&card->sleep_cookie_pbase);
@@ -1058,11 +1058,12 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
return -ENOMEM;
}
/* Init val of Sleep Cookie */
- *(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE;
+ tmp = FW_AWAKE_COOKIE;
+ put_unaligned(tmp, card->sleep_cookie_vbase);
mwifiex_dbg(adapter, INFO,
"alloc_scook: sleep cookie=0x%x\n",
- *((u32 *)card->sleep_cookie_vbase));
+ get_unaligned(card->sleep_cookie_vbase));
return 0;
}
@@ -1223,7 +1224,6 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
dma_addr_t buf_pa;
struct mwifiex_pcie_buf_desc *desc = NULL;
struct mwifiex_pfu_buf_desc *desc2 = NULL;
- __le16 *tmp;
if (!(skb->data && skb->len)) {
mwifiex_dbg(adapter, ERROR,
@@ -1244,10 +1244,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
adapter->data_sent = true;
payload = skb->data;
- tmp = (__le16 *)&payload[0];
- *tmp = cpu_to_le16((u16)skb->len);
- tmp = (__le16 *)&payload[2];
- *tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
+ put_unaligned_le16((u16)skb->len, payload + 0);
+ put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2);
if (mwifiex_map_pci_memory(adapter, skb, skb->len,
PCI_DMA_TODEVICE))
@@ -1376,7 +1374,6 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
(card->rxbd_rdptr & reg->rx_rollover_ind))) {
struct sk_buff *skb_data;
u16 rx_len;
- __le16 pkt_len;
rd_index = card->rxbd_rdptr & reg->rx_mask;
skb_data = card->rx_buf_list[rd_index];
@@ -1393,8 +1390,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
/* Get data length from interface header -
* first 2 bytes for len, next 2 bytes for type
*/
- pkt_len = *((__le16 *)skb_data->data);
- rx_len = le16_to_cpu(pkt_len);
+ rx_len = get_unaligned_le16(skb_data->data);
if (WARN_ON(rx_len <= INTF_HEADER_LEN ||
rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) {
mwifiex_dbg(adapter, ERROR,
@@ -1601,13 +1597,18 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
adapter->cmd_sent = true;
- *(__le16 *)&payload[0] = cpu_to_le16((u16)skb->len);
- *(__le16 *)&payload[2] = cpu_to_le16(MWIFIEX_TYPE_CMD);
+ put_unaligned_le16((u16)skb->len, &payload[0]);
+ put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]);
if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
return -1;
card->cmd_buf = skb;
+ /*
+ * Need to keep a reference, since core driver might free up this
+ * buffer before we've unmapped it.
+ */
+ skb_get(skb);
/* To send a command, the driver will:
1. Write the 64bit physical address of the data buffer to
@@ -1694,7 +1695,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
struct sk_buff *skb = card->cmdrsp_buf;
int count = 0;
u16 rx_len;
- __le16 pkt_len;
mwifiex_dbg(adapter, CMD,
"info: Rx CMD Response\n");
@@ -1711,11 +1711,11 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
- pkt_len = *((__le16 *)skb->data);
- rx_len = le16_to_cpu(pkt_len);
+ rx_len = get_unaligned_le16(skb->data);
skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
skb_trim(skb, rx_len);
@@ -1856,7 +1856,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
desc = card->evtbd_ring[rdptr];
memset(desc, 0, sizeof(*desc));
- event = *(u32 *) &skb_cmd->data[INTF_HEADER_LEN];
+ event = get_unaligned_le32(&skb_cmd->data[INTF_HEADER_LEN]);
adapter->event_cause = event;
/* The first 4 bytes will be the event transfer header
len is 2 bytes followed by type which is 2 bytes */
@@ -1965,6 +1965,94 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
return ret;
}
+/* Combo firmware image is a combination of
+ * (1) combo CRC header, starting with CMD5
+ * (2) bluetooth image, starting with CMD7, ending with CMD6, data wrapped in CMD1.
+ * (3) wifi image.
+ *
+ * This function bypasses the header and bluetooth parts and returns
+ * the offset of the tail wifi-only part.
+ */
+
+static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter,
+ const void *firmware, u32 firmware_len) {
+ const struct mwifiex_fw_data *fwdata;
+ u32 offset = 0, data_len, dnld_cmd;
+ int ret = 0;
+ bool cmd7_before = false;
+
+ while (1) {
+ /* Check for integer and buffer overflow */
+ if (offset + sizeof(fwdata->header) < sizeof(fwdata->header) ||
+ offset + sizeof(fwdata->header) >= firmware_len) {
+ mwifiex_dbg(adapter, ERROR,
+ "extract wifi-only fw failure!\n");
+ ret = -1;
+ goto done;
+ }
+
+ fwdata = firmware + offset;
+ dnld_cmd = le32_to_cpu(fwdata->header.dnld_cmd);
+ data_len = le32_to_cpu(fwdata->header.data_length);
+
+ /* Skip past header */
+ offset += sizeof(fwdata->header);
+
+ switch (dnld_cmd) {
+ case MWIFIEX_FW_DNLD_CMD_1:
+ if (!cmd7_before) {
+ mwifiex_dbg(adapter, ERROR,
+ "no cmd7 before cmd1!\n");
+ ret = -1;
+ goto done;
+ }
+ if (offset + data_len < data_len) {
+ mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
+ ret = -1;
+ goto done;
+ }
+ offset += data_len;
+ break;
+ case MWIFIEX_FW_DNLD_CMD_5:
+ /* Check for integer overflow */
+ if (offset + data_len < data_len) {
+ mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
+ ret = -1;
+ goto done;
+ }
+ offset += data_len;
+ break;
+ case MWIFIEX_FW_DNLD_CMD_6:
+ /* Check for integer overflow */
+ if (offset + data_len < data_len) {
+ mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
+ ret = -1;
+ goto done;
+ }
+ offset += data_len;
+ if (offset >= firmware_len) {
+ mwifiex_dbg(adapter, ERROR,
+ "extract wifi-only fw failure!\n");
+ ret = -1;
+ } else {
+ ret = offset;
+ }
+ goto done;
+ case MWIFIEX_FW_DNLD_CMD_7:
+ cmd7_before = true;
+ break;
+ default:
+ mwifiex_dbg(adapter, ERROR, "unknown dnld_cmd %d\n",
+ dnld_cmd);
+ ret = -1;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
/*
* This function downloads the firmware to the card.
*
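The repeated `offset + data_len < data_len` tests in mwifiex_extract_wifi_fw() above are the usual unsigned-wraparound guard: for u32 operands, a sum that wrapped compares smaller than either addend. The idiom in isolation:

	/* Advance a u32 parse offset, refusing to wrap past UINT_MAX. */
	static bool advance_offset(u32 *offset, u32 len, u32 total)
	{
		if (*offset + len < *offset)	/* wrapped: reject */
			return false;
		*offset += len;
		return *offset <= total;
	}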
@@ -1980,7 +2068,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
u32 firmware_len = fw->fw_len;
u32 offset = 0;
struct sk_buff *skb;
- u32 txlen, tx_blocks = 0, tries, len;
+ u32 txlen, tx_blocks = 0, tries, len, val;
u32 block_retry_cnt = 0;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -2007,6 +2095,24 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
goto done;
}
+ ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_13_REG, &val);
+ if (ret) {
+ mwifiex_dbg(adapter, FATAL, "Failed to read scratch register 13\n");
+ goto done;
+ }
+
+ /* PCIE FLR case: extract wifi part from combo firmware */
+ if (val == MWIFIEX_PCIE_FLR_HAPPENS) {
+ ret = mwifiex_extract_wifi_fw(adapter, firmware, firmware_len);
+ if (ret < 0) {
+ mwifiex_dbg(adapter, ERROR, "Failed to extract wifi fw\n");
+ goto done;
+ }
+ offset = ret;
+ mwifiex_dbg(adapter, MSG,
+ "info: dnld wifi firmware from %d bytes\n", offset);
+ }
+
/* Perform firmware data transfer */
do {
u32 ireg_intr = 0;
@@ -2503,8 +2609,8 @@ mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG,
- PCIE_SCRATCH_13_REG,
- PCIE_SCRATCH_14_REG};
+ PCIE_SCRATCH_14_REG,
+ PCIE_SCRATCH_15_REG};
if (!p)
return 0;
@@ -2874,6 +2980,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
int ret;
u32 fw_status;
+ cancel_work_sync(&card->work);
+
ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
if (fw_status == FIRMWARE_READY_PCIE) {
mwifiex_dbg(adapter, INFO,
@@ -3077,12 +3185,6 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
struct pci_dev *pdev = card->dev;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- /* Bluetooth is not on pcie interface. Download Wifi only firmware
- * during pcie FLR, so that bluetooth part of firmware which is
- * already running doesn't get affected.
- */
- strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
-
/* tx_buf_size might be changed to 3584 by firmware during
* data transfer, we should reset it to default size.
*/
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index 00e8ee5ad4a8..f7ce9b6db6b4 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -35,7 +35,6 @@
#define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
#define PCIEUART8997_FW_NAME_V4 "mrvl/pcieuart8997_combo_v4.bin"
#define PCIEUSB8997_FW_NAME_V4 "mrvl/pcieusb8997_combo_v4.bin"
-#define PCIE8997_DEFAULT_WIFIFW_NAME "mrvl/pcie8997_wlan_v4.bin"
#define PCIE_VENDOR_ID_MARVELL (0x11ab)
#define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b)
@@ -77,8 +76,9 @@
#define PCIE_SCRATCH_10_REG 0xCE8
#define PCIE_SCRATCH_11_REG 0xCEC
#define PCIE_SCRATCH_12_REG 0xCF0
-#define PCIE_SCRATCH_13_REG 0xCF8
-#define PCIE_SCRATCH_14_REG 0xCFC
+#define PCIE_SCRATCH_13_REG 0xCF4
+#define PCIE_SCRATCH_14_REG 0xCF8
+#define PCIE_SCRATCH_15_REG 0xCFC
#define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C
#define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C
@@ -119,6 +119,8 @@
#define MWIFIEX_SLEEP_COOKIE_SIZE 4
#define MWIFIEX_MAX_DELAY_COUNT 100
+#define MWIFIEX_PCIE_FLR_HAPPENS 0xFEDCBABA
+
struct mwifiex_pcie_card_reg {
u16 cmd_addr_lo;
u16 cmd_addr_hi;
@@ -217,8 +219,8 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
.ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
.pfu_enabled = 1,
.sleep_cookie = 0,
- .fw_dump_ctrl = 0xcf4,
- .fw_dump_start = 0xcf8,
+ .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
+ .fw_dump_start = PCIE_SCRATCH_14_REG,
.fw_dump_end = 0xcff,
.fw_dump_host_ready = 0xee,
.fw_dump_read_done = 0xfe,
@@ -254,8 +256,8 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
.ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
.pfu_enabled = 1,
.sleep_cookie = 0,
- .fw_dump_ctrl = 0xcf4,
- .fw_dump_start = 0xcf8,
+ .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
+ .fw_dump_start = PCIE_SCRATCH_14_REG,
.fw_dump_end = 0xcff,
.fw_dump_host_ready = 0xcc,
.fw_dump_read_done = 0xdd,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 181691684a08..ce6936d0c5c0 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -691,8 +691,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Increment the TLV header length by the size
appended */
- le16_add_cpu(&chan_tlv_out->header.len,
- sizeof(chan_tlv_out->chan_scan_param));
+ le16_unaligned_add_cpu(&chan_tlv_out->header.len,
+ sizeof(
+ chan_tlv_out->chan_scan_param));
/*
* The tlv buffer length is set to the number of bytes
@@ -859,6 +860,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
*scan_current_only = false;
if (user_scan_in) {
+ u8 tmpaddr[ETH_ALEN];
/* Default the ssid_filter flag to TRUE, set false under
certain wildcard conditions and qualified by the existence
@@ -883,8 +885,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
user_scan_in->specific_bssid,
sizeof(scan_cfg_out->specific_bssid));
+ memcpy(tmpaddr, scan_cfg_out->specific_bssid, ETH_ALEN);
+
if (adapter->ext_scan &&
- !is_zero_ether_addr(scan_cfg_out->specific_bssid)) {
+ !is_zero_ether_addr(tmpaddr)) {
bssid_tlv =
(struct mwifiex_ie_types_bssid_list *)tlv_pos;
bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID);
@@ -947,8 +951,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
* truncate scan results. That is not an issue with an SSID
* or BSSID filter applied to the scan results in the firmware.
*/
+ memcpy(tmpaddr, scan_cfg_out->specific_bssid, ETH_ALEN);
if ((i && ssid_filter) ||
- !is_zero_ether_addr(scan_cfg_out->specific_bssid))
+ !is_zero_ether_addr(tmpaddr))
*filtered_scan = true;
if (user_scan_in->scan_chan_gap) {
@@ -989,10 +994,15 @@ mwifiex_config_scan(struct mwifiex_private *priv,
* If a specific BSSID or SSID is used, the number of channels in the
* scan command will be increased to the absolute maximum.
*/
- if (*filtered_scan)
+ if (*filtered_scan) {
*max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
- else
- *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
+ } else {
+ if (!priv->media_connected)
+ *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
+ else
+ *max_chan_per_scan =
+ MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD / 2;
+ }
if (adapter->ext_scan) {
bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
@@ -1742,7 +1752,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
if (*bytes_left >= sizeof(beacon_size)) {
/* Extract & convert beacon size from command buffer */
- beacon_size = le16_to_cpu(*(__le16 *)(*bss_info));
+ beacon_size = get_unaligned_le16((*bss_info));
*bytes_left -= sizeof(beacon_size);
*bss_info += sizeof(beacon_size);
}
@@ -2369,8 +2379,9 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
/* Increment the TLV header length by size appended */
- le16_add_cpu(&chan_list_tlv->header.len,
- sizeof(chan_list_tlv->chan_scan_param));
+ le16_unaligned_add_cpu(&chan_list_tlv->header.len,
+ sizeof(
+ chan_list_tlv->chan_scan_param));
temp_chan->chan_number =
bgscan_cfg_in->chan_list[chan_idx].chan_number;
@@ -2407,8 +2418,8 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in,
chan_list_tlv->
chan_scan_param);
- le16_add_cpu(&chan_list_tlv->header.len,
- chan_num *
+ le16_unaligned_add_cpu(&chan_list_tlv->header.len,
+ chan_num *
sizeof(chan_list_tlv->chan_scan_param[0]));
}
@@ -2432,7 +2443,7 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
/* Append vendor specific IE TLV */
mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos);
- le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv);
+ le16_unaligned_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index a4b356d267f9..0af1c6733c92 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -387,8 +387,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!adapter || !adapter->priv_num)
return;
- cancel_work_sync(&card->work);
-
mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
@@ -943,7 +941,7 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
return -1;
}
- nb = le16_to_cpu(*(__le16 *) (buffer));
+ nb = get_unaligned_le16((buffer));
if (nb > npayload) {
mwifiex_dbg(adapter, ERROR,
"%s: invalid packet, nb=%d npayload=%d\n",
@@ -951,7 +949,7 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
return -1;
}
- *type = le16_to_cpu(*(__le16 *) (buffer + 2));
+ *type = get_unaligned_le16((buffer + 2));
return ret;
}
@@ -1139,7 +1137,8 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
__func__, blk_num, blk_size, total_pkt_len);
break;
}
- pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
+ pkt_len = get_unaligned_le16((data +
+ SDIO_HEADER_OFFSET));
if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
mwifiex_dbg(adapter, ERROR,
"%s: error in pkt_len,\t"
@@ -1172,10 +1171,11 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
struct sk_buff *skb, u32 upld_typ)
{
u8 *cmd_buf;
- __le16 *curr_ptr = (__le16 *)skb->data;
- u16 pkt_len = le16_to_cpu(*curr_ptr);
+ u16 pkt_len;
struct mwifiex_rxinfo *rx_info;
+ pkt_len = get_unaligned_le16(skb->data);
+
if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) {
skb_trim(skb, pkt_len);
skb_pull(skb, INTF_HEADER_LEN);
@@ -1235,7 +1235,7 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
case MWIFIEX_TYPE_EVENT:
mwifiex_dbg(adapter, EVENT,
"info: --- Rx: Event ---\n");
- adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
+ adapter->event_cause = get_unaligned_le32(skb->data);
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
memcpy(adapter->event_body,
@@ -1380,7 +1380,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
}
if (card->mpa_rx.pkt_cnt == 1)
- mport = adapter->ioport + port;
+ mport = adapter->ioport + card->mpa_rx.start_port;
if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
card->mpa_rx.buf_len, mport, 1))
@@ -1392,8 +1392,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
u32 *len_arr = card->mpa_rx.len_arr;
/* get curr PKT len & type */
- pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
- pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
+ pkt_len = get_unaligned_le16(&curr_ptr[0]);
+ pkt_type = get_unaligned_le16(&curr_ptr[2]);
/* copy pkt to deaggr buf */
skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
@@ -1813,7 +1813,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
}
if (card->mpa_tx.pkt_cnt == 1)
- mport = adapter->ioport + port;
+ mport = adapter->ioport + card->mpa_tx.start_port;
ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
card->mpa_tx.buf_len, mport);
@@ -1874,8 +1874,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
/* Allocate buffer and copy payload */
blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
buf_block_len = (pkt_len + blk_size - 1) / blk_size;
- *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
- *(__le16 *)&payload[2] = cpu_to_le16(type);
+ put_unaligned_le16((u16)pkt_len, payload + 0);
+ put_unaligned_le16((u32)type, payload + 2);
+
/*
* This is SDIO specific header
@@ -2155,6 +2156,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
+ cancel_work_sync(&card->work);
+
kfree(card->mp_regs);
kfree(card->mpa_rx.skb_arr);
kfree(card->mpa_rx.len_arr);
@@ -2193,6 +2196,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
+ int ret;
mwifiex_shutdown_sw(adapter);
@@ -2207,7 +2211,9 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
- mwifiex_reinit_sw(adapter);
+ ret = mwifiex_reinit_sw(adapter);
+ if (ret)
+ dev_err(&func->dev, "reinit failed: %d\n", ret);
}
/* This function read/write firmware */
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 2f1f4d190b28..83916c1439af 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -126,19 +126,19 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
if (cmd_action == HostCmd_ACT_GEN_GET) {
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_GET);
snmp_mib->buf_size = cpu_to_le16(MAX_SNMP_BUF_SIZE);
- le16_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE);
+ le16_unaligned_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE);
} else if (cmd_action == HostCmd_ACT_GEN_SET) {
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- *((__le16 *) (snmp_mib->value)) = cpu_to_le16(*ul_temp);
- le16_add_cpu(&cmd->size, sizeof(u16));
+ put_unaligned_le16(*ul_temp, snmp_mib->value);
+ le16_unaligned_add_cpu(&cmd->size, sizeof(u16));
}
mwifiex_dbg(priv->adapter, CMD,
"cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t"
"OIDSize=0x%x, Value=0x%x\n",
cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
- le16_to_cpu(*(__le16 *)snmp_mib->value));
+ get_unaligned_le16(snmp_mib->value));
return 0;
}
@@ -1357,8 +1357,9 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
- le16_add_cpu(&cmd->size,
- sizeof(struct mwifiex_ie_types_rssi_threshold));
+ le16_unaligned_add_cpu(&cmd->size,
+ sizeof(
+ struct mwifiex_ie_types_rssi_threshold));
}
if (event_bitmap & BITMASK_BCN_RSSI_HIGH) {
@@ -1378,8 +1379,9 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
- le16_add_cpu(&cmd->size,
- sizeof(struct mwifiex_ie_types_rssi_threshold));
+ le16_unaligned_add_cpu(&cmd->size,
+ sizeof(
+ struct mwifiex_ie_types_rssi_threshold));
}
return 0;
@@ -1398,7 +1400,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
filter = &mef_entry->filter[i];
if (!filter->filt_type)
break;
- *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->repeat);
+ put_unaligned_le32((u32)filter->repeat, stack_ptr);
stack_ptr += 4;
*stack_ptr = TYPE_DNUM;
stack_ptr += 1;
@@ -1410,8 +1412,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
stack_ptr += 1;
*stack_ptr = TYPE_BYTESEQ;
stack_ptr += 1;
-
- *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->offset);
+ put_unaligned_le32((u32)filter->offset, stack_ptr);
stack_ptr += 4;
*stack_ptr = TYPE_DNUM;
stack_ptr += 1;
@@ -1683,14 +1684,15 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
sizeof(u8) + sizeof(u8));
/* Add the rule length to the command size*/
- le16_add_cpu(&cmd->size, le16_to_cpu(rule->header.len) +
- sizeof(struct mwifiex_ie_types_header));
+ le16_unaligned_add_cpu(&cmd->size,
+ le16_to_cpu(rule->header.len) +
+ sizeof(struct mwifiex_ie_types_header));
rule = (void *)((u8 *)rule->params + length);
}
/* Add sizeof action, num_of_rules to total command length */
- le16_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16));
+ le16_unaligned_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16));
return 0;
}
@@ -1708,7 +1710,7 @@ mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
cmd->size = cpu_to_le16(S_DS_GEN);
tdls_config->tdls_action = cpu_to_le16(cmd_action);
- le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+ le16_unaligned_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
switch (cmd_action) {
case ACT_TDLS_CS_ENABLE_CONFIG:
@@ -1735,7 +1737,7 @@ mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
return -ENOTSUPP;
}
- le16_add_cpu(&cmd->size, len);
+ le16_unaligned_add_cpu(&cmd->size, len);
return 0;
}
@@ -1759,7 +1761,8 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER);
cmd->size = cpu_to_le16(S_DS_GEN);
- le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper));
+ le16_unaligned_add_cpu(&cmd->size,
+ sizeof(struct host_cmd_ds_tdls_oper));
tdls_oper->reason = 0;
memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
@@ -1783,7 +1786,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
return -ENODATA;
}
- *(__le16 *)pos = cpu_to_le16(params->capability);
+ put_unaligned_le16(params->capability, pos);
config_len += sizeof(params->capability);
qos_info = params->uapsd_queues | (params->max_sp << 5);
@@ -1861,7 +1864,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
return -ENOTSUPP;
}
- le16_add_cpu(&cmd->size, config_len);
+ le16_unaligned_add_cpu(&cmd->size, config_len);
return 0;
}
@@ -2032,7 +2035,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_VERSION_EXT:
cmd_ptr->command = cpu_to_le16(cmd_no);
cmd_ptr->params.verext.version_str_sel =
- (u8) (*((u32 *) data_buf));
+ (u8)(get_unaligned((u32 *)data_buf));
memcpy(&cmd_ptr->params, data_buf,
sizeof(struct host_cmd_ds_version_ext));
cmd_ptr->size =
@@ -2043,7 +2046,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_MGMT_FRAME_REG:
cmd_ptr->command = cpu_to_le16(cmd_no);
cmd_ptr->params.reg_mask.action = cpu_to_le16(cmd_action);
- cmd_ptr->params.reg_mask.mask = cpu_to_le32(*(u32 *)data_buf);
+ cmd_ptr->params.reg_mask.mask = cpu_to_le32(
+ get_unaligned((u32 *)data_buf));
cmd_ptr->size =
cpu_to_le16(sizeof(struct host_cmd_ds_mgmt_frame_reg) +
S_DS_GEN);
@@ -2063,7 +2067,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_P2P_MODE_CFG:
cmd_ptr->command = cpu_to_le16(cmd_no);
cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
- cmd_ptr->params.mode_cfg.mode = cpu_to_le16(*(u16 *)data_buf);
+ cmd_ptr->params.mode_cfg.mode = cpu_to_le16(
+ get_unaligned((u16 *)data_buf));
cmd_ptr->size =
cpu_to_le16(sizeof(struct host_cmd_ds_p2p_mode_cfg) +
S_DS_GEN);
@@ -2359,8 +2364,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
- if (!disable_auto_ds &&
- first_sta && priv->adapter->iface_type != MWIFIEX_USB &&
+ if (!disable_auto_ds && first_sta &&
priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Enable auto deep sleep */
auto_ds.auto_ds = DEEP_SLEEP_ON;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 8548027abf71..f1d1f56fc23f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -183,7 +183,7 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
"query_type = %#x, buf size = %#x\n",
oid, query_type, le16_to_cpu(smib->buf_size));
if (query_type == HostCmd_ACT_GEN_GET) {
- ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
+ ul_temp = get_unaligned_le16(smib->value);
if (data_buf)
*data_buf = ul_temp;
switch (oid) {
@@ -741,7 +741,7 @@ mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_p2p_mode_cfg *mode_cfg = &resp->params.mode_cfg;
if (data_buf)
- *((u16 *)data_buf) = le16_to_cpu(mode_cfg->mode);
+ put_unaligned_le16(le16_to_cpu(mode_cfg->mode), data_buf);
return 0;
}
@@ -1201,7 +1201,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_ret_802_11_scan(priv, resp);
- cfg80211_sched_scan_results(priv->wdev.wiphy);
+ cfg80211_sched_scan_results(priv->wdev.wiphy, 0);
mwifiex_dbg(adapter, CMD,
"info: CMD_RESP: BG_SCAN result is ready!\n");
break;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index d63d163eb1ec..839df8a9634e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -670,7 +670,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->dbg.num_event_deauth++;
if (priv->media_connected) {
reason_code =
- le16_to_cpu(*(__le16 *)adapter->event_body);
+ get_unaligned_le16(adapter->event_body);
mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
@@ -685,7 +685,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->dbg.num_event_disassoc++;
if (priv->media_connected) {
reason_code =
- le16_to_cpu(*(__le16 *)adapter->event_body);
+ get_unaligned_le16(adapter->event_body);
mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
@@ -695,7 +695,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->dbg.num_event_link_lost++;
if (priv->media_connected) {
reason_code =
- le16_to_cpu(*(__le16 *)adapter->event_body);
+ get_unaligned_le16(adapter->event_body);
mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
@@ -793,7 +793,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_BG_SCAN_STOPPED:
dev_dbg(adapter->dev, "event: BGS_STOPPED\n");
- cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+ cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0);
if (priv->sched_scanning)
priv->sched_scanning = false;
break;
@@ -923,7 +923,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->event_body);
break;
case EVENT_AMSDU_AGGR_CTRL:
- ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+ ctrl = get_unaligned_le16(adapter->event_body);
mwifiex_dbg(adapter, EVENT,
"event: AMSDU_AGGR_CTRL %d\n", ctrl);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 1532ac9cee0b..42997e05d90f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -560,7 +560,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
#endif
mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
mwifiex_stop_bg_scan(priv);
- cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+ cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0);
#ifdef CONFIG_PM
}
#endif
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index df9704de0715..7d0d3ff3dd4c 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -349,7 +349,7 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
break;
}
- vht_oper->center_freq_seg1_idx =
+ vht_oper->center_freq_seg0_idx =
mwifiex_get_center_freq_index(priv, BAND_AAC,
bss_desc->channel,
chan_bw);
@@ -431,6 +431,41 @@ mwifiex_add_wmm_info_ie(struct mwifiex_private *priv, struct sk_buff *skb,
*buf++ = qosinfo; /* U-APSD not in use */
}
+static void mwifiex_tdls_add_bss_co_2040(struct sk_buff *skb)
+{
+ struct ieee_types_bss_co_2040 *bssco;
+
+ bssco = (void *)skb_put(skb, sizeof(struct ieee_types_bss_co_2040));
+ bssco->ieee_hdr.element_id = WLAN_EID_BSS_COEX_2040;
+ bssco->ieee_hdr.len = sizeof(struct ieee_types_bss_co_2040) -
+ sizeof(struct ieee_types_header);
+ bssco->bss_2040co = 0x01;
+}
+
+static void mwifiex_tdls_add_supported_chan(struct sk_buff *skb)
+{
+ struct ieee_types_generic *supp_chan;
+ u8 chan_supp[] = {1, 11};
+
+ supp_chan = (void *)skb_put(skb, (sizeof(struct ieee_types_header) +
+ sizeof(chan_supp)));
+ supp_chan->ieee_hdr.element_id = WLAN_EID_SUPPORTED_CHANNELS;
+ supp_chan->ieee_hdr.len = sizeof(chan_supp);
+ memcpy(supp_chan->data, chan_supp, sizeof(chan_supp));
+}
+
+static void mwifiex_tdls_add_oper_class(struct sk_buff *skb)
+{
+ struct ieee_types_generic *reg_class;
+ u8 rc_list[] = {1,
+ 1, 2, 3, 4, 12, 22, 23, 24, 25, 27, 28, 29, 30, 32, 33};
+ reg_class = (void *)skb_put(skb, (sizeof(struct ieee_types_header) +
+ sizeof(rc_list)));
+ reg_class->ieee_hdr.element_id = WLAN_EID_SUPPORTED_REGULATORY_CLASSES;
+ reg_class->ieee_hdr.len = sizeof(rc_list);
+ memcpy(reg_class->data, rc_list, sizeof(rc_list));
+}
+
static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
const u8 *peer, u8 action_code,
u8 dialog_token,
@@ -484,7 +519,9 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
}
mwifiex_tdls_add_ext_capab(priv, skb);
- mwifiex_tdls_add_qos_capab(skb);
+ mwifiex_tdls_add_bss_co_2040(skb);
+ mwifiex_tdls_add_supported_chan(skb);
+ mwifiex_tdls_add_oper_class(skb);
mwifiex_add_wmm_info_ie(priv, skb, 0);
break;
@@ -522,7 +559,9 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
}
mwifiex_tdls_add_ext_capab(priv, skb);
- mwifiex_tdls_add_qos_capab(skb);
+ mwifiex_tdls_add_bss_co_2040(skb);
+ mwifiex_tdls_add_supported_chan(skb);
+ mwifiex_tdls_add_oper_class(skb);
mwifiex_add_wmm_info_ie(priv, skb, 0);
break;
@@ -612,6 +651,9 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
sizeof(struct ieee_types_bss_co_2040) +
sizeof(struct ieee80211_ht_operation) +
sizeof(struct ieee80211_tdls_lnkie) +
+ (2 * (sizeof(struct ieee_types_header))) +
+ MWIFIEX_SUPPORTED_CHANNELS +
+ MWIFIEX_OPERATING_CLASSES +
sizeof(struct ieee80211_wmm_param_ie) +
extra_ies_len;
@@ -760,7 +802,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
}
mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_bss_co_2040(skb);
+ mwifiex_tdls_add_supported_chan(skb);
mwifiex_tdls_add_qos_capab(skb);
+ mwifiex_tdls_add_oper_class(skb);
break;
default:
mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n");
@@ -857,7 +902,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
struct mwifiex_sta_node *sta_ptr;
u8 *peer, *pos, *end;
u8 i, action, basic;
- __le16 cap = 0;
+ u16 cap = 0;
int ie_len = 0;
if (len < (sizeof(struct ethhdr) + 3))
@@ -879,7 +924,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
pos = buf + sizeof(struct ethhdr) + 4;
/* payload 1 + category 1 + action 1 + dialog 1 */
- cap = cpu_to_le16(*(u16 *)pos);
+ cap = get_unaligned_le16(pos);
ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
pos += 2;
break;
@@ -889,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
return;
/* payload 1 + category 1 + action 1 + dialog 1 + status code 2 */
pos = buf + sizeof(struct ethhdr) + 6;
- cap = cpu_to_le16(*(u16 *)pos);
+ cap = get_unaligned_le16(pos);
ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
pos += 2;
break;
@@ -909,7 +954,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
if (!sta_ptr)
return;
- sta_ptr->tdls_cap.capab = cap;
+ sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
if (pos + 2 + pos[1] > end)
@@ -969,7 +1014,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
case WLAN_EID_AID:
if (priv->adapter->is_hw_11ac_capable)
sta_ptr->tdls_cap.aid =
- le16_to_cpu(*(__le16 *)(pos + 2));
+ get_unaligned_le16((pos + 2));
default:
break;
}
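
Reviewer note: the three helpers added above all build 802.11 information elements in the standard element-ID/length/value layout before the TDLS frame goes out. A minimal userspace sketch of that append pattern, assuming the 802.11 element numbering (36 = Supported Channels, 72 = 20/40 BSS Coexistence); the buffer and helper name here are illustrative, not the driver's real structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one IE: element ID, length of the value part, then the value. */
static size_t append_ie(uint8_t *buf, uint8_t eid, const uint8_t *data, uint8_t len)
{
	buf[0] = eid;
	buf[1] = len;
	memcpy(&buf[2], data, len);
	return 2 + (size_t)len;
}

int main(void)
{
	uint8_t frame[64];
	size_t off = 0;
	const uint8_t chan_supp[] = { 1, 11 };	/* first channel, number of channels */
	const uint8_t coex = 0x01;		/* 20/40 coexistence: info request */

	off += append_ie(frame + off, 72, &coex, sizeof(coex));
	off += append_ie(frame + off, 36, chan_supp, sizeof(chan_supp));
	printf("encoded %zu bytes of IEs\n", off);
	return 0;
}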
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index d24eca34ac11..e10b2a52e78f 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -202,7 +202,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
"AP EVENT: event id: %#x\n", eventcause);
break;
case EVENT_AMSDU_AGGR_CTRL:
- ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+ ctrl = get_unaligned_le16(adapter->event_body);
mwifiex_dbg(adapter, EVENT,
"event: AMSDU_AGGR_CTRL %d\n", ctrl);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 9cf3334adf4d..2f7705c50161 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -306,9 +306,17 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
}
}
- usb_fill_bulk_urb(ctx->urb, card->udev,
- usb_rcvbulkpipe(card->udev, ctx->ep), ctx->skb->data,
- size, mwifiex_usb_rx_complete, (void *)ctx);
+ if (card->rx_cmd_ep == ctx->ep &&
+ card->rx_cmd_ep_type == USB_ENDPOINT_XFER_INT)
+ usb_fill_int_urb(ctx->urb, card->udev,
+ usb_rcvintpipe(card->udev, ctx->ep),
+ ctx->skb->data, size, mwifiex_usb_rx_complete,
+ (void *)ctx, card->rx_cmd_interval);
+ else
+ usb_fill_bulk_urb(ctx->urb, card->udev,
+ usb_rcvbulkpipe(card->udev, ctx->ep),
+ ctx->skb->data, size, mwifiex_usb_rx_complete,
+ (void *)ctx);
if (card->rx_cmd_ep == ctx->ep)
atomic_inc(&card->rx_cmd_urb_pending);
@@ -424,10 +432,13 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
epd = &iface_desc->endpoint[i].desc;
if (usb_endpoint_dir_in(epd) &&
usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
- usb_endpoint_xfer_bulk(epd)) {
- pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n",
+ (usb_endpoint_xfer_bulk(epd) ||
+ usb_endpoint_xfer_int(epd))) {
+ card->rx_cmd_ep_type = usb_endpoint_type(epd);
+ card->rx_cmd_interval = epd->bInterval;
+ pr_debug("info: Rx CMD/EVT:: max pkt size: %d, addr: %d, ep_type: %d\n",
le16_to_cpu(epd->wMaxPacketSize),
- epd->bEndpointAddress);
+ epd->bEndpointAddress, card->rx_cmd_ep_type);
card->rx_cmd_ep = usb_endpoint_num(epd);
atomic_set(&card->rx_cmd_urb_pending, 0);
}
@@ -461,10 +472,16 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
}
if (usb_endpoint_dir_out(epd) &&
usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
- usb_endpoint_xfer_bulk(epd)) {
+ (usb_endpoint_xfer_bulk(epd) ||
+ usb_endpoint_xfer_int(epd))) {
+ card->tx_cmd_ep_type = usb_endpoint_type(epd);
+ card->tx_cmd_interval = epd->bInterval;
pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
le16_to_cpu(epd->wMaxPacketSize),
epd->bEndpointAddress);
+ pr_debug("info: Tx CMD:: max pkt size: %d, addr: %d, ep_type: %d\n",
+ le16_to_cpu(epd->wMaxPacketSize),
+ epd->bEndpointAddress, card->tx_cmd_ep_type);
card->tx_cmd_ep = usb_endpoint_num(epd);
atomic_set(&card->tx_cmd_urb_pending, 0);
card->bulk_out_maxpktsize =
@@ -884,9 +901,17 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
context->skb = skb;
tx_urb = context->urb;
- usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep),
- data, skb->len, mwifiex_usb_tx_complete,
- (void *)context);
+ if (ep == card->tx_cmd_ep &&
+ card->tx_cmd_ep_type == USB_ENDPOINT_XFER_INT)
+ usb_fill_int_urb(tx_urb, card->udev,
+ usb_sndintpipe(card->udev, ep), data,
+ skb->len, mwifiex_usb_tx_complete,
+ (void *)context, card->tx_cmd_interval);
+ else
+ usb_fill_bulk_urb(tx_urb, card->udev,
+ usb_sndbulkpipe(card->udev, ep), data,
+ skb->len, mwifiex_usb_tx_complete,
+ (void *)context);
tx_urb->transfer_flags |= URB_ZERO_PACKET;
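
Reviewer note: both usb.c hunks follow the same shape — if the command endpoint was enumerated as interrupt-type, build an interrupt URB with the stored bInterval, otherwise keep the old bulk path. A stand-alone model of that dispatch; the enum and names are stand-ins for the kernel's USB_ENDPOINT_XFER_* constants and usb_fill_{int,bulk}_urb():

#include <stdio.h>

enum xfer_type { XFER_BULK, XFER_INT };

struct ep_info {
	int num;
	enum xfer_type type;
	unsigned char interval;	/* bInterval, meaningful only for INT endpoints */
};

/* Pick the URB flavor from the endpoint type discovered at probe time. */
static void submit(const struct ep_info *ep)
{
	if (ep->type == XFER_INT)
		printf("int urb on ep %d, interval %u\n", ep->num, ep->interval);
	else
		printf("bulk urb on ep %d\n", ep->num);
}

int main(void)
{
	struct ep_info cmd_ep = { .num = 1, .type = XFER_INT, .interval = 4 };

	submit(&cmd_ep);
	return 0;
}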
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index e5f204ea018b..e36bd63172ff 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -90,6 +90,10 @@ struct usb_card_rec {
struct urb_context tx_cmd;
u8 mc_resync_flag;
struct usb_tx_data_port port[MWIFIEX_TX_DATA_PORT];
+ int rx_cmd_ep_type;
+ u8 rx_cmd_interval;
+ int tx_cmd_ep_type;
+ u8 tx_cmd_interval;
};
struct fw_header {
@@ -102,12 +106,12 @@ struct fw_header {
struct fw_sync_header {
__le32 cmd;
__le32 seq_num;
-};
+} __packed;
struct fw_data {
struct fw_header fw_hdr;
__le32 seq_num;
u8 data[1];
-};
+} __packed;
#endif /*_MWIFIEX_USB_H */
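
Reviewer note: the __packed annotations matter because these headers describe an on-wire firmware layout; if a future member introduced padding, the host view would silently drift from what the firmware expects. Today's members are all __le32, so the attribute is belt-and-braces documentation of that contract. A userspace demo of the padding effect it guards against (struct names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct padded     { uint32_t cmd; uint8_t tag; uint32_t seq; };
struct packed_hdr { uint32_t cmd; uint8_t tag; uint32_t seq; } __attribute__((packed));

int main(void)
{
	/* Typically prints 12 vs 9: the compiler pads 'seq' to a 4-byte
	 * boundary unless the struct is packed. */
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct padded), sizeof(struct packed_hdr));
	return 0;
}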
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index b1ab8da121dd..0cd68ffc2c74 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -274,13 +274,13 @@ int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf,
val = *((u8 *)addr);
break;
case 2:
- val = *((u16 *)addr);
+ val = get_unaligned((u16 *)addr);
break;
case 4:
- val = *((u32 *)addr);
+ val = get_unaligned((u32 *)addr);
break;
case 8:
- val = *((long long *)addr);
+ val = get_unaligned((long long *)addr);
break;
default:
val = -1;
diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h
index b541d66c01eb..c386992abcdb 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.h
+++ b/drivers/net/wireless/marvell/mwifiex/util.h
@@ -93,4 +93,9 @@ static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf,
struct mwifiex_debug_info *info);
+static inline void le16_unaligned_add_cpu(__le16 *var, u16 val)
+{
+ put_unaligned_le16(get_unaligned_le16(var) + val, var);
+}
+
#endif /* !_MWIFIEX_UTIL_H_ */
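
Reviewer note: le16_unaligned_add_cpu() exists because cmd->size can sit at an odd offset inside a packed command buffer, where a plain __le16 load or store can trap on strict-alignment CPUs. A userspace sketch of what the helper boils down to, using memcpy the way the generic {get,put}_unaligned_le16() implementations do:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t get_le16(const void *p)
{
	uint8_t b[2];

	memcpy(b, p, 2);	/* byte loads: no alignment requirement */
	return (uint16_t)(b[0] | (b[1] << 8));
}

static void put_le16(uint16_t v, void *p)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };

	memcpy(p, b, 2);
}

/* Read-modify-write a little-endian 16-bit field at any address. */
static void le16_unaligned_add(void *var, uint16_t val)
{
	put_le16(get_le16(var) + val, var);
}

int main(void)
{
	uint8_t cmd[5] = { 0xaa, 0x10, 0x00, 0x00, 0x00 };	/* size at odd offset 1 */

	le16_unaligned_add(&cmd[1], 8);
	printf("size is now %u\n", get_le16(&cmd[1]));	/* 24 */
	return 0;
}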
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index b1b400b59d86..e813b2ca740c 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -994,9 +994,9 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
*noise = -rxd->noise_floor;
if (rxd->rate & MWL8K_AP_RATE_INFO_MCS_FORMAT) {
- status->flag |= RX_FLAG_HT;
+ status->encoding = RX_ENC_HT;
if (rxd->rate & MWL8K_AP_RATE_INFO_40MHZ)
- status->flag |= RX_FLAG_40MHZ;
+ status->bw = RATE_INFO_BW_40;
status->rate_idx = MWL8K_AP_RATE_INFO_RATEID(rxd->rate);
} else {
int i;
@@ -1011,7 +1011,7 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
if (rxd->channel > 14) {
status->band = NL80211_BAND_5GHZ;
- if (!(status->flag & RX_FLAG_HT))
+ if (!(status->encoding == RX_ENC_HT))
status->rate_idx -= 5;
} else {
status->band = NL80211_BAND_2GHZ;
@@ -1109,17 +1109,17 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
- status->flag |= RX_FLAG_SHORTPRE;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
- status->flag |= RX_FLAG_40MHZ;
+ status->bw = RATE_INFO_BW_40;
if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
- status->flag |= RX_FLAG_SHORT_GI;
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
- status->flag |= RX_FLAG_HT;
+ status->encoding = RX_ENC_HT;
if (rxd->channel > 14) {
status->band = NL80211_BAND_5GHZ;
- if (!(status->flag & RX_FLAG_HT))
+ if (!(status->encoding == RX_ENC_HT))
status->rate_idx -= 5;
} else {
status->band = NL80211_BAND_2GHZ;
@@ -6144,6 +6144,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
if (priv->sta_macids_supported || priv->device_info->fw_image_sta)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
rc = ieee80211_register_hw(hw);
if (rc) {
wiphy_err(hw->wiphy, "Cannot register device\n");
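
Reviewer note: the RX_FLAG_* conversions above track a mac80211 change that split RX rate metadata out of the single status->flag bitmask into dedicated encoding, bw and enc_flags fields. A simplified model of the new layout — the field names mirror struct ieee80211_rx_status, everything else is illustrative:

#include <stdio.h>

enum rx_enc  { RX_ENC_LEGACY, RX_ENC_HT };
enum rate_bw { BW_20, BW_40 };

struct rx_status {
	enum rx_enc encoding;	/* was the RX_FLAG_HT bit */
	enum rate_bw bw;	/* was the RX_FLAG_40MHZ bit */
	unsigned int enc_flags;	/* short GI, short preamble, ... */
	int rate_idx;
};

int main(void)
{
	struct rx_status st = { 0 };
	unsigned int mcs_format = 1, is_40mhz = 1;	/* pretend descriptor bits */

	if (mcs_format) {
		st.encoding = RX_ENC_HT;
		if (is_40mhz)
			st.bw = BW_40;
	}
	printf("encoding=%d bw=%d\n", st.encoding, st.bw);
	return 0;
}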
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index a6e901766226..d3b611aaf061 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -615,6 +615,8 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = mt76_init_sband_2g(dev);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index 3c576392ed89..d6dc59bb00df 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -401,7 +401,7 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
case MT_PHY_TYPE_CCK:
if (idx >= 8) {
idx -= 8;
- status->flag |= RX_FLAG_SHORTPRE;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
}
if (WARN_ON(idx >= 4))
@@ -410,10 +410,10 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
status->rate_idx = idx;
return;
case MT_PHY_TYPE_HT_GF:
- status->flag |= RX_FLAG_HT_GF;
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
/* fall through */
case MT_PHY_TYPE_HT:
- status->flag |= RX_FLAG_HT;
+ status->encoding = RX_ENC_HT;
status->rate_idx = idx;
break;
default:
@@ -422,13 +422,13 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
}
if (rate & MT_RXWI_RATE_SGI)
- status->flag |= RX_FLAG_SHORT_GI;
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate & MT_RXWI_RATE_STBC)
- status->flag |= 1 << RX_FLAG_STBC_SHIFT;
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
if (rate & MT_RXWI_RATE_BW)
- status->flag |= RX_FLAG_40MHZ;
+ status->bw = RATE_INFO_BW_40;
}
static void
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
index dbdfb3f5c507..a9f5f398b2f8 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mcu.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
- skb_reserve(skb, MT_DMA_HDR_LEN);
- memcpy(skb_put(skb, len), data, len);
+ if (skb) {
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+ }
return skb;
}
@@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
};
skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
}
@@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
};
skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
}
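
Reviewer note: the mcu.c hunks plug a missing allocation-failure path — alloc_skb() can return NULL, so the helper now only touches the buffer when it exists, and both callers bail out with -ENOMEM. The same shape in plain C, with malloc() standing in for alloc_skb():

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static void *msg_alloc(const void *data, size_t len)
{
	void *buf = malloc(len);

	if (buf)		/* only copy into the buffer if it exists */
		memcpy(buf, data, len);
	return buf;
}

static int send_cmd(const void *data, size_t len)
{
	void *msg = msg_alloc(data, len);

	if (!msg)
		return -ENOMEM;	/* mirrors the new early return */
	/* ... hand off to the transport, which takes ownership ... */
	free(msg);
	return 0;
}

int main(void) { return send_cmd("ping", 4) ? 1 : 0; }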
diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig
index de62f5dcb62f..a1d1cfe214d2 100644
--- a/drivers/net/wireless/ralink/rt2x00/Kconfig
+++ b/drivers/net/wireless/ralink/rt2x00/Kconfig
@@ -201,7 +201,7 @@ endif
config RT2800SOC
tristate "Ralink WiSoC support"
- depends on SOC_RT288X || SOC_RT305X
+ depends on SOC_RT288X || SOC_RT305X || SOC_MT7620
select RT2X00_LIB_SOC
select RT2X00_LIB_MMIO
select RT2X00_LIB_CRYPTO
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index 256496bfbafb..6a8c93fb6a43 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -79,6 +79,7 @@
#define RF5372 0x5372
#define RF5390 0x5390
#define RF5392 0x5392
+#define RF7620 0x7620
/*
* Chipset revisions.
@@ -639,6 +640,24 @@
#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
/*
+ * MT7620 RF registers (reversed order)
+ */
+#define RF_CSR_CFG_DATA_MT7620 FIELD32(0x0000ff00)
+#define RF_CSR_CFG_REGNUM_MT7620 FIELD32(0x03ff0000)
+#define RF_CSR_CFG_WRITE_MT7620 FIELD32(0x00000010)
+#define RF_CSR_CFG_BUSY_MT7620 FIELD32(0x00000001)
+
+/* undocumented registers for calibration of new MAC */
+#define RF_CONTROL0 0x0518
+#define RF_BYPASS0 0x051c
+#define RF_CONTROL1 0x0520
+#define RF_BYPASS1 0x0524
+#define RF_CONTROL2 0x0528
+#define RF_BYPASS2 0x052c
+#define RF_CONTROL3 0x0530
+#define RF_BYPASS3 0x0534
+
+/*
* EFUSE_CSR: RT30x0 EEPROM
*/
#define EFUSE_CTRL 0x0580
@@ -1022,6 +1041,16 @@
#define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000)
/*
+ * MIMO_PS_CFG: MIMO Power-save Configuration
+ */
+#define MIMO_PS_CFG 0x1210
+#define MIMO_PS_CFG_MMPS_BB_EN FIELD32(0x00000001)
+#define MIMO_PS_CFG_MMPS_RX_ANT_NUM FIELD32(0x00000006)
+#define MIMO_PS_CFG_MMPS_RF_EN FIELD32(0x00000008)
+#define MIMO_PS_CFG_RX_STBY_POL FIELD32(0x00000010)
+#define MIMO_PS_CFG_RX_RX_STBY0 FIELD32(0x00000020)
+
+/*
* EDCA_AC0_CFG:
*/
#define EDCA_AC0_CFG 0x1300
@@ -1095,6 +1124,12 @@
#define TX_PWR_CFG_0_OFDM6_CH1 FIELD32(0x00f00000)
#define TX_PWR_CFG_0_OFDM12_CH0 FIELD32(0x0f000000)
#define TX_PWR_CFG_0_OFDM12_CH1 FIELD32(0xf0000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_0B_1MBS_2MBS FIELD32(0x000000ff)
+#define TX_PWR_CFG_0B_5MBS_11MBS FIELD32(0x0000ff00)
+#define TX_PWR_CFG_0B_6MBS_9MBS FIELD32(0x00ff0000)
+#define TX_PWR_CFG_0B_12MBS_18MBS FIELD32(0xff000000)
+
/*
* TX_PWR_CFG_1:
@@ -1117,6 +1152,11 @@
#define TX_PWR_CFG_1_MCS0_CH1 FIELD32(0x00f00000)
#define TX_PWR_CFG_1_MCS2_CH0 FIELD32(0x0f000000)
#define TX_PWR_CFG_1_MCS2_CH1 FIELD32(0xf0000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_1B_24MBS_36MBS FIELD32(0x000000ff)
+#define TX_PWR_CFG_1B_48MBS FIELD32(0x0000ff00)
+#define TX_PWR_CFG_1B_MCS0_MCS1 FIELD32(0x00ff0000)
+#define TX_PWR_CFG_1B_MCS2_MCS3 FIELD32(0xff000000)
/*
* TX_PWR_CFG_2:
@@ -1139,6 +1179,11 @@
#define TX_PWR_CFG_2_MCS8_CH1 FIELD32(0x00f00000)
#define TX_PWR_CFG_2_MCS10_CH0 FIELD32(0x0f000000)
#define TX_PWR_CFG_2_MCS10_CH1 FIELD32(0xf0000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_2B_MCS4_MCS5 FIELD32(0x000000ff)
+#define TX_PWR_CFG_2B_MCS6_MCS7 FIELD32(0x0000ff00)
+#define TX_PWR_CFG_2B_MCS8_MCS9 FIELD32(0x00ff0000)
+#define TX_PWR_CFG_2B_MCS10_MCS11 FIELD32(0xff000000)
/*
* TX_PWR_CFG_3:
@@ -1161,6 +1206,11 @@
#define TX_PWR_CFG_3_STBC0_CH1 FIELD32(0x00f00000)
#define TX_PWR_CFG_3_STBC2_CH0 FIELD32(0x0f000000)
#define TX_PWR_CFG_3_STBC2_CH1 FIELD32(0xf0000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_3B_MCS12_MCS13 FIELD32(0x000000ff)
+#define TX_PWR_CFG_3B_MCS14 FIELD32(0x0000ff00)
+#define TX_PWR_CFG_3B_STBC_MCS0_MCS1 FIELD32(0x00ff0000)
+#define TX_PWR_CFG_3B_STBC_MCS2_MSC3 FIELD32(0xff000000)
/*
* TX_PWR_CFG_4:
@@ -1171,10 +1221,13 @@
#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
/* bits for 3T devices */
-#define TX_PWR_CFG_3_STBC4_CH0 FIELD32(0x0000000f)
-#define TX_PWR_CFG_3_STBC4_CH1 FIELD32(0x000000f0)
-#define TX_PWR_CFG_3_STBC6_CH0 FIELD32(0x00000f00)
-#define TX_PWR_CFG_3_STBC6_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_4_STBC4_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_STBC4_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_4_STBC6_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_4_STBC6_CH1 FIELD32(0x0000f000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_4B_STBC_MCS4_MCS5 FIELD32(0x000000ff)
+#define TX_PWR_CFG_4B_STBC_MCS6 FIELD32(0x0000ff00)
/*
* TX_PIN_CFG:
@@ -1201,6 +1254,8 @@
#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
+#define TX_PIN_CFG_RFRX_EN FIELD32(0x00100000)
+#define TX_PIN_CFG_RFRX_POL FIELD32(0x00200000)
#define TX_PIN_CFG_PA_PE_A2_EN FIELD32(0x01000000)
#define TX_PIN_CFG_PA_PE_G2_EN FIELD32(0x02000000)
#define TX_PIN_CFG_PA_PE_A2_POL FIELD32(0x04000000)
@@ -1547,6 +1602,95 @@
#define TX_PWR_CFG_4_EXT_STBC4_CH2 FIELD32(0x0000000f)
#define TX_PWR_CFG_4_EXT_STBC6_CH2 FIELD32(0x00000f00)
+/* TXn_RF_GAIN_CORRECT: RF Gain Correction for each RF_ALC[3:2]
+ * Unit: 0.1 dB, Range: -3.2 dB to 3.1 dB
+ */
+#define TX0_RF_GAIN_CORRECT 0x13a0
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_0 FIELD32(0x0000003f)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_1 FIELD32(0x00003f00)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_2 FIELD32(0x003f0000)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_3 FIELD32(0x3f000000)
+
+#define TX1_RF_GAIN_CORRECT 0x13a4
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_0 FIELD32(0x0000003f)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_1 FIELD32(0x00003f00)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_2 FIELD32(0x003f0000)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_3 FIELD32(0x3f000000)
+
+/* TXn_RF_GAIN_ATTEN: TXn RF Gain Attenuation Level
+ * Format: 7-bit, signed value
+ * Unit: 0.5 dB, Range: -20 dB to -5 dB
+ */
+#define TX0_RF_GAIN_ATTEN 0x13a8
+#define TX0_RF_GAIN_ATTEN_LEVEL_0 FIELD32(0x0000007f)
+#define TX0_RF_GAIN_ATTEN_LEVEL_1 FIELD32(0x00007f00)
+#define TX0_RF_GAIN_ATTEN_LEVEL_2 FIELD32(0x007f0000)
+#define TX0_RF_GAIN_ATTEN_LEVEL_3 FIELD32(0x7f000000)
+#define TX1_RF_GAIN_ATTEN 0x13ac
+#define TX1_RF_GAIN_ATTEN_LEVEL_0 FIELD32(0x0000007f)
+#define TX1_RF_GAIN_ATTEN_LEVEL_1 FIELD32(0x00007f00)
+#define TX1_RF_GAIN_ATTEN_LEVEL_2 FIELD32(0x007f0000)
+#define TX1_RF_GAIN_ATTEN_LEVEL_3 FIELD32(0x7f000000)
+
+/* TX_ALC_CFG_0: TX Automatic Level Control Configuration 0
+ * TX_ALC_LIMIT_n: TXn upper limit
+ * TX_ALC_CH_INIT_n: TXn channel initial transmission gain
+ * Unit: 0.5 dB, Range: 0 to 23.5 dB
+ */
+#define TX_ALC_CFG_0 0x13b0
+#define TX_ALC_CFG_0_CH_INIT_0 FIELD32(0x0000003f)
+#define TX_ALC_CFG_0_CH_INIT_1 FIELD32(0x00003f00)
+#define TX_ALC_CFG_0_LIMIT_0 FIELD32(0x003f0000)
+#define TX_ALC_CFG_0_LIMIT_1 FIELD32(0x3f000000)
+
+/* TX_ALC_CFG_1: TX Automatic Level Control Configuration 1
+ * TX_TEMP_COMP: TX Power Temperature Compensation
+ * Unit: 0.5 dB, Range: -10 dB to 10 dB
+ * TXn_GAIN_FINE: TXn Gain Fine Adjustment
+ * Unit: 0.1 dB, Range: -0.8 dB to 0.7 dB
+ * RF_TOS_DLY: Sets the RF_TOS_EN assertion delay after
+ * deassertion of PA_PE.
+ * Unit: 0.25 usec
+ * TXn_RF_GAIN_ATTEN: TXn RF gain attenuation selector
+ * RF_TOS_TIMEOUT: time-out value for RF_TOS_ENABLE
+ * deassertion if RF_TOS_DONE is missing.
+ * Unit: 0.25 usec
+ * RF_TOS_ENABLE: TX offset calibration enable
+ * ROS_BUSY_EN: RX offset calibration busy enable
+ */
+#define TX_ALC_CFG_1 0x13b4
+#define TX_ALC_CFG_1_TX_TEMP_COMP FIELD32(0x0000003f)
+#define TX_ALC_CFG_1_TX0_GAIN_FINE FIELD32(0x00000f00)
+#define TX_ALC_CFG_1_TX1_GAIN_FINE FIELD32(0x0000f000)
+#define TX_ALC_CFG_1_RF_TOS_DLY FIELD32(0x00070000)
+#define TX_ALC_CFG_1_TX0_RF_GAIN_ATTEN FIELD32(0x00300000)
+#define TX_ALC_CFG_1_TX1_RF_GAIN_ATTEN FIELD32(0x00c00000)
+#define TX_ALC_CFG_1_RF_TOS_TIMEOUT FIELD32(0x3f000000)
+#define TX_ALC_CFG_1_RF_TOS_ENABLE FIELD32(0x40000000)
+#define TX_ALC_CFG_1_ROS_BUSY_EN FIELD32(0x80000000)
+
+/* TXn_BB_GAIN_ATTEN: TXn BB Gain Attenuation Level
+ * Format: 5-bit signed values
+ * Unit: 0.5 dB, Range: -8 dB to 7 dB
+ */
+#define TX0_BB_GAIN_ATTEN 0x13c0
+#define TX0_BB_GAIN_ATTEN_LEVEL_0 FIELD32(0x0000001f)
+#define TX0_BB_GAIN_ATTEN_LEVEL_1 FIELD32(0x00001f00)
+#define TX0_BB_GAIN_ATTEN_LEVEL_2 FIELD32(0x001f0000)
+#define TX0_BB_GAIN_ATTEN_LEVEL_3 FIELD32(0x1f000000)
+#define TX1_BB_GAIN_ATTEN 0x13c4
+#define TX1_BB_GAIN_ATTEN_LEVEL_0 FIELD32(0x0000001f)
+#define TX1_BB_GAIN_ATTEN_LEVEL_1 FIELD32(0x00001f00)
+#define TX1_BB_GAIN_ATTEN_LEVEL_2 FIELD32(0x001f0000)
+#define TX1_BB_GAIN_ATTEN_LEVEL_3 FIELD32(0x1f000000)
+
+/* TX_ALC_VGA3: TX Automatic Level Correction Variable Gain Amplifier 3 */
+#define TX_ALC_VGA3 0x13c8
+#define TX_ALC_VGA3_TX0_ALC_VGA3 FIELD32(0x0000001f)
+#define TX_ALC_VGA3_TX1_ALC_VGA3 FIELD32(0x00001f00)
+#define TX_ALC_VGA3_TX0_ALC_VGA2 FIELD32(0x001f0000)
+#define TX_ALC_VGA3_TX1_ALC_VGA2 FIELD32(0x1f000000)
+
/* TX_PWR_CFG_7 */
#define TX_PWR_CFG_7 0x13d4
#define TX_PWR_CFG_7_OFDM54_CH0 FIELD32(0x0000000f)
@@ -1555,6 +1699,10 @@
#define TX_PWR_CFG_7_MCS7_CH0 FIELD32(0x000f0000)
#define TX_PWR_CFG_7_MCS7_CH1 FIELD32(0x00f00000)
#define TX_PWR_CFG_7_MCS7_CH2 FIELD32(0x0f000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_7B_54MBS FIELD32(0x000000ff)
+#define TX_PWR_CFG_7B_MCS7 FIELD32(0x00ff0000)
+
/* TX_PWR_CFG_8 */
#define TX_PWR_CFG_8 0x13d8
@@ -1564,12 +1712,17 @@
#define TX_PWR_CFG_8_MCS23_CH0 FIELD32(0x000f0000)
#define TX_PWR_CFG_8_MCS23_CH1 FIELD32(0x00f00000)
#define TX_PWR_CFG_8_MCS23_CH2 FIELD32(0x0f000000)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_8B_MCS15 FIELD32(0x000000ff)
+
/* TX_PWR_CFG_9 */
#define TX_PWR_CFG_9 0x13dc
#define TX_PWR_CFG_9_STBC7_CH0 FIELD32(0x0000000f)
#define TX_PWR_CFG_9_STBC7_CH1 FIELD32(0x000000f0)
#define TX_PWR_CFG_9_STBC7_CH2 FIELD32(0x00000f00)
+/* bits for new 2T devices */
+#define TX_PWR_CFG_9B_STBC_MCS7 FIELD32(0x000000ff)
/*
* RX_FILTER_CFG: RX configuration register.
@@ -1760,6 +1913,8 @@
#define TX_STA_FIFO_WCID FIELD32(0x0000ff00)
#define TX_STA_FIFO_SUCCESS_RATE FIELD32(0xffff0000)
#define TX_STA_FIFO_MCS FIELD32(0x007f0000)
+#define TX_STA_FIFO_BW FIELD32(0x00800000)
+#define TX_STA_FIFO_SGI FIELD32(0x01000000)
#define TX_STA_FIFO_PHYMODE FIELD32(0xc0000000)
/*
@@ -2135,11 +2290,14 @@ struct mac_iveiv_entry {
#define RFCSR1_TX1_PD FIELD8(0x20)
#define RFCSR1_RX2_PD FIELD8(0x40)
#define RFCSR1_TX2_PD FIELD8(0x80)
+#define RFCSR1_TX2_EN_MT7620 FIELD8(0x02)
/*
* RFCSR 2:
*/
#define RFCSR2_RESCAL_EN FIELD8(0x80)
+#define RFCSR2_RX2_EN_MT7620 FIELD8(0x02)
+#define RFCSR2_TX2_EN_MT7620 FIELD8(0x20)
/*
* RFCSR 3:
@@ -2158,6 +2316,12 @@ struct mac_iveiv_entry {
#define RFCSR3_BIT5 FIELD8(0x20)
/*
+ * RFCSR 4:
+ * VCOCAL_EN used by MT7620
+ */
+#define RFCSR4_VCOCAL_EN FIELD8(0x80)
+
+/*
* FRCSR 5:
*/
#define RFCSR5_R1 FIELD8(0x0c)
@@ -2212,6 +2376,7 @@ struct mac_iveiv_entry {
*/
#define RFCSR13_TX_POWER FIELD8(0x1f)
#define RFCSR13_DR0 FIELD8(0xe0)
+#define RFCSR13_RDIV_MT7620 FIELD8(0x03)
/*
* RFCSR 15:
@@ -2222,6 +2387,8 @@ struct mac_iveiv_entry {
* RFCSR 16:
*/
#define RFCSR16_TXMIXER_GAIN FIELD8(0x07)
+#define RFCSR16_RF_PLL_FREQ_SEL_MT7620 FIELD8(0x0F)
+#define RFCSR16_SDM_MODE_MT7620 FIELD8(0xE0)
/*
* RFCSR 17:
@@ -2234,6 +2401,8 @@ struct mac_iveiv_entry {
/* RFCSR 18 */
#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
+/* RFCSR 19 */
+#define RFCSR19_K FIELD8(0x03)
/*
* RFCSR 20:
@@ -2244,11 +2413,14 @@ struct mac_iveiv_entry {
* RFCSR 21:
*/
#define RFCSR21_RX_LO2_EN FIELD8(0x08)
+#define RFCSR21_BIT1 FIELD8(0x01)
+#define RFCSR21_BIT8 FIELD8(0x80)
/*
* RFCSR 22:
*/
#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
+#define RFCSR22_FREQPLAN_D_MT7620 FIELD8(0x07)
/*
* RFCSR 23:
@@ -2271,6 +2443,11 @@ struct mac_iveiv_entry {
#define RFCSR27_R4 FIELD8(0x40)
/*
+ * RFCSR 28:
+ */
+#define RFCSR28_CH11_HT40 FIELD8(0x04)
+
+/*
* RFCSR 29:
*/
#define RFCSR29_ADC6_TEST FIELD8(0x01)
@@ -2331,6 +2508,7 @@ struct mac_iveiv_entry {
*/
#define RFCSR42_BIT1 FIELD8(0x01)
#define RFCSR42_BIT4 FIELD8(0x08)
+#define RFCSR42_TX2_EN_MT7620 FIELD8(0x40)
/*
* RFCSR 49:
@@ -2433,6 +2611,7 @@ enum rt2800_eeprom_word {
EEPROM_TSSI_BOUND_BG5,
EEPROM_TXPOWER_A1,
EEPROM_TXPOWER_A2,
+ EEPROM_TXPOWER_INIT,
EEPROM_TSSI_BOUND_A1,
EEPROM_TSSI_BOUND_A2,
EEPROM_TSSI_BOUND_A3,
@@ -2987,29 +3166,4 @@ enum rt2800_eeprom_word {
*/
#define BCN_TBTT_OFFSET 64
-/*
- * Hardware has 255 WCID table entries. First 32 entries are reserved for
- * shared keys. Since parts of the pairwise key table might be shared with
- * the beacon frame buffers 6 & 7 we could only use the first 222 entries.
- */
-#define WCID_START 33
-#define WCID_END 222
-#define STA_IDS_SIZE (WCID_END - WCID_START + 2)
-
-/*
- * RT2800 driver data structure
- */
-struct rt2800_drv_data {
- u8 calibration_bw20;
- u8 calibration_bw40;
- u8 bbp25;
- u8 bbp26;
- u8 txmixer_gain_24g;
- u8 txmixer_gain_5g;
- u8 max_psdu;
- unsigned int tbtt_tick;
- unsigned int ampdu_factor_cnt[4];
- DECLARE_BITMAP(sta_ids, STA_IDS_SIZE);
-};
-
#endif /* RT2800_H */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 8223a1520316..d11c7b210e81 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -59,6 +59,9 @@
rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
#define WAIT_FOR_RFCSR(__dev, __reg) \
rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
+#define WAIT_FOR_RFCSR_MT7620(__dev, __reg) \
+ rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY_MT7620, \
+ (__reg))
#define WAIT_FOR_RF(__dev, __reg) \
rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
#define WAIT_FOR_MCU(__dev, __reg) \
@@ -150,19 +153,56 @@ static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev,
* Wait until the RFCSR becomes available, afterwards we
* can safely write the new data into the register.
*/
- if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
- reg = 0;
- rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
- rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
- rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
- rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+ switch (rt2x00dev->chip.rt) {
+ case RT6352:
+ if (WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, RF_CSR_CFG_DATA_MT7620, value);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM_MT7620,
+ word);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE_MT7620, 1);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY_MT7620, 1);
+
+ rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+ }
+ break;
- rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+ default:
+ if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+
+ rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+ }
+ break;
}
mutex_unlock(&rt2x00dev->csr_mutex);
}
+static void rt2800_rfcsr_write_bank(struct rt2x00_dev *rt2x00dev, const u8 bank,
+ const unsigned int reg, const u8 value)
+{
+ rt2800_rfcsr_write(rt2x00dev, (reg | (bank << 6)), value);
+}
+
+static void rt2800_rfcsr_write_chanreg(struct rt2x00_dev *rt2x00dev,
+ const unsigned int reg, const u8 value)
+{
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, reg, value);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, reg, value);
+}
+
+static void rt2800_rfcsr_write_dccal(struct rt2x00_dev *rt2x00dev,
+ const unsigned int reg, const u8 value)
+{
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, reg, value);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, reg, value);
+}
+
static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
const unsigned int word, u8 *value)
{
@@ -178,22 +218,48 @@ static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
* doesn't become available in time, reg will be 0xffffffff
* which means we return 0xff to the caller.
*/
- if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
- reg = 0;
- rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
- rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
- rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+ switch (rt2x00dev->chip.rt) {
+ case RT6352:
+ if (WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM_MT7620,
+ word);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE_MT7620, 0);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY_MT7620, 1);
- rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+ rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
- WAIT_FOR_RFCSR(rt2x00dev, &reg);
- }
+ WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg);
+ }
+
+ *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA_MT7620);
+ break;
+
+ default:
+ if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
+ rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
- *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
+ rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
+
+ WAIT_FOR_RFCSR(rt2x00dev, &reg);
+ }
+
+ *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
+ break;
+ }
mutex_unlock(&rt2x00dev->csr_mutex);
}
+static void rt2800_rfcsr_read_bank(struct rt2x00_dev *rt2x00dev, const u8 bank,
+ const unsigned int reg, u8 *value)
+{
+ rt2800_rfcsr_read(rt2x00dev, (reg | (bank << 6)), value);
+}
+
static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
const unsigned int word, const u32 value)
{
@@ -250,6 +316,7 @@ static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = {
[EEPROM_TSSI_BOUND_BG5] = 0x003b,
[EEPROM_TXPOWER_A1] = 0x003c,
[EEPROM_TXPOWER_A2] = 0x0053,
+ [EEPROM_TXPOWER_INIT] = 0x0068,
[EEPROM_TSSI_BOUND_A1] = 0x006a,
[EEPROM_TSSI_BOUND_A2] = 0x006b,
[EEPROM_TSSI_BOUND_A3] = 0x006c,
@@ -524,6 +591,7 @@ void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
break;
case RT5592:
+ case RT6352:
*txwi_size = TXWI_DESC_SIZE_5WORDS;
*rxwi_size = RXWI_DESC_SIZE_6WORDS;
break;
@@ -821,10 +889,10 @@ void rt2800_process_rxwi(struct queue_entry *entry,
rt2x00_desc_read(rxwi, 1, &word);
if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI))
- rxdesc->flags |= RX_FLAG_SHORT_GI;
+ rxdesc->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rt2x00_get_field32(word, RXWI_W1_BW))
- rxdesc->flags |= RX_FLAG_40MHZ;
+ rxdesc->bw = RATE_INFO_BW_40;
/*
* Detect RX rate, always use MCS as signal type.
@@ -852,14 +920,49 @@ void rt2800_process_rxwi(struct queue_entry *entry,
}
EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
-void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
+static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc,
+ u32 status, enum nl80211_band band)
+{
+ u8 flags = 0;
+ u8 idx = rt2x00_get_field32(status, TX_STA_FIFO_MCS);
+
+ switch (rt2x00_get_field32(status, TX_STA_FIFO_PHYMODE)) {
+ case RATE_MODE_HT_GREENFIELD:
+ flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case RATE_MODE_HT_MIX:
+ flags |= IEEE80211_TX_RC_MCS;
+ break;
+ case RATE_MODE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+ break;
+ case RATE_MODE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+ break;
+ }
+
+ if (rt2x00_get_field32(status, TX_STA_FIFO_BW))
+ flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+ if (rt2x00_get_field32(status, TX_STA_FIFO_SGI))
+ flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ skbdesc->tx_rate_idx = idx;
+ skbdesc->tx_rate_flags = flags;
+}
+
+void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
+ bool match)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct txdone_entry_desc txdesc;
u32 word;
u16 mcs, real_mcs;
- int aggr, ampdu;
+ int aggr, ampdu, wcid, ack_req;
/*
* Obtain the status about this packet.
@@ -872,6 +975,8 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
real_mcs = rt2x00_get_field32(status, TX_STA_FIFO_MCS);
aggr = rt2x00_get_field32(status, TX_STA_FIFO_TX_AGGRE);
+ wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+ ack_req = rt2x00_get_field32(status, TX_STA_FIFO_TX_ACK_REQUIRED);
/*
* If a frame was meant to be sent as a single non-aggregated MPDU
@@ -888,15 +993,22 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
* Hence, replace the requested rate with the real tx rate to not
* confuse the rate control algorithm by providing clearly wrong
* data.
- */
- if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) {
- skbdesc->tx_rate_idx = real_mcs;
+ *
+ * FIXME: if we do not find matching entry, we tell that frame was
+ * posted without any retries. We need to find a way to fix that
+ * and provide retry count.
+ */
+ if (unlikely((aggr == 1 && ampdu == 0 && real_mcs != mcs)) || !match) {
+ rt2800_rate_from_status(skbdesc, status, rt2x00dev->curr_band);
mcs = real_mcs;
}
if (aggr == 1 || ampdu == 1)
__set_bit(TXDONE_AMPDU, &txdesc.flags);
+ if (!ack_req)
+ __set_bit(TXDONE_NO_ACK_REQ, &txdesc.flags);
+
/*
* Ralink has a retry mechanism using a global fallback
* table. We setup this fallback table to try the immediate
@@ -928,7 +1040,18 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
if (txdesc.retry)
__set_bit(TXDONE_FALLBACK, &txdesc.flags);
- rt2x00lib_txdone(entry, &txdesc);
+ if (!match) {
+ /* RCU assures non-null sta will not be freed by mac80211. */
+ rcu_read_lock();
+ if (likely(wcid >= WCID_START && wcid <= WCID_END))
+ skbdesc->sta = drv_data->wcid_to_sta[wcid - WCID_START];
+ else
+ skbdesc->sta = NULL;
+ rt2x00lib_txdone_nomatch(entry, &txdesc);
+ rcu_read_unlock();
+ } else {
+ rt2x00lib_txdone(entry, &txdesc);
+ }
}
EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
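
Reviewer note: rt2800_rate_from_status() above is pure bitfield extraction from the 32-bit TX_STA_FIFO word. For reference, this is how the FIELD32() accessors resolve: each field is a mask, and the value is shifted down by the mask's lowest set bit. A stand-alone check using the TX_STA_FIFO masks from rt2800.h:

#include <stdint.h>
#include <stdio.h>

/* Divide by the mask's lowest set bit == shift right to bit 0. */
#define FIELD_GET32(mask, word) (((word) & (mask)) / ((mask) & ~((mask) - 1)))

#define TX_STA_FIFO_MCS	0x007f0000u
#define TX_STA_FIFO_BW	0x00800000u
#define TX_STA_FIFO_SGI	0x01000000u

int main(void)
{
	uint32_t status = 0x01870000;	/* example register snapshot */

	/* Prints mcs=7 bw=1 sgi=1 for the snapshot above. */
	printf("mcs=%u bw=%u sgi=%u\n",
	       (unsigned)FIELD_GET32(TX_STA_FIFO_MCS, status),
	       (unsigned)FIELD_GET32(TX_STA_FIFO_BW, status),
	       (unsigned)FIELD_GET32(TX_STA_FIFO_SGI, status));
	return 0;
}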
@@ -1468,6 +1591,7 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
return 0;
__set_bit(wcid - WCID_START, drv_data->sta_ids);
+ drv_data->wcid_to_sta[wcid - WCID_START] = sta;
/*
* Clean up WCID attributes and write STA address to the device.
@@ -1498,6 +1622,7 @@ int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta)
* get renewed when the WCID is reused.
*/
rt2800_config_wcid(rt2x00dev, NULL, wcid);
+ drv_data->wcid_to_sta[wcid - WCID_START] = NULL;
__clear_bit(wcid - WCID_START, drv_data->sta_ids);
return 0;
@@ -2753,7 +2878,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 59,
r59_nonbt_rev[idx]);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
static const char r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
@@ -3047,6 +3173,244 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x19 : 0x7F);
}
+static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u8 rx_agc_fc, tx_agc_fc;
+ u8 rfcsr;
+
+ /* Frequency plan setting */
+ /* Rdiv setting (set 0x03 if Xtal==20)
+ * R13[1:0]
+ */
+ rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR13_RDIV_MT7620,
+ rt2800_clk_is_20mhz(rt2x00dev) ? 3 : 0);
+ rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
+
+ /* N setting
+ * R20[7:0] in rf->rf1
+ * R21[0] always 0
+ */
+ rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
+ rfcsr = (rf->rf1 & 0x00ff);
+ rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR21_BIT1, 0);
+ rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
+
+ /* K setting (always 0)
+ * R16[3:0] (RF PLL freq selection)
+ */
+ rt2800_rfcsr_read(rt2x00dev, 16, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR16_RF_PLL_FREQ_SEL_MT7620, 0);
+ rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
+
+ /* D setting (always 0)
+ * R22[2:0] (D=15, R22[2:0]=<111>)
+ */
+ rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR22_FREQPLAN_D_MT7620, 0);
+ rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+ /* Ksd setting
+ * Ksd: R17<7:0> in rf->rf2
+ * R18<7:0> in rf->rf3
+ * R19<1:0> in rf->rf4
+ */
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rfcsr = rf->rf2;
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
+ rfcsr = rf->rf3;
+ rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 19, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR19_K, rf->rf4);
+ rt2800_rfcsr_write(rt2x00dev, 19, rfcsr);
+
+ /* Default: XO=20MHz , SDM mode */
+ rt2800_rfcsr_read(rt2x00dev, 16, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR16_SDM_MODE_MT7620, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 16, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR21_BIT8, 1);
+ rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_EN_MT7620,
+ rt2x00dev->default_ant.tx_chain_num != 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR2_TX2_EN_MT7620,
+ rt2x00dev->default_ant.tx_chain_num != 1);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RX2_EN_MT7620,
+ rt2x00dev->default_ant.rx_chain_num != 1);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 42, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR42_TX2_EN_MT7620,
+ rt2x00dev->default_ant.tx_chain_num != 1);
+ rt2800_rfcsr_write(rt2x00dev, 42, rfcsr);
+
+ /* RF for DC Cal BW */
+ if (conf_is_ht40(conf)) {
+ rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x10);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x10);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x04);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x10);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x10);
+ } else {
+ rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x20);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x20);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x20);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x20);
+ }
+
+ if (conf_is_ht40(conf)) {
+ rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x08);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x08);
+ } else {
+ rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x28);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x28);
+ }
+
+ rt2800_rfcsr_read(rt2x00dev, 28, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR28_CH11_HT40,
+ conf_is_ht40(conf) && (rf->channel == 11));
+ rt2800_rfcsr_write(rt2x00dev, 28, rfcsr);
+
+ if (!test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) {
+ if (conf_is_ht40(conf)) {
+ rx_agc_fc = drv_data->rx_calibration_bw40;
+ tx_agc_fc = drv_data->tx_calibration_bw40;
+ } else {
+ rx_agc_fc = drv_data->rx_calibration_bw20;
+ tx_agc_fc = drv_data->tx_calibration_bw20;
+ }
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rfcsr);
+ rfcsr &= (~0x3F);
+ rfcsr |= rx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rfcsr);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rfcsr);
+ rfcsr &= (~0x3F);
+ rfcsr |= rx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rfcsr);
+ rt2800_rfcsr_read_bank(rt2x00dev, 7, 6, &rfcsr);
+ rfcsr &= (~0x3F);
+ rfcsr |= rx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 6, rfcsr);
+ rt2800_rfcsr_read_bank(rt2x00dev, 7, 7, &rfcsr);
+ rfcsr &= (~0x3F);
+ rfcsr |= rx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 7, rfcsr);
+
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rfcsr);
+ rfcsr &= (~0x3F);
+ rfcsr |= tx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rfcsr);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rfcsr);
+ rfcsr &= (~0x3F);
+ rfcsr |= tx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rfcsr);
+ rt2800_rfcsr_read_bank(rt2x00dev, 7, 58, &rfcsr);
+ rfcsr &= (~0x3F);
+ rfcsr |= tx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 58, rfcsr);
+ rt2800_rfcsr_read_bank(rt2x00dev, 7, 59, &rfcsr);
+ rfcsr &= (~0x3F);
+ rfcsr |= tx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr);
+ }
+}
+
+static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_channel *chan,
+ int power_level) {
+ u16 eeprom, target_power, max_power;
+ u32 mac_sys_ctrl, mac_status;
+ u32 reg;
+ u8 bbp;
+ int i;
+
+ /* hardware unit is 0.5dBm, limited to 23.5dBm */
+ power_level *= 2;
+ if (power_level > 0x2f)
+ power_level = 0x2f;
+
+ max_power = chan->max_power * 2;
+ if (max_power > 0x2f)
+ max_power = 0x2f;
+
+ rt2800_register_read(rt2x00dev, TX_ALC_CFG_0, &reg);
+ rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, power_level);
+ rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, power_level);
+ rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_0, max_power);
+ rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_1, max_power);
+
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_INTERNAL_TX_ALC)) {
+ /* init base power by eeprom target power */
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_INIT,
+ &target_power);
+ rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, target_power);
+ rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, target_power);
+ }
+ rt2800_register_write(rt2x00dev, TX_ALC_CFG_0, reg);
+
+ rt2800_register_read(rt2x00dev, TX_ALC_CFG_1, &reg);
+ rt2x00_set_field32(&reg, TX_ALC_CFG_1_TX_TEMP_COMP, 0);
+ rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg);
+
+ /* Save MAC SYS CTRL registers */
+ rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &mac_sys_ctrl);
+ /* Disable Tx/Rx */
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
+ /* Check MAC Tx/Rx idle */
+ for (i = 0; i < 10000; i++) {
+ rt2800_register_read(rt2x00dev, MAC_STATUS_CFG,
+ &mac_status);
+ if (mac_status & 0x3)
+ usleep_range(50, 200);
+ else
+ break;
+ }
+
+ if (i == 10000)
+ rt2x00_warn(rt2x00dev, "Wait MAC Status to MAX !!!\n");
+
+ if (chan->center_freq > 2457) {
+ rt2800_bbp_read(rt2x00dev, 30, &bbp);
+ bbp = 0x40;
+ rt2800_bbp_write(rt2x00dev, 30, bbp);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0);
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xfb);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 42, 0x7b);
+ } else {
+ rt2800_bbp_read(rt2x00dev, 30, &bbp);
+ bbp = 0x1f;
+ rt2800_bbp_write(rt2x00dev, 30, bbp);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x80);
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xdb);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
+ }
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, mac_sys_ctrl);
+
+ rt2800_vco_calibration(rt2x00dev);
+}
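
Reviewer note: rt2800_config_alc() starts by converting the requested power level from dBm into the hardware's 0.5 dB units and capping it at 0x2f (23.5 dBm), as the comment at the top of the function says. That conversion in isolation, as a minimal sketch:

#include <stdio.h>

static int dbm_to_alc_units(int dbm)
{
	int v = dbm * 2;	/* one hardware step == 0.5 dB */

	if (v > 0x2f)		/* 6-bit register fields, max 23.5 dBm */
		v = 0x2f;
	return v;
}

int main(void)
{
	/* Prints 0x28 and 0x2f: 30 dBm is clamped to the 23.5 dBm cap. */
	printf("20 dBm -> 0x%02x, 30 dBm -> 0x%02x\n",
	       dbm_to_alc_units(20), dbm_to_alc_units(30));
	return 0;
}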
+
static void rt2800_bbp_write_with_rx_chain(struct rt2x00_dev *rt2x00dev,
const unsigned int word,
const u8 value)
@@ -3171,7 +3535,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
struct channel_info *info)
{
u32 reg;
- unsigned int tx_pin;
+ u32 tx_pin;
u8 bbp, rfcsr;
info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
@@ -3216,6 +3580,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF5592:
rt2800_config_channel_rf55xx(rt2x00dev, conf, rf, info);
break;
+ case RF7620:
+ rt2800_config_channel_rf7620(rt2x00dev, conf, rf, info);
+ break;
default:
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
@@ -3290,7 +3657,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rf->channel <= 14) {
if (!rt2x00_rt(rt2x00dev, RT5390) &&
- !rt2x00_rt(rt2x00dev, RT5392)) {
+ !rt2x00_rt(rt2x00dev, RT5392) &&
+ !rt2x00_rt(rt2x00dev, RT6352)) {
if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
@@ -3310,7 +3678,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 82, 0x94);
else if (rt2x00_rt(rt2x00dev, RT3593))
rt2800_bbp_write(rt2x00dev, 82, 0x82);
- else
+ else if (!rt2x00_rt(rt2x00dev, RT6352))
rt2800_bbp_write(rt2x00dev, 82, 0xf2);
if (rt2x00_rt(rt2x00dev, RT3593))
@@ -3331,7 +3699,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_rfcsr_write(rt2x00dev, 8, 0);
- tx_pin = 0;
+ rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
@@ -3380,6 +3748,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFRX_EN, 1); /* mt7620 */
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
@@ -3438,12 +3807,26 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
usleep_range(1000, 1500);
}
- if (rt2x00_rt(rt2x00dev, RT5592)) {
+ if (rt2x00_rt(rt2x00dev, RT5592) || rt2x00_rt(rt2x00dev, RT6352)) {
+ reg = 0x10;
+ if (!conf_is_ht40(conf)) {
+ if (rt2x00_rt(rt2x00dev, RT6352) &&
+ rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
+ reg |= 0x5;
+ } else {
+ reg |= 0xa;
+ }
+ }
rt2800_bbp_write(rt2x00dev, 195, 141);
- rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
+ rt2800_bbp_write(rt2x00dev, 196, reg);
/* AGC init */
- reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2 * rt2x00dev->lna_gain;
+ if (rt2x00_rt(rt2x00dev, RT6352))
+ reg = 0x04;
+ else
+ reg = rf->channel <= 14 ? 0x1c : 0x24;
+
+ reg += 2 * rt2x00dev->lna_gain;
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
rt2800_iq_calibrate(rt2x00dev, rf->channel);
@@ -4125,6 +4508,128 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
(unsigned long) regs[i]);
}
+static void rt2800_config_txpower_rt6352(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_channel *chan,
+ int power_level)
+{
+ u32 reg, pwreg;
+ u16 eeprom;
+ u32 data, gdata;
+ u8 t, i;
+ enum nl80211_band band = chan->band;
+ int delta;
+
+ /* Warn user if bw_comp is set in EEPROM */
+ delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);
+
+ if (delta)
+ rt2x00_warn(rt2x00dev, "ignoring EEPROM HT40 power delta: %d\n",
+ delta);
+
+ /* populate TX_PWR_CFG_0 up to TX_PWR_CFG_4 from EEPROM for HT20, limit
+ * value to 0x3f and replace 0x20 by 0x21 as this is what the vendor
+ * driver does as well, though it looks kinda wrong.
+ * Maybe some misunderstanding of what a signed 8-bit value is? Maybe
+ * the hardware has a problem handling 0x20, and as the code initially
+ * used a fixed offset between HT20 and HT40 rates they had to work-
+ * around that issue and most likely just forgot about it later on.
+ * Maybe we should use rt2800_get_txpower_bw_comp() here as well,
+ * however, the corresponding EEPROM value is not respected by the
+ * vendor driver, so maybe this is rather being taken care of by the
+ * TXALC and the driver doesn't need to handle it...?
+ * Though this is all very awkward, just do as they did, as that's what
+ * board vendors expected when they populated the EEPROM...
+ */
+ for (i = 0; i < 5; i++) {
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ i * 2, &eeprom);
+
+ data = eeprom;
+
+ t = eeprom & 0x3f;
+ if (t == 32)
+ t++;
+
+ gdata = t;
+
+ t = (eeprom & 0x3f00) >> 8;
+ if (t == 32)
+ t++;
+
+ gdata |= (t << 8);
+
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ (i * 2) + 1, &eeprom);
+
+ t = eeprom & 0x3f;
+ if (t == 32)
+ t++;
+
+ gdata |= (t << 16);
+
+ t = (eeprom & 0x3f00) >> 8;
+ if (t == 32)
+ t++;
+
+ gdata |= (t << 24);
+ data |= (eeprom << 16);
+
+ if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) {
+ /* HT20 */
+ if (data != 0xffffffff)
+ rt2800_register_write(rt2x00dev,
+ TX_PWR_CFG_0 + (i * 4),
+ data);
+ } else {
+ /* HT40 */
+ if (gdata != 0xffffffff)
+ rt2800_register_write(rt2x00dev,
+ TX_PWR_CFG_0 + (i * 4),
+ gdata);
+ }
+ }
+
+ /* Apparently Ralink ran out of space in the BYRATE calibration section
+ * of the EEPROM, which is copied to the corresponding TX_PWR_CFG_x
+ * registers. As recent 2T chips use 8-bit instead of 4-bit values for
+ * power offsets, more space would be needed. Ralink decided to keep the
+ * EEPROM layout untouched and rather have some shared values covering
+ * multiple bitrates.
+ * Populate the registers not covered by the EEPROM in the same way the
+ * vendor driver does.
+ */
+
+ /* For OFDM 54MBS use value from OFDM 48MBS */
+ pwreg = 0;
+ rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
+ t = rt2x00_get_field32(reg, TX_PWR_CFG_1B_48MBS);
+ rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_54MBS, t);
+
+ /* For MCS 7 use value from MCS 6 */
+ rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
+ t = rt2x00_get_field32(reg, TX_PWR_CFG_2B_MCS6_MCS7);
+ rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_MCS7, t);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, pwreg);
+
+ /* For MCS 15 use value from MCS 14 */
+ pwreg = 0;
+ rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
+ t = rt2x00_get_field32(reg, TX_PWR_CFG_3B_MCS14);
+ rt2x00_set_field32(&pwreg, TX_PWR_CFG_8B_MCS15, t);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, pwreg);
+
+ /* For STBC MCS 7 use value from STBC MCS 6 */
+ pwreg = 0;
+ rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
+ t = rt2x00_get_field32(reg, TX_PWR_CFG_4B_STBC_MCS6);
+ rt2x00_set_field32(&pwreg, TX_PWR_CFG_9B_STBC_MCS7, t);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, pwreg);
+
+ rt2800_config_alc(rt2x00dev, chan, power_level);
+
+ /* TODO: temperature compensation code! */
+}
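
A minimal standalone sketch of the 6-bit clamp and the 0x20 -> 0x21 quirk applied in the EEPROM loop above, showing how one pair of EEPROM words becomes an HT40 TX_PWR_CFG_x value. fixup_pwr and the sample words are illustrative only, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Clamp a raw per-rate power value to 6 bits and apply the vendor's
 * 0x20 -> 0x21 replacement described in the comment above.
 */
static uint32_t fixup_pwr(uint16_t raw)
{
	uint8_t t = raw & 0x3f;

	if (t == 32)
		t++;
	return t;
}

int main(void)
{
	/* Two example words as read from EEPROM_TXPOWER_BYRATE. */
	uint16_t lo = 0x2013, hi = 0x1f20;
	uint32_t gdata;

	gdata  = fixup_pwr(lo);
	gdata |= fixup_pwr(lo >> 8) << 8;
	gdata |= fixup_pwr(hi) << 16;
	gdata |= fixup_pwr(hi >> 8) << 24;

	printf("TX_PWR_CFG_x (HT40) = 0x%08x\n", (unsigned int)gdata);
	return 0;
}

Both 0x20 bytes come out as 0x21, so the example prints 0x1f212113.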
+
/*
* We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
* BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values,
@@ -4321,6 +4826,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
{
if (rt2x00_rt(rt2x00dev, RT3593))
rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
+ else if (rt2x00_rt(rt2x00dev, RT6352))
+ rt2800_config_txpower_rt6352(rt2x00dev, chan, power_level);
else
rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level);
}
@@ -4336,6 +4843,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
{
u32 tx_pin;
u8 rfcsr;
+ unsigned long min_sleep = 0;
/*
* A voltage-controlled oscillator(VCO) is an electronic oscillator
@@ -4374,6 +4882,15 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+ min_sleep = 1000;
+ break;
+ case RF7620:
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x0C);
+ rt2800_rfcsr_read(rt2x00dev, 4, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR4_VCOCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 4, rfcsr);
+ min_sleep = 2000;
break;
default:
WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration",
@@ -4381,7 +4898,8 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
return;
}
- usleep_range(1000, 1500);
+ if (min_sleep > 0)
+ usleep_range(min_sleep, min_sleep * 2);
rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
if (rt2x00dev->rf_channel <= 14) {
@@ -4413,6 +4931,42 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
}
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
+ if (rt2x00_rt(rt2x00dev, RT6352)) {
+ if (rt2x00dev->default_ant.rx_chain_num == 1) {
+ rt2800_bbp_write(rt2x00dev, 91, 0x07);
+ rt2800_bbp_write(rt2x00dev, 95, 0x1A);
+ rt2800_bbp_write(rt2x00dev, 195, 128);
+ rt2800_bbp_write(rt2x00dev, 196, 0xA0);
+ rt2800_bbp_write(rt2x00dev, 195, 170);
+ rt2800_bbp_write(rt2x00dev, 196, 0x12);
+ rt2800_bbp_write(rt2x00dev, 195, 171);
+ rt2800_bbp_write(rt2x00dev, 196, 0x10);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 91, 0x06);
+ rt2800_bbp_write(rt2x00dev, 95, 0x9A);
+ rt2800_bbp_write(rt2x00dev, 195, 128);
+ rt2800_bbp_write(rt2x00dev, 196, 0xE0);
+ rt2800_bbp_write(rt2x00dev, 195, 170);
+ rt2800_bbp_write(rt2x00dev, 196, 0x30);
+ rt2800_bbp_write(rt2x00dev, 195, 171);
+ rt2800_bbp_write(rt2x00dev, 196, 0x30);
+ }
+
+ if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
+ rt2800_bbp_write(rt2x00dev, 75, 0x68);
+ rt2800_bbp_write(rt2x00dev, 76, 0x4C);
+ rt2800_bbp_write(rt2x00dev, 79, 0x1C);
+ rt2800_bbp_write(rt2x00dev, 80, 0x0C);
+ rt2800_bbp_write(rt2x00dev, 82, 0xB6);
+ }
+
+ /* On 11A, we should delay and wait for the RF/BBP to become stable;
+ * the appropriate time is about 1000 microseconds.
+ * 2005/06/05 - On 11G we also need this delay time,
+ * otherwise it is difficult to pass the WHQL tests.
+ */
+ usleep_range(1000, 1500);
+ }
}
EXPORT_SYMBOL_GPL(rt2800_vco_calibration);
@@ -4511,7 +5065,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
- rt2x00_rt(rt2x00dev, RT5592))
+ rt2x00_rt(rt2x00dev, RT5592) ||
+ rt2x00_rt(rt2x00dev, RT6352))
vgc = 0x1c + (2 * rt2x00dev->lna_gain);
else
vgc = 0x2e + rt2x00dev->lna_gain;
@@ -4738,7 +5293,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
0x00000000);
}
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -4748,6 +5304,24 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else if (rt2x00_rt(rt2x00dev, RT5350)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
+ } else if (rt2x00_rt(rt2x00dev, RT6352)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
+ rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x06060606);
+ rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
+ rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);
+ rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C666C);
+ rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6C6C666C);
+ rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT,
+ 0x3630363A);
+ rt2800_register_write(rt2x00dev, TX1_RF_GAIN_CORRECT,
+ 0x3630363A);
+ rt2800_register_read(rt2x00dev, TX_ALC_CFG_1, &reg);
+ rt2x00_set_field32(&reg, TX_ALC_CFG_1_ROS_BUSY_EN, 0);
+ rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -5729,6 +6303,231 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
}
+static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, const u8 value)
+{
+ rt2800_bbp_write(rt2x00dev, 195, reg);
+ rt2800_bbp_write(rt2x00dev, 196, value);
+}
+
+static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, const u8 value)
+{
+ rt2800_bbp_write(rt2x00dev, 158, reg);
+ rt2800_bbp_write(rt2x00dev, 159, value);
+}
+
+static void rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, u8 *value)
+{
+ rt2800_bbp_write(rt2x00dev, 158, reg);
+ rt2800_bbp_read(rt2x00dev, 159, value);
+}
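
The three helpers above implement an index/data indirect access scheme: BBP 195 (GLRT) or BBP 158 (DCOC) selects a sub-register, and BBP 196 or 159 carries the value. A toy standalone model of that pattern, with an array standing in for the hardware (all names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct idx_data {
	uint8_t index;
	uint8_t store[256];
};

static void idx_write(struct idx_data *r, uint8_t reg, uint8_t val)
{
	r->index = reg;			/* like rt2800_bbp_write(dev, 195, reg) */
	r->store[r->index] = val;	/* like rt2800_bbp_write(dev, 196, val) */
}

static uint8_t idx_read(struct idx_data *r, uint8_t reg)
{
	r->index = reg;			/* like rt2800_bbp_write(dev, 158, reg) */
	return r->store[r->index];	/* like rt2800_bbp_read(dev, 159, &val) */
}

int main(void)
{
	struct idx_data regs = {0};

	idx_write(&regs, 141, 0x1A);
	printf("GLRT[141] = 0x%02X\n", idx_read(&regs, 141));
	return 0;
}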
+
+static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev)
+{
+ u8 bbp;
+
+ /* Apply Maximum Likelihood Detection (MLD) for the 2-stream case */
+ rt2800_bbp_read(rt2x00dev, 105, &bbp);
+ rt2x00_set_field8(&bbp, BBP105_MLD,
+ rt2x00dev->default_ant.rx_chain_num == 2);
+ rt2800_bbp_write(rt2x00dev, 105, bbp);
+
+ /* Avoid data loss and CRC errors */
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ /* Fix I/Q swap issue */
+ rt2800_bbp_read(rt2x00dev, 1, &bbp);
+ bbp |= 0x04;
+ rt2800_bbp_write(rt2x00dev, 1, bbp);
+
+ /* BBP for G band */
+ rt2800_bbp_write(rt2x00dev, 3, 0x08);
+ rt2800_bbp_write(rt2x00dev, 4, 0x00); /* rt2800_bbp4_mac_if_ctrl? */
+ rt2800_bbp_write(rt2x00dev, 6, 0x08);
+ rt2800_bbp_write(rt2x00dev, 14, 0x09);
+ rt2800_bbp_write(rt2x00dev, 15, 0xFF);
+ rt2800_bbp_write(rt2x00dev, 16, 0x01);
+ rt2800_bbp_write(rt2x00dev, 20, 0x06);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+ rt2800_bbp_write(rt2x00dev, 22, 0x00);
+ rt2800_bbp_write(rt2x00dev, 27, 0x00);
+ rt2800_bbp_write(rt2x00dev, 28, 0x00);
+ rt2800_bbp_write(rt2x00dev, 30, 0x00);
+ rt2800_bbp_write(rt2x00dev, 31, 0x48);
+ rt2800_bbp_write(rt2x00dev, 47, 0x40);
+ rt2800_bbp_write(rt2x00dev, 62, 0x00);
+ rt2800_bbp_write(rt2x00dev, 63, 0x00);
+ rt2800_bbp_write(rt2x00dev, 64, 0x00);
+ rt2800_bbp_write(rt2x00dev, 65, 0x2C);
+ rt2800_bbp_write(rt2x00dev, 66, 0x1C);
+ rt2800_bbp_write(rt2x00dev, 67, 0x20);
+ rt2800_bbp_write(rt2x00dev, 68, 0xDD);
+ rt2800_bbp_write(rt2x00dev, 69, 0x10);
+ rt2800_bbp_write(rt2x00dev, 70, 0x05);
+ rt2800_bbp_write(rt2x00dev, 73, 0x18);
+ rt2800_bbp_write(rt2x00dev, 74, 0x0F);
+ rt2800_bbp_write(rt2x00dev, 75, 0x60);
+ rt2800_bbp_write(rt2x00dev, 76, 0x44);
+ rt2800_bbp_write(rt2x00dev, 77, 0x59);
+ rt2800_bbp_write(rt2x00dev, 78, 0x1E);
+ rt2800_bbp_write(rt2x00dev, 79, 0x1C);
+ rt2800_bbp_write(rt2x00dev, 80, 0x0C);
+ rt2800_bbp_write(rt2x00dev, 81, 0x3A);
+ rt2800_bbp_write(rt2x00dev, 82, 0xB6);
+ rt2800_bbp_write(rt2x00dev, 83, 0x9A);
+ rt2800_bbp_write(rt2x00dev, 84, 0x9A);
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ rt2800_bbp_write(rt2x00dev, 88, 0x90);
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+ rt2800_bbp_write(rt2x00dev, 95, 0x9A);
+ rt2800_bbp_write(rt2x00dev, 96, 0x00);
+ rt2800_bbp_write(rt2x00dev, 103, 0xC0);
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+ /* FIXME: BBP105 overwrite */
+ rt2800_bbp_write(rt2x00dev, 105, 0x3C);
+ rt2800_bbp_write(rt2x00dev, 106, 0x12);
+ rt2800_bbp_write(rt2x00dev, 109, 0x00);
+ rt2800_bbp_write(rt2x00dev, 134, 0x10);
+ rt2800_bbp_write(rt2x00dev, 135, 0xA6);
+ rt2800_bbp_write(rt2x00dev, 137, 0x04);
+ rt2800_bbp_write(rt2x00dev, 142, 0x30);
+ rt2800_bbp_write(rt2x00dev, 143, 0xF7);
+ rt2800_bbp_write(rt2x00dev, 160, 0xEC);
+ rt2800_bbp_write(rt2x00dev, 161, 0xC4);
+ rt2800_bbp_write(rt2x00dev, 162, 0x77);
+ rt2800_bbp_write(rt2x00dev, 163, 0xF9);
+ rt2800_bbp_write(rt2x00dev, 164, 0x00);
+ rt2800_bbp_write(rt2x00dev, 165, 0x00);
+ rt2800_bbp_write(rt2x00dev, 186, 0x00);
+ rt2800_bbp_write(rt2x00dev, 187, 0x00);
+ rt2800_bbp_write(rt2x00dev, 188, 0x00);
+ rt2800_bbp_write(rt2x00dev, 186, 0x00);
+ rt2800_bbp_write(rt2x00dev, 187, 0x01);
+ rt2800_bbp_write(rt2x00dev, 188, 0x00);
+ rt2800_bbp_write(rt2x00dev, 189, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 91, 0x06);
+ rt2800_bbp_write(rt2x00dev, 92, 0x04);
+ rt2800_bbp_write(rt2x00dev, 93, 0x54);
+ rt2800_bbp_write(rt2x00dev, 99, 0x50);
+ rt2800_bbp_write(rt2x00dev, 148, 0x84);
+ rt2800_bbp_write(rt2x00dev, 167, 0x80);
+ rt2800_bbp_write(rt2x00dev, 178, 0xFF);
+ rt2800_bbp_write(rt2x00dev, 106, 0x13);
+
+ /* BBP for G band GLRT function (BBP_128 ~ BBP_221) */
+ rt2800_bbp_glrt_write(rt2x00dev, 0, 0x00);
+ rt2800_bbp_glrt_write(rt2x00dev, 1, 0x14);
+ rt2800_bbp_glrt_write(rt2x00dev, 2, 0x20);
+ rt2800_bbp_glrt_write(rt2x00dev, 3, 0x0A);
+ rt2800_bbp_glrt_write(rt2x00dev, 10, 0x16);
+ rt2800_bbp_glrt_write(rt2x00dev, 11, 0x06);
+ rt2800_bbp_glrt_write(rt2x00dev, 12, 0x02);
+ rt2800_bbp_glrt_write(rt2x00dev, 13, 0x07);
+ rt2800_bbp_glrt_write(rt2x00dev, 14, 0x05);
+ rt2800_bbp_glrt_write(rt2x00dev, 15, 0x09);
+ rt2800_bbp_glrt_write(rt2x00dev, 16, 0x20);
+ rt2800_bbp_glrt_write(rt2x00dev, 17, 0x08);
+ rt2800_bbp_glrt_write(rt2x00dev, 18, 0x4A);
+ rt2800_bbp_glrt_write(rt2x00dev, 19, 0x00);
+ rt2800_bbp_glrt_write(rt2x00dev, 20, 0x00);
+ rt2800_bbp_glrt_write(rt2x00dev, 128, 0xE0);
+ rt2800_bbp_glrt_write(rt2x00dev, 129, 0x1F);
+ rt2800_bbp_glrt_write(rt2x00dev, 130, 0x4F);
+ rt2800_bbp_glrt_write(rt2x00dev, 131, 0x32);
+ rt2800_bbp_glrt_write(rt2x00dev, 132, 0x08);
+ rt2800_bbp_glrt_write(rt2x00dev, 133, 0x28);
+ rt2800_bbp_glrt_write(rt2x00dev, 134, 0x19);
+ rt2800_bbp_glrt_write(rt2x00dev, 135, 0x0A);
+ rt2800_bbp_glrt_write(rt2x00dev, 138, 0x16);
+ rt2800_bbp_glrt_write(rt2x00dev, 139, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 140, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1A);
+ rt2800_bbp_glrt_write(rt2x00dev, 142, 0x36);
+ rt2800_bbp_glrt_write(rt2x00dev, 143, 0x2C);
+ rt2800_bbp_glrt_write(rt2x00dev, 144, 0x26);
+ rt2800_bbp_glrt_write(rt2x00dev, 145, 0x24);
+ rt2800_bbp_glrt_write(rt2x00dev, 146, 0x42);
+ rt2800_bbp_glrt_write(rt2x00dev, 147, 0x40);
+ rt2800_bbp_glrt_write(rt2x00dev, 148, 0x30);
+ rt2800_bbp_glrt_write(rt2x00dev, 149, 0x29);
+ rt2800_bbp_glrt_write(rt2x00dev, 150, 0x4C);
+ rt2800_bbp_glrt_write(rt2x00dev, 151, 0x46);
+ rt2800_bbp_glrt_write(rt2x00dev, 152, 0x3D);
+ rt2800_bbp_glrt_write(rt2x00dev, 153, 0x40);
+ rt2800_bbp_glrt_write(rt2x00dev, 154, 0x3E);
+ rt2800_bbp_glrt_write(rt2x00dev, 155, 0x38);
+ rt2800_bbp_glrt_write(rt2x00dev, 156, 0x3D);
+ rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2F);
+ rt2800_bbp_glrt_write(rt2x00dev, 158, 0x3C);
+ rt2800_bbp_glrt_write(rt2x00dev, 159, 0x34);
+ rt2800_bbp_glrt_write(rt2x00dev, 160, 0x2C);
+ rt2800_bbp_glrt_write(rt2x00dev, 161, 0x2F);
+ rt2800_bbp_glrt_write(rt2x00dev, 162, 0x3C);
+ rt2800_bbp_glrt_write(rt2x00dev, 163, 0x35);
+ rt2800_bbp_glrt_write(rt2x00dev, 164, 0x2E);
+ rt2800_bbp_glrt_write(rt2x00dev, 165, 0x2F);
+ rt2800_bbp_glrt_write(rt2x00dev, 166, 0x49);
+ rt2800_bbp_glrt_write(rt2x00dev, 167, 0x41);
+ rt2800_bbp_glrt_write(rt2x00dev, 168, 0x36);
+ rt2800_bbp_glrt_write(rt2x00dev, 169, 0x39);
+ rt2800_bbp_glrt_write(rt2x00dev, 170, 0x30);
+ rt2800_bbp_glrt_write(rt2x00dev, 171, 0x30);
+ rt2800_bbp_glrt_write(rt2x00dev, 172, 0x0E);
+ rt2800_bbp_glrt_write(rt2x00dev, 173, 0x0D);
+ rt2800_bbp_glrt_write(rt2x00dev, 174, 0x28);
+ rt2800_bbp_glrt_write(rt2x00dev, 175, 0x21);
+ rt2800_bbp_glrt_write(rt2x00dev, 176, 0x1C);
+ rt2800_bbp_glrt_write(rt2x00dev, 177, 0x16);
+ rt2800_bbp_glrt_write(rt2x00dev, 178, 0x50);
+ rt2800_bbp_glrt_write(rt2x00dev, 179, 0x4A);
+ rt2800_bbp_glrt_write(rt2x00dev, 180, 0x43);
+ rt2800_bbp_glrt_write(rt2x00dev, 181, 0x50);
+ rt2800_bbp_glrt_write(rt2x00dev, 182, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 183, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 184, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 185, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 200, 0x7D);
+ rt2800_bbp_glrt_write(rt2x00dev, 201, 0x14);
+ rt2800_bbp_glrt_write(rt2x00dev, 202, 0x32);
+ rt2800_bbp_glrt_write(rt2x00dev, 203, 0x2C);
+ rt2800_bbp_glrt_write(rt2x00dev, 204, 0x36);
+ rt2800_bbp_glrt_write(rt2x00dev, 205, 0x4C);
+ rt2800_bbp_glrt_write(rt2x00dev, 206, 0x43);
+ rt2800_bbp_glrt_write(rt2x00dev, 207, 0x2C);
+ rt2800_bbp_glrt_write(rt2x00dev, 208, 0x2E);
+ rt2800_bbp_glrt_write(rt2x00dev, 209, 0x36);
+ rt2800_bbp_glrt_write(rt2x00dev, 210, 0x30);
+ rt2800_bbp_glrt_write(rt2x00dev, 211, 0x6E);
+
+ /* BBP for G band DCOC function */
+ rt2800_bbp_dcoc_write(rt2x00dev, 140, 0x0C);
+ rt2800_bbp_dcoc_write(rt2x00dev, 141, 0x00);
+ rt2800_bbp_dcoc_write(rt2x00dev, 142, 0x10);
+ rt2800_bbp_dcoc_write(rt2x00dev, 143, 0x10);
+ rt2800_bbp_dcoc_write(rt2x00dev, 144, 0x10);
+ rt2800_bbp_dcoc_write(rt2x00dev, 145, 0x10);
+ rt2800_bbp_dcoc_write(rt2x00dev, 146, 0x08);
+ rt2800_bbp_dcoc_write(rt2x00dev, 147, 0x40);
+ rt2800_bbp_dcoc_write(rt2x00dev, 148, 0x04);
+ rt2800_bbp_dcoc_write(rt2x00dev, 149, 0x04);
+ rt2800_bbp_dcoc_write(rt2x00dev, 150, 0x08);
+ rt2800_bbp_dcoc_write(rt2x00dev, 151, 0x08);
+ rt2800_bbp_dcoc_write(rt2x00dev, 152, 0x03);
+ rt2800_bbp_dcoc_write(rt2x00dev, 153, 0x03);
+ rt2800_bbp_dcoc_write(rt2x00dev, 154, 0x03);
+ rt2800_bbp_dcoc_write(rt2x00dev, 155, 0x02);
+ rt2800_bbp_dcoc_write(rt2x00dev, 156, 0x40);
+ rt2800_bbp_dcoc_write(rt2x00dev, 157, 0x40);
+ rt2800_bbp_dcoc_write(rt2x00dev, 158, 0x64);
+ rt2800_bbp_dcoc_write(rt2x00dev, 159, 0x64);
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+}
+
static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
{
unsigned int i;
@@ -5773,6 +6572,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
case RT5592:
rt2800_init_bbp_5592(rt2x00dev);
return;
+ case RT6352:
+ rt2800_init_bbp_6352(rt2x00dev);
+ break;
}
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
@@ -6228,9 +7030,9 @@ static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev)
static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
{
- int tx0_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX0,
+ int tx0_ext_pa = test_bit(CAPABILITY_EXTERNAL_PA_TX0,
&rt2x00dev->cap_flags);
- int tx1_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX1,
+ int tx1_ext_pa = test_bit(CAPABILITY_EXTERNAL_PA_TX1,
&rt2x00dev->cap_flags);
u8 rfcsr;
@@ -6270,9 +7072,9 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
rfcsr = 0x01;
- if (!tx0_int_pa)
+ if (tx0_ext_pa)
rt2x00_set_field8(&rfcsr, RFCSR34_TX0_EXT_PA, 1);
- if (!tx1_int_pa)
+ if (tx1_ext_pa)
rt2x00_set_field8(&rfcsr, RFCSR34_TX1_EXT_PA, 1);
rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
@@ -6282,13 +7084,13 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
rfcsr = 0x52;
- if (tx0_int_pa) {
+ if (!tx0_ext_pa) {
rt2x00_set_field8(&rfcsr, RFCSR41_BIT1, 1);
rt2x00_set_field8(&rfcsr, RFCSR41_BIT4, 1);
}
rt2800_rfcsr_write(rt2x00dev, 41, rfcsr);
rfcsr = 0x52;
- if (tx1_int_pa) {
+ if (!tx1_ext_pa) {
rt2x00_set_field8(&rfcsr, RFCSR42_BIT1, 1);
rt2x00_set_field8(&rfcsr, RFCSR42_BIT4, 1);
}
@@ -6301,19 +7103,19 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
rfcsr = 0x2d;
- if (!tx0_int_pa)
+ if (tx0_ext_pa)
rt2x00_set_field8(&rfcsr, RFCSR50_TX0_EXT_PA, 1);
- if (!tx1_int_pa)
+ if (tx1_ext_pa)
rt2x00_set_field8(&rfcsr, RFCSR50_TX1_EXT_PA, 1);
rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
- rt2800_rfcsr_write(rt2x00dev, 51, (tx0_int_pa ? 0x7f : 0x52));
- rt2800_rfcsr_write(rt2x00dev, 52, (tx0_int_pa ? 0x00 : 0xc0));
- rt2800_rfcsr_write(rt2x00dev, 53, (tx0_int_pa ? 0x52 : 0xd2));
- rt2800_rfcsr_write(rt2x00dev, 54, (tx0_int_pa ? 0x1b : 0xc0));
- rt2800_rfcsr_write(rt2x00dev, 55, (tx1_int_pa ? 0x7f : 0x52));
- rt2800_rfcsr_write(rt2x00dev, 56, (tx1_int_pa ? 0x00 : 0xc0));
- rt2800_rfcsr_write(rt2x00dev, 57, (tx0_int_pa ? 0x52 : 0x49));
- rt2800_rfcsr_write(rt2x00dev, 58, (tx1_int_pa ? 0x1b : 0xc0));
+ rt2800_rfcsr_write(rt2x00dev, 51, (tx0_ext_pa ? 0x52 : 0x7f));
+ rt2800_rfcsr_write(rt2x00dev, 52, (tx0_ext_pa ? 0xc0 : 0x00));
+ rt2800_rfcsr_write(rt2x00dev, 53, (tx0_ext_pa ? 0xd2 : 0x52));
+ rt2800_rfcsr_write(rt2x00dev, 54, (tx0_ext_pa ? 0xc0 : 0x1b));
+ rt2800_rfcsr_write(rt2x00dev, 55, (tx1_ext_pa ? 0x52 : 0x7f));
+ rt2800_rfcsr_write(rt2x00dev, 56, (tx1_ext_pa ? 0xc0 : 0x00));
+ rt2800_rfcsr_write(rt2x00dev, 57, (tx0_ext_pa ? 0x49 : 0x52));
+ rt2800_rfcsr_write(rt2x00dev, 58, (tx1_ext_pa ? 0xc0 : 0x1b));
rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
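
The RT3352 hunks above flip the capability flag's polarity (INTERNAL_PA becomes EXTERNAL_PA), so every conditional and ternary swaps its arms. A standalone check that one of the rewritten lines is behaviour-preserving, given that ext_pa is by definition the negation of the old int_pa flag (constants taken from the RFCSR 51 line above):

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	for (int int_pa = 0; int_pa <= 1; int_pa++) {
		bool ext_pa = !int_pa;

		assert((int_pa ? 0x7f : 0x52) == (ext_pa ? 0x52 : 0x7f));
	}
	return 0;
}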
@@ -6844,6 +7646,617 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
rt2800_led_open_drain_enable(rt2x00dev);
}
+static void rt2800_bbp_core_soft_reset(struct rt2x00_dev *rt2x00dev,
+ bool set_bw, bool is_ht40)
+{
+ u8 bbp_val;
+
+ rt2800_bbp_read(rt2x00dev, 21, &bbp_val);
+ bbp_val |= 0x1;
+ rt2800_bbp_write(rt2x00dev, 21, bbp_val);
+ usleep_range(100, 200);
+
+ if (set_bw) {
+ rt2800_bbp_read(rt2x00dev, 4, &bbp_val);
+ rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH, 2 * is_ht40);
+ rt2800_bbp_write(rt2x00dev, 4, bbp_val);
+ usleep_range(100, 200);
+ }
+
+ rt2800_bbp_read(rt2x00dev, 21, &bbp_val);
+ bbp_val &= (~0x1);
+ rt2800_bbp_write(rt2x00dev, 21, bbp_val);
+ usleep_range(100, 200);
+}
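
The helper above brackets an optional bandwidth change between asserting and deasserting the BBP soft-reset bit. A toy standalone model of that bracket; the 0x18 mask is an assumed position of the BBP 4 bandwidth field, used here only for illustration (the driver writes field value 2 for HT40 via rt2x00_set_field8):

#include <stdbool.h>
#include <stdint.h>

static uint8_t bbp[256];	/* stand-in for the BBP register file */

static void bbp_soft_reset(bool set_bw, bool is_ht40)
{
	bbp[21] |= 0x1;				/* enter soft reset */
	if (set_bw)
		bbp[4] = (bbp[4] & ~0x18) | (is_ht40 ? 2 << 3 : 0);
	bbp[21] &= ~0x1;			/* leave soft reset */
}

int main(void)
{
	bbp_soft_reset(true, true);	/* switch the model to HT40 */
	return bbp[4] == 0x10 ? 0 : 1;
}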
+
+static int rt2800_rf_lp_config(struct rt2x00_dev *rt2x00dev, bool btxcal)
+{
+ u8 rf_val;
+
+ if (btxcal)
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+ else
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x02);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x06);
+
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 17, &rf_val);
+ rf_val |= 0x80;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rf_val);
+
+ if (btxcal) {
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xC1);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x20);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &rf_val);
+ rf_val &= (~0x3F);
+ rf_val |= 0x3F;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &rf_val);
+ rf_val &= (~0x3F);
+ rf_val |= 0x3F;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 5, 0x31);
+ } else {
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xF1);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &rf_val);
+ rf_val &= (~0x3F);
+ rf_val |= 0x34;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &rf_val);
+ rf_val &= (~0x3F);
+ rf_val |= 0x34;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val);
+ }
+
+ return 0;
+}
+
+static s8 rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int cnt;
+ u8 bbp_val;
+ s8 cal_val; /* 7-bit two's-complement calibration value */
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x82);
+
+ cnt = 0;
+ do {
+ usleep_range(500, 2000);
+ rt2800_bbp_read(rt2x00dev, 159, &bbp_val);
+ if (bbp_val == 0x02 || cnt == 20)
+ break;
+
+ cnt++;
+ } while (cnt < 20);
+
+ rt2800_bbp_dcoc_read(rt2x00dev, 0x39, &bbp_val);
+ cal_val = bbp_val & 0x7F;
+ if (cal_val >= 0x40)
+ cal_val -= 128;
+
+ return cal_val;
+}
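
The readback from DCOC sub-register 0x39 is a 7-bit two's-complement number, which the tail of the function above sign-extends by hand (this is also why a plain char return type would be unsafe on architectures where char is unsigned). A standalone check of that conversion; sext7 is an illustrative name:

#include <assert.h>
#include <stdint.h>

static int sext7(uint8_t raw)
{
	int v = raw & 0x7f;

	if (v >= 0x40)
		v -= 128;
	return v;
}

int main(void)
{
	assert(sext7(0x00) == 0);
	assert(sext7(0x3f) == 63);	/* largest positive value */
	assert(sext7(0x40) == -64);	/* most negative value */
	assert(sext7(0x7f) == -1);
	return 0;
}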
+
+static void rt2800_bw_filter_calibration(struct rt2x00_dev *rt2x00dev,
+ bool btxcal)
+{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u8 tx_agc_fc = 0, rx_agc_fc = 0, cmm_agc_fc;
+ u8 filter_target;
+ u8 tx_filter_target_20m = 0x09, tx_filter_target_40m = 0x02;
+ u8 rx_filter_target_20m = 0x27, rx_filter_target_40m = 0x31;
+ int loop = 0, is_ht40, cnt;
+ u8 bbp_val, rf_val;
+ s8 cal_r32_init, cal_r32_val, cal_diff;
+ u8 saverfb5r00, saverfb5r01, saverfb5r03, saverfb5r04, saverfb5r05;
+ u8 saverfb5r06, saverfb5r07;
+ u8 saverfb5r08, saverfb5r17, saverfb5r18, saverfb5r19, saverfb5r20;
+ u8 saverfb5r37, saverfb5r38, saverfb5r39, saverfb5r40, saverfb5r41;
+ u8 saverfb5r42, saverfb5r43, saverfb5r44, saverfb5r45, saverfb5r46;
+ u8 saverfb5r58, saverfb5r59;
+ u8 savebbp159r0, savebbp159r2, savebbpr23;
+ u32 MAC_RF_CONTROL0, MAC_RF_BYPASS0;
+
+ /* Save MAC registers */
+ rt2800_register_read(rt2x00dev, RF_CONTROL0, &MAC_RF_CONTROL0);
+ rt2800_register_read(rt2x00dev, RF_BYPASS0, &MAC_RF_BYPASS0);
+
+ /* save BBP registers */
+ rt2800_bbp_read(rt2x00dev, 23, &savebbpr23);
+
+ rt2800_bbp_dcoc_read(rt2x00dev, 0, &savebbp159r0);
+ rt2800_bbp_dcoc_read(rt2x00dev, 2, &savebbp159r2);
+
+ /* Save RF registers */
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &saverfb5r00);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &saverfb5r01);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &saverfb5r03);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &saverfb5r04);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 5, &saverfb5r05);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &saverfb5r06);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &saverfb5r07);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 8, &saverfb5r08);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 17, &saverfb5r17);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 18, &saverfb5r18);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 19, &saverfb5r19);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 20, &saverfb5r20);
+
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 37, &saverfb5r37);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 38, &saverfb5r38);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 39, &saverfb5r39);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 40, &saverfb5r40);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 41, &saverfb5r41);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 42, &saverfb5r42);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 43, &saverfb5r43);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 44, &saverfb5r44);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 45, &saverfb5r45);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 46, &saverfb5r46);
+
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &saverfb5r58);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &saverfb5r59);
+
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &rf_val);
+ rf_val |= 0x3;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val);
+
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &rf_val);
+ rf_val |= 0x1;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rf_val);
+
+ cnt = 0;
+ do {
+ usleep_range(500, 2000);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &rf_val);
+ if (((rf_val & 0x1) == 0x00) || (cnt == 40))
+ break;
+ cnt++;
+ } while (cnt < 40);
+
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &rf_val);
+ rf_val &= (~0x3);
+ rf_val |= 0x1;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val);
+
+ /* I-3 */
+ rt2800_bbp_read(rt2x00dev, 23, &bbp_val);
+ bbp_val &= (~0x1F);
+ bbp_val |= 0x10;
+ rt2800_bbp_write(rt2x00dev, 23, bbp_val);
+
+ do {
+ /* I-4,5,6,7,8,9 */
+ if (loop == 0) {
+ is_ht40 = false;
+
+ if (btxcal)
+ filter_target = tx_filter_target_20m;
+ else
+ filter_target = rx_filter_target_20m;
+ } else {
+ is_ht40 = true;
+
+ if (btxcal)
+ filter_target = tx_filter_target_40m;
+ else
+ filter_target = rx_filter_target_40m;
+ }
+
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 8, &rf_val);
+ rf_val &= (~0x04);
+ if (loop == 1)
+ rf_val |= 0x4;
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 8, rf_val);
+
+ rt2800_bbp_core_soft_reset(rt2x00dev, true, is_ht40);
+
+ rt2800_rf_lp_config(rt2x00dev, btxcal);
+ if (btxcal) {
+ tx_agc_fc = 0;
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rf_val);
+ rf_val &= (~0x7F);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rf_val);
+ rf_val &= (~0x7F);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val);
+ } else {
+ rx_agc_fc = 0;
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rf_val);
+ rf_val &= (~0x7F);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rf_val);
+ rf_val &= (~0x7F);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val);
+ }
+
+ usleep_range(1000, 2000);
+
+ rt2800_bbp_dcoc_read(rt2x00dev, 2, &bbp_val);
+ bbp_val &= (~0x6);
+ rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val);
+
+ rt2800_bbp_core_soft_reset(rt2x00dev, false, is_ht40);
+
+ cal_r32_init = rt2800_lp_tx_filter_bw_cal(rt2x00dev);
+
+ rt2800_bbp_dcoc_read(rt2x00dev, 2, &bbp_val);
+ bbp_val |= 0x6;
+ rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val);
+do_cal:
+ if (btxcal) {
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rf_val);
+ rf_val &= (~0x7F);
+ rf_val |= tx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rf_val);
+ rf_val &= (~0x7F);
+ rf_val |= tx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val);
+ } else {
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rf_val);
+ rf_val &= (~0x7F);
+ rf_val |= rx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val);
+ rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rf_val);
+ rf_val &= (~0x7F);
+ rf_val |= rx_agc_fc;
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val);
+ }
+
+ usleep_range(500, 1000);
+
+ rt2800_bbp_core_soft_reset(rt2x00dev, false, is_ht40);
+
+ cal_r32_val = rt2800_lp_tx_filter_bw_cal(rt2x00dev);
+
+ cal_diff = cal_r32_init - cal_r32_val;
+
+ if (btxcal)
+ cmm_agc_fc = tx_agc_fc;
+ else
+ cmm_agc_fc = rx_agc_fc;
+
+ if (((cal_diff > filter_target) && (cmm_agc_fc == 0)) ||
+ ((cal_diff < filter_target) && (cmm_agc_fc == 0x3f))) {
+ if (btxcal)
+ tx_agc_fc = 0;
+ else
+ rx_agc_fc = 0;
+ } else if ((cal_diff <= filter_target) && (cmm_agc_fc < 0x3f)) {
+ if (btxcal)
+ tx_agc_fc++;
+ else
+ rx_agc_fc++;
+ goto do_cal;
+ }
+
+ if (btxcal) {
+ if (loop == 0)
+ drv_data->tx_calibration_bw20 = tx_agc_fc;
+ else
+ drv_data->tx_calibration_bw40 = tx_agc_fc;
+ } else {
+ if (loop == 0)
+ drv_data->rx_calibration_bw20 = rx_agc_fc;
+ else
+ drv_data->rx_calibration_bw40 = rx_agc_fc;
+ }
+
+ loop++;
+ } while (loop <= 1);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, saverfb5r00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, saverfb5r01);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, saverfb5r03);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r04);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 5, saverfb5r05);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, saverfb5r06);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, saverfb5r07);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 8, saverfb5r08);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, saverfb5r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, saverfb5r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, saverfb5r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, saverfb5r20);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 37, saverfb5r37);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 38, saverfb5r38);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 39, saverfb5r39);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 40, saverfb5r40);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 41, saverfb5r41);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 42, saverfb5r42);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 43, saverfb5r43);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 44, saverfb5r44);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 45, saverfb5r45);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 46, saverfb5r46);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, saverfb5r58);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, saverfb5r59);
+
+ rt2800_bbp_write(rt2x00dev, 23, savebbpr23);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 0, savebbp159r0);
+ rt2800_bbp_dcoc_write(rt2x00dev, 2, savebbp159r2);
+
+ rt2800_bbp_read(rt2x00dev, 4, &bbp_val);
+ rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH,
+ 2 * test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags));
+ rt2800_bbp_write(rt2x00dev, 4, bbp_val);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, MAC_RF_CONTROL0);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0);
+}
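
Abstracting away the register traffic, the do_cal loop above is a linear search: starting from 0, the AGC filter-corner code is incremented until the measured difference against the initial reading exceeds the filter target, or the 6-bit code saturates. A hedged standalone sketch of that search; measure() stands in for the whole program-registers/soft-reset/read-back sequence and all names are illustrative:

#include <stdio.h>

static int fake_measure(unsigned char code)
{
	return 100 - 3 * code;	/* pretend the reading drops as the code rises */
}

static unsigned char search_agc_fc(int target,
				   int (*measure)(unsigned char))
{
	unsigned char agc_fc = 0;
	int init = measure(0);

	while (agc_fc < 0x3f && init - measure(agc_fc) <= target)
		agc_fc++;

	/* The driver additionally falls back to 0 when the 6-bit code
	 * saturates at 0x3f without the difference reaching the target.
	 */
	return agc_fc;
}

int main(void)
{
	printf("converged at code %u\n", search_agc_fc(9, fake_measure));
	return 0;
}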
+
+static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev)
+{
+ /* Initialize RF central registers to their default values */
+ rt2800_rfcsr_write(rt2x00dev, 0, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x33);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0xFF);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x0C);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 12, rt2x00dev->freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x22);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x4C);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xA0);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x13);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0xFE);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x24);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x7A);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xD0);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0x5B);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x00);
+
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+ if (rt2800_clk_is_20mhz(rt2x00dev))
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x03);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x7C);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x99);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x99);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x50);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xB0);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x06);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x5D);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x61);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0xB5);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x02);
+
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x62);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0xAD);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x80);
+
+ /* Initialize RF channel registers to their default values */
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 0, 0x03);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 1, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 2, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 3, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 5, 0x08);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 6, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 7, 0x51);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 8, 0x53);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x16);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x61);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x53);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 12, 0x22);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 13, 0x3D);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x06);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 15, 0x13);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 16, 0x22);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 18, 0x02);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA7);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 20, 0x01);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 21, 0x52);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 22, 0x80);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 23, 0xB3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 24, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 25, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x5C);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 29, 0x6B);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 30, 0x6B);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 31, 0x31);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 32, 0x5D);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 34, 0xE6);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 35, 0x55);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 37, 0xBB);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 38, 0xB3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 39, 0xB3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 40, 0x03);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 41, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 42, 0x00);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xB3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xD3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xD5);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x07);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x68);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xEF);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1C);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x07);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xA8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0x85);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x10);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x07);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6A);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0x85);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x10);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 62, 0x1C);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 63, 0x00);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 45, 0xC5);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x47);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x71);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x33);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x0E);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x23);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 20, 0x02);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 21, 0x12);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x1C);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 29, 0xEB);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 32, 0x7D);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 34, 0xD6);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 36, 0x08);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 38, 0xB4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xD3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xB3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xD5);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 47, 0x67);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 47, 0x69);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xFF);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 54, 0x27);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 54, 0x20);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xFF);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x1C);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x20);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6B);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xF7);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x09);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x51);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x06);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA7);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x2C);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x64);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 8, 0x51);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x36);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x53);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x16);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x6C);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xFC);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1F);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6B);
+
+ /* Initialize RF channel registers for DRQFN */
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xD3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xE3);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xE5);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x28);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x68);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xF7);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x02);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xC7);
+
+ /* Initialize RF DC calibration registers to their default values */
+ rt2800_rfcsr_write_dccal(rt2x00dev, 0, 0x47);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 1, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 2, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x10);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x10);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x04);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 9, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 10, 0x07);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 11, 0x01);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 12, 0x07);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 13, 0x07);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 14, 0x07);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 15, 0x20);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 16, 0x22);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 18, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 19, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 21, 0xF1);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 22, 0x11);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 23, 0x02);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 24, 0x41);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 25, 0x20);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 27, 0xD7);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 28, 0xA2);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 29, 0x20);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 30, 0x49);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 31, 0x20);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 32, 0x04);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 33, 0xF1);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 34, 0xA1);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 35, 0x01);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 41, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 42, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 43, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 44, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 45, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 46, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 47, 0x3E);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 48, 0x3D);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 49, 0x3E);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 50, 0x3D);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 51, 0x3E);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 52, 0x3D);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 53, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 54, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 55, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 56, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 57, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x10);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x10);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 60, 0x0A);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 61, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 63, 0x00);
+
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x08);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x04);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x20);
+
+ rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C);
+
+ rt2800_bw_filter_calibration(rt2x00dev, true);
+ rt2800_bw_filter_calibration(rt2x00dev, false);
+}
+
static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
if (rt2800_is_305x_soc(rt2x00dev)) {
@@ -6884,6 +8297,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
case RT5592:
rt2800_init_rfcsr_5592(rt2x00dev);
break;
+ case RT6352:
+ rt2800_init_rfcsr_6352(rt2x00dev);
+ break;
}
}
@@ -7250,7 +8666,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
if (rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT6352))
rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
else if (rt2x00_rt(rt2x00dev, RT3352))
rf = RF3322;
@@ -7282,6 +8699,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF5390:
case RF5392:
case RF5592:
+ case RF7620:
break;
default:
rt2x00_err(rt2x00dev, "Invalid RF chipset 0x%04x detected\n",
@@ -7382,13 +8800,13 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
if (rt2x00_rt(rt2x00dev, RT3352)) {
- if (!rt2x00_get_field16(eeprom,
+ if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352))
- __set_bit(CAPABILITY_INTERNAL_PA_TX0,
+ __set_bit(CAPABILITY_EXTERNAL_PA_TX0,
&rt2x00dev->cap_flags);
- if (!rt2x00_get_field16(eeprom,
+ if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352))
- __set_bit(CAPABILITY_INTERNAL_PA_TX1,
+ __set_bit(CAPABILITY_EXTERNAL_PA_TX1,
&rt2x00dev->cap_flags);
}
@@ -7689,6 +9107,23 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
{196, 83, 0, 12, 1},
};
+static const struct rf_channel rf_vals_7620[] = {
+ {1, 0x50, 0x99, 0x99, 1},
+ {2, 0x50, 0x44, 0x44, 2},
+ {3, 0x50, 0xEE, 0xEE, 2},
+ {4, 0x50, 0x99, 0x99, 3},
+ {5, 0x51, 0x44, 0x44, 0},
+ {6, 0x51, 0xEE, 0xEE, 0},
+ {7, 0x51, 0x99, 0x99, 1},
+ {8, 0x51, 0x44, 0x44, 2},
+ {9, 0x51, 0xEE, 0xEE, 2},
+ {10, 0x51, 0x99, 0x99, 3},
+ {11, 0x52, 0x44, 0x44, 0},
+ {12, 0x52, 0xEE, 0xEE, 0},
+ {13, 0x52, 0x99, 0x99, 1},
+ {14, 0x52, 0x33, 0x33, 3},
+};
+
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7792,6 +9227,11 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels = rf_vals_3x;
break;
+ case RF7620:
+ spec->num_channels = ARRAY_SIZE(rf_vals_7620);
+ spec->channels = rf_vals_7620;
+ break;
+
case RF3052:
case RF3053:
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
@@ -7923,6 +9363,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF5390:
case RF5392:
case RF5592:
+ case RF7620:
__set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
break;
}
@@ -7967,6 +9408,9 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
return -ENODEV;
}
+ if (rt == RT5390 && rt2x00_is_soc(rt2x00dev))
+ rt = RT6352;
+
rt2x00_set_rt(rt2x00dev, rt, rev);
return 0;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 0a8b4df665fe..f357531d9488 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -20,6 +20,34 @@
#ifndef RT2800LIB_H
#define RT2800LIB_H
+/*
+ * The hardware has 255 WCID table entries. The first 32 entries are
+ * reserved for shared keys. Since parts of the pairwise key table might
+ * be shared with the beacon frame buffers 6 & 7, we can only use the
+ * first 222 entries.
+ */
+#define WCID_START 33
+#define WCID_END 222
+#define STA_IDS_SIZE (WCID_END - WCID_START + 2)
+
+/* RT2800 driver data structure */
+struct rt2800_drv_data {
+ u8 calibration_bw20;
+ u8 calibration_bw40;
+ char rx_calibration_bw20;
+ char rx_calibration_bw40;
+ char tx_calibration_bw20;
+ char tx_calibration_bw40;
+ u8 bbp25;
+ u8 bbp26;
+ u8 txmixer_gain_24g;
+ u8 txmixer_gain_5g;
+ u8 max_psdu;
+ unsigned int tbtt_tick;
+ unsigned int ampdu_factor_cnt[4];
+ DECLARE_BITMAP(sta_ids, STA_IDS_SIZE);
+ struct ieee80211_sta *wcid_to_sta[STA_IDS_SIZE];
+};
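
A toy model of how the sta_ids bitmap above can hand out pairwise WCIDs: slot i corresponds to WCID (i + WCID_START). find_free_wcid() is a hypothetical name used for illustration, and the byte array stands in for the kernel bitmap (DECLARE_BITMAP) the driver actually uses:

#include <stdio.h>

#define WCID_START   33
#define WCID_END     222
#define STA_IDS_SIZE (WCID_END - WCID_START + 2)

static unsigned char sta_ids[STA_IDS_SIZE];

static int find_free_wcid(void)
{
	for (int i = 0; i < STA_IDS_SIZE; i++) {
		if (!sta_ids[i]) {
			sta_ids[i] = 1;
			return i + WCID_START;
		}
	}
	return -1;	/* pairwise WCID space exhausted */
}

int main(void)
{
	printf("%d %d\n", find_free_wcid(), find_free_wcid());	/* 33 34 */
	return 0;
}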
+
struct rt2800_ops {
void (*register_read)(struct rt2x00_dev *rt2x00dev,
const unsigned int offset, u32 *value);
@@ -167,7 +195,8 @@ void rt2800_write_tx_data(struct queue_entry *entry,
struct txentry_desc *txdesc);
void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
-void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32* txwi);
+void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
+ bool match);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
void rt2800_clear_beacon(struct queue_entry *entry);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index de4790b41be7..3ab3b5323897 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -239,7 +239,7 @@ static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
{
if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
rt2800_txdone_entry(entry, entry->status,
- rt2800mmio_get_txwi(entry));
+ rt2800mmio_get_txwi(entry), true);
return false;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 205a7b8ac8a7..f11e3f532a84 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -501,8 +501,7 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
/*
* TX control handlers
*/
-static enum txdone_entry_desc_flags
-rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
+static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
{
__le32 *txwi;
u32 word;
@@ -515,7 +514,7 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
* frame.
*/
if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
- return TXDONE_FAILURE;
+ return false;
wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
@@ -537,10 +536,10 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
rt2x00_dbg(entry->queue->rt2x00dev,
"TX status report missed for queue %d entry %d\n",
entry->queue->qid, entry->entry_idx);
- return TXDONE_UNKNOWN;
+ return false;
}
- return TXDONE_SUCCESS;
+ return true;
}
static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
@@ -549,7 +548,7 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
struct queue_entry *entry;
u32 reg;
u8 qid;
- enum txdone_entry_desc_flags done_status;
+ bool match;
while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
/*
@@ -574,11 +573,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
break;
}
- done_status = rt2800usb_txdone_entry_check(entry, reg);
- if (likely(done_status == TXDONE_SUCCESS))
- rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry));
- else
- rt2x00lib_txdone_noinfo(entry, done_status);
+ match = rt2800usb_txdone_entry_check(entry, reg);
+ rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry), match);
}
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 340787894c69..1bc353eafe37 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -174,6 +174,7 @@ struct rt2x00_chip {
#define RT5390 0x5390 /* 2.4GHz */
#define RT5392 0x5392 /* 2.4GHz */
#define RT5592 0x5592
+#define RT6352 0x6352 /* WSOC 2.4GHz */
u16 rf;
u16 rev;
@@ -718,8 +719,8 @@ enum rt2x00_capability_flags {
CAPABILITY_DOUBLE_ANTENNA,
CAPABILITY_BT_COEXIST,
CAPABILITY_VCO_RECALIBRATION,
- CAPABILITY_INTERNAL_PA_TX0,
- CAPABILITY_INTERNAL_PA_TX1,
+ CAPABILITY_EXTERNAL_PA_TX0,
+ CAPABILITY_EXTERNAL_PA_TX1,
};
/*
@@ -1396,7 +1397,7 @@ void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop);
* rt2x00debug_dump_frame - Dump a frame to userspace through debugfs.
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @type: The type of frame that is being dumped.
- * @skb: The skb containing the frame to be dumped.
+ * @entry: The queue entry containing the frame to be dumped.
*/
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
@@ -1425,6 +1426,8 @@ void rt2x00lib_dmastart(struct queue_entry *entry);
void rt2x00lib_dmadone(struct queue_entry *entry);
void rt2x00lib_txdone(struct queue_entry *entry,
struct txdone_entry_desc *txdesc);
+void rt2x00lib_txdone_nomatch(struct queue_entry *entry,
+ struct txdone_entry_desc *txdesc);
void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index dd6678109b7e..357c0941aaad 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -313,73 +313,14 @@ static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry)
return ret;
}
-void rt2x00lib_txdone(struct queue_entry *entry,
- struct txdone_entry_desc *txdesc)
+static void rt2x00lib_fill_tx_status(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_tx_info *tx_info,
+ struct skb_frame_desc *skbdesc,
+ struct txdone_entry_desc *txdesc,
+ bool success)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- unsigned int header_length, i;
u8 rate_idx, rate_flags, retry_rates;
- u8 skbdesc_flags = skbdesc->flags;
- bool success;
-
- /*
- * Unmap the skb.
- */
- rt2x00queue_unmap_skb(entry);
-
- /*
- * Remove the extra tx headroom from the skb.
- */
- skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
-
- /*
- * Signal that the TX descriptor is no longer in the skb.
- */
- skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
-
- /*
- * Determine the length of 802.11 header.
- */
- header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
-
- /*
- * Remove L2 padding which was added during
- */
- if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
- rt2x00queue_remove_l2pad(entry->skb, header_length);
-
- /*
- * If the IV/EIV data was stripped from the frame before it was
- * passed to the hardware, we should now reinsert it again because
- * mac80211 will expect the same data to be present it the
- * frame as it was passed to us.
- */
- if (rt2x00_has_cap_hw_crypto(rt2x00dev))
- rt2x00crypto_tx_insert_iv(entry->skb, header_length);
-
- /*
- * Send frame to debugfs immediately, after this call is completed
- * we are going to overwrite the skb->cb array.
- */
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
-
- /*
- * Determine if the frame has been successfully transmitted and
- * remove BARs from our check list while checking for their
- * TX status.
- */
- success =
- rt2x00lib_txdone_bar_status(entry) ||
- test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
- test_bit(TXDONE_UNKNOWN, &txdesc->flags);
-
- /*
- * Update TX statistics.
- */
- rt2x00dev->link.qual.tx_success += success;
- rt2x00dev->link.qual.tx_failed += !success;
+ int i;
rate_idx = skbdesc->tx_rate_idx;
rate_flags = skbdesc->tx_rate_flags;
@@ -416,6 +357,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
if (i < (IEEE80211_TX_MAX_RATES - 1))
tx_info->status.rates[i].idx = -1; /* terminate */
+ if (test_bit(TXDONE_NO_ACK_REQ, &txdesc->flags))
+ tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
+
if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
if (success)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
@@ -434,7 +378,8 @@ void rt2x00lib_txdone(struct queue_entry *entry,
*/
if (test_bit(TXDONE_AMPDU, &txdesc->flags) ||
tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU |
+ IEEE80211_TX_CTL_AMPDU;
tx_info->status.ampdu_len = 1;
tx_info->status.ampdu_ack_len = success ? 1 : 0;
@@ -448,21 +393,11 @@ void rt2x00lib_txdone(struct queue_entry *entry,
else
rt2x00dev->low_level_stats.dot11RTSFailureCount++;
}
+}
- /*
- * Only send the status report to mac80211 when it's a frame
- * that originated in mac80211. If this was a extra frame coming
- * through a mac80211 library call (RTS/CTS) then we should not
- * send the status report back.
- */
- if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
- if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT))
- ieee80211_tx_status(rt2x00dev->hw, entry->skb);
- else
- ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
- } else
- dev_kfree_skb_any(entry->skb);
-
+static void rt2x00lib_clear_entry(struct rt2x00_dev *rt2x00dev,
+ struct queue_entry *entry)
+{
/*
* Make this entry available for reuse.
*/
@@ -485,6 +420,143 @@ void rt2x00lib_txdone(struct queue_entry *entry,
rt2x00queue_unpause_queue(entry->queue);
spin_unlock_bh(&entry->queue->tx_lock);
}
+
+void rt2x00lib_txdone_nomatch(struct queue_entry *entry,
+ struct txdone_entry_desc *txdesc)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct ieee80211_tx_info txinfo = {};
+ bool success;
+
+ /*
+ * Unmap the skb.
+ */
+ rt2x00queue_unmap_skb(entry);
+
+ /*
+ * Signal that the TX descriptor is no longer in the skb.
+ */
+ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
+ /*
+ * Send frame to debugfs immediately, after this call is completed
+ * we are going to overwrite the skb->cb array.
+ */
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
+
+ /*
+ * Determine if the frame has been successfully transmitted and
+ * remove BARs from our check list while checking for their
+ * TX status.
+ */
+ success =
+ rt2x00lib_txdone_bar_status(entry) ||
+ test_bit(TXDONE_SUCCESS, &txdesc->flags);
+
+ if (!test_bit(TXDONE_UNKNOWN, &txdesc->flags)) {
+ /*
+ * Update TX statistics.
+ */
+ rt2x00dev->link.qual.tx_success += success;
+ rt2x00dev->link.qual.tx_failed += !success;
+
+ rt2x00lib_fill_tx_status(rt2x00dev, &txinfo, skbdesc, txdesc,
+ success);
+ ieee80211_tx_status_noskb(rt2x00dev->hw, skbdesc->sta, &txinfo);
+ }
+
+ dev_kfree_skb_any(entry->skb);
+ rt2x00lib_clear_entry(rt2x00dev, entry);
+}
+EXPORT_SYMBOL_GPL(rt2x00lib_txdone_nomatch);
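The nomatch path reports TX status without the original frame via ieee80211_tx_status_noskb(), and deliberately stays silent when the outcome is unknown so that rate control is not fed bogus data. A minimal userspace sketch of that decision, with the flag values and the test_bit() stand-in as assumptions rather than the kernel definitions:

/*
 * Sketch of the rt2x00lib_txdone_nomatch() reporting decision.
 * Flag values and test_flag() are simplified assumptions; the kernel
 * uses <linux/bitops.h> and ieee80211_tx_status_noskb().
 */
#include <stdbool.h>
#include <stdio.h>

enum { TXDONE_SUCCESS = 0, TXDONE_UNKNOWN = 1, TXDONE_FAILURE = 2 };

static bool test_flag(unsigned long flags, int bit)
{
        return flags & (1UL << bit);
}

/* Returns true when a status report should go to mac80211. */
static bool nomatch_should_report(unsigned long txdone_flags, bool bar_ok)
{
        bool success = bar_ok || test_flag(txdone_flags, TXDONE_SUCCESS);

        /* Unknown status: skip stats and the noskb report entirely. */
        if (test_flag(txdone_flags, TXDONE_UNKNOWN))
                return false;

        printf("report: %s\n", success ? "acked" : "failed");
        return true;
}

int main(void)
{
        nomatch_should_report(1UL << TXDONE_SUCCESS, false); /* reported */
        nomatch_should_report(1UL << TXDONE_UNKNOWN, false); /* skipped */
        return 0;
}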
+
+void rt2x00lib_txdone(struct queue_entry *entry,
+ struct txdone_entry_desc *txdesc)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ u8 skbdesc_flags = skbdesc->flags;
+ unsigned int header_length;
+ bool success;
+
+ /*
+ * Unmap the skb.
+ */
+ rt2x00queue_unmap_skb(entry);
+
+ /*
+ * Remove the extra tx headroom from the skb.
+ */
+ skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
+
+ /*
+ * Signal that the TX descriptor is no longer in the skb.
+ */
+ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
+ /*
+ * Determine the length of 802.11 header.
+ */
+ header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+
+ /*
+	 * Remove the L2 padding which was added during TX.
+ */
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
+ rt2x00queue_remove_l2pad(entry->skb, header_length);
+
+ /*
+ * If the IV/EIV data was stripped from the frame before it was
+ * passed to the hardware, we should now reinsert it again because
+	 * mac80211 will expect the same data to be present in the
+ * frame as it was passed to us.
+ */
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev))
+ rt2x00crypto_tx_insert_iv(entry->skb, header_length);
+
+ /*
+	 * Send the frame to debugfs immediately; after this call completes
+	 * we are going to overwrite the skb->cb array.
+ */
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
+
+ /*
+ * Determine if the frame has been successfully transmitted and
+ * remove BARs from our check list while checking for their
+ * TX status.
+ */
+ success =
+ rt2x00lib_txdone_bar_status(entry) ||
+ test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
+ test_bit(TXDONE_UNKNOWN, &txdesc->flags);
+
+ /*
+ * Update TX statistics.
+ */
+ rt2x00dev->link.qual.tx_success += success;
+ rt2x00dev->link.qual.tx_failed += !success;
+
+ rt2x00lib_fill_tx_status(rt2x00dev, tx_info, skbdesc, txdesc, success);
+
+ /*
+ * Only send the status report to mac80211 when it's a frame
+	 * that originated in mac80211. If this was an extra frame coming
+ * through a mac80211 library call (RTS/CTS) then we should not
+ * send the status report back.
+ */
+ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
+ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT))
+ ieee80211_tx_status(rt2x00dev->hw, entry->skb);
+ else
+ ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
+ } else {
+ dev_kfree_skb_any(entry->skb);
+ }
+
+ rt2x00lib_clear_entry(rt2x00dev, entry);
+}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
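In the matched path the final routing depends on where the skb originated and on the execution context. A hedged sketch of that routing (the flag bit is a placeholder; the real driver tests SKBDESC_NOT_MAC80211 and the REQUIRE_TASKLET_CONTEXT capability):

/*
 * Sketch of the final routing in rt2x00lib_txdone(). The bit value is
 * a placeholder; the kernel calls ieee80211_tx_status() from tasklet
 * context and ieee80211_tx_status_ni() otherwise.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKBDESC_NOT_MAC80211    (1 << 0)        /* assumed bit position */

static void route_tx_status(unsigned char skbdesc_flags, bool tasklet_ctx)
{
        if (skbdesc_flags & SKBDESC_NOT_MAC80211) {
                /* RTS/CTS generated internally: no report, just free. */
                puts("dev_kfree_skb_any()");
        } else if (tasklet_ctx) {
                puts("ieee80211_tx_status()");
        } else {
                puts("ieee80211_tx_status_ni()");
        }
}

int main(void)
{
        route_tx_status(0, false);                      /* -> _ni variant */
        route_tx_status(SKBDESC_NOT_MAC80211, false);   /* -> freed */
        return 0;
}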
void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status)
@@ -753,7 +825,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc);
if (rxdesc.rate_mode == RATE_MODE_HT_MIX ||
rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD)
- rxdesc.flags |= RX_FLAG_HT;
+ rxdesc.encoding = RX_ENC_HT;
/*
* Check if this is a beacon, and more frames have been
@@ -793,6 +865,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
rx_status->rate_idx = rate_idx;
rx_status->signal = rxdesc.rssi;
rx_status->flag = rxdesc.flags;
+ rx_status->enc_flags = rxdesc.enc_flags;
+ rx_status->encoding = rxdesc.encoding;
+ rx_status->bw = rxdesc.bw;
rx_status->antenna = rt2x00dev->link.ant.active.rx;
ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
@@ -1384,6 +1459,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(rt2x00dev->hw->wiphy,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
/*
* Initialize ieee80211 structure.
*/
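The probe hunk advertises NL80211_EXT_FEATURE_CQM_RSSI_LIST, as the rtl8180, rtl8187 and rtl8xxxu hunks further down also do. cfg80211's wiphy_ext_feature_set() amounts to setting one bit in a byte-array bitmap; the model below uses a placeholder feature index, since the real enum lives in the nl80211 uapi header:

/*
 * Model of an extended-feature bitmap like cfg80211's ext_features[].
 * The CQM_RSSI_LIST index is an assumption for illustration only.
 */
#include <stdio.h>

#define EXT_FEATURE_CQM_RSSI_LIST  5    /* placeholder index */

static unsigned char ext_features[8];

static void ext_feature_set(int ftidx)
{
        ext_features[ftidx / 8] |= 1 << (ftidx % 8);
}

int main(void)
{
        ext_feature_set(EXT_FEATURE_CQM_RSSI_LIST);
        printf("byte0 = 0x%02x\n", ext_features[0]);
        return 0;
}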
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index e1660b92b20c..a2c1ca5c76d1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -372,15 +372,16 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
/*
* Determine IFS values
- * - Use TXOP_BACKOFF for management frames except beacons
+ * - Use TXOP_BACKOFF for probe and management frames except beacons
* - Use TXOP_SIFS for fragment bursts
* - Use TXOP_HTTXOP for everything else
*
* Note: rt2800 devices won't use CTS protection (if used)
* for frames not transmitted with TXOP_HTTXOP
*/
- if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_beacon(hdr->frame_control))
+ if ((ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_beacon(hdr->frame_control)) ||
+ (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
txdesc->u.ht.txop = TXOP_BACKOFF;
else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
txdesc->u.ht.txop = TXOP_SIFS;
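The new rule is a three-way classification: backoff for non-beacon management and rate-control probe frames, SIFS inside a fragment burst, HT TXOP for everything else. A compact sketch with the ieee80211_is_*() predicates reduced to booleans and placeholder TXOP_* values:

/*
 * Sketch of the TXOP selection above; frame checks are simplified
 * to booleans, TXOP_* values are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

enum txop { TXOP_BACKOFF, TXOP_SIFS, TXOP_HTTXOP };

static enum txop pick_txop(bool is_mgmt, bool is_beacon,
                           bool rate_ctrl_probe, bool first_fragment)
{
        if ((is_mgmt && !is_beacon) || rate_ctrl_probe)
                return TXOP_BACKOFF;    /* keep mgmt/probe frames robust */
        if (!first_fragment)
                return TXOP_SIFS;       /* fragment burst */
        return TXOP_HTTXOP;             /* everything else */
}

int main(void)
{
        printf("%d\n", pick_txop(false, false, true, true)); /* 0: backoff */
        return 0;
}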
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
index 22d18818e850..6055f36211b9 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
@@ -102,7 +102,7 @@ enum skb_frame_desc_flags {
* of the scope of the skb->data pointer.
* @iv: IV/EIV data used during encryption/decryption.
* @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
- * @entry: The entry to which this sk buffer belongs.
+ * @sta: The station to which this sk buffer was sent.
*/
struct skb_frame_desc {
u8 flags;
@@ -116,6 +116,7 @@ struct skb_frame_desc {
__le32 iv[2];
dma_addr_t skb_dma;
+ struct ieee80211_sta *sta;
};
/**
@@ -183,6 +184,9 @@ struct rxdone_entry_desc {
int flags;
int dev_flags;
u16 rate_mode;
+ u16 enc_flags;
+ enum mac80211_rx_encoding encoding;
+ enum rate_info_bw bw;
u8 cipher;
u8 cipher_status;
@@ -214,6 +218,7 @@ enum txdone_entry_desc_flags {
TXDONE_FAILURE,
TXDONE_EXCESSIVE_RETRY,
TXDONE_AMPDU,
+ TXDONE_NO_ACK_REQ,
};
/**
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index e895a84481da..225c1c8851cc 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -315,7 +315,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.mactime = tsft;
rx_status.flag |= RX_FLAG_MACTIME_START;
if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
- rx_status.flag |= RX_FLAG_SHORTPRE;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -1877,6 +1877,8 @@ static int rtl8180_probe(struct pci_dev *pdev,
else
ieee80211_hw_set(dev, SIGNAL_UNSPEC);
+ wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
rtl8180_eeprom_read(priv);
switch (priv->rf_type) {
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 231f84db9ab0..35fe991dcc56 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -389,7 +389,7 @@ static void rtl8187_rx_cb(struct urb *urb)
rx_status.band = dev->conf.chandef.chan->band;
rx_status.flag |= RX_FLAG_MACTIME_START;
if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
- rx_status.flag |= RX_FLAG_SHORTPRE;
+ rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
@@ -946,8 +946,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
(7 << 13 /* RX FIFO threshold NONE */) |
(7 << 10 /* MAX RX DMA */) |
RTL818X_RX_CONF_RX_AUTORESETPHY |
- RTL818X_RX_CONF_ONLYERLPKT |
- RTL818X_RX_CONF_MULTICAST;
+ RTL818X_RX_CONF_ONLYERLPKT;
priv->rx_conf = reg;
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
@@ -1319,12 +1318,11 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev,
priv->rx_conf ^= RTL818X_RX_CONF_FCS;
if (changed_flags & FIF_CONTROL)
priv->rx_conf ^= RTL818X_RX_CONF_CTRL;
- if (changed_flags & FIF_OTHER_BSS)
- priv->rx_conf ^= RTL818X_RX_CONF_MONITOR;
- if (*total_flags & FIF_ALLMULTI || multicast > 0)
- priv->rx_conf |= RTL818X_RX_CONF_MULTICAST;
+ if (*total_flags & FIF_OTHER_BSS ||
+ *total_flags & FIF_ALLMULTI || multicast > 0)
+ priv->rx_conf |= RTL818X_RX_CONF_MONITOR;
else
- priv->rx_conf &= ~RTL818X_RX_CONF_MULTICAST;
+ priv->rx_conf &= ~RTL818X_RX_CONF_MONITOR;
*total_flags = 0;
@@ -1332,10 +1330,10 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev,
*total_flags |= FIF_FCSFAIL;
if (priv->rx_conf & RTL818X_RX_CONF_CTRL)
*total_flags |= FIF_CONTROL;
- if (priv->rx_conf & RTL818X_RX_CONF_MONITOR)
+ if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) {
*total_flags |= FIF_OTHER_BSS;
- if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST)
*total_flags |= FIF_ALLMULTI;
+ }
rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf);
}
@@ -1609,6 +1607,8 @@ static int rtl8187_probe(struct usb_interface *intf,
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) ;
+ wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
" info!\n");
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index e544dd1d618c..39d56313bc94 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5041,7 +5041,7 @@ static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv,
u32 rxmcs)
{
if (phy_stats->sgi_en)
- rx_status->flag |= RX_FLAG_SHORT_GI;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rxmcs < DESC_RATE_6M) {
/*
@@ -5267,10 +5267,10 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
if (rx_desc->crc32)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (rx_desc->bw)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (rx_desc->rxht) {
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
} else {
rx_status->rate_idx = rx_desc->rxmcs;
@@ -5337,10 +5337,10 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
if (rx_desc->crc32)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (rx_desc->bw)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (rx_desc->rxmcs >= DESC_RATE_MCS0) {
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
} else {
rx_status->rate_idx = rx_desc->rxmcs;
@@ -6135,6 +6135,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
dev_err(&udev->dev, "%s: Failed to register: %i\n",
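The rtl8xxxu hunks move bandwidth and HT signalling out of rx_status->flag into the dedicated bw and encoding fields. A sketch of that mapping, with the descriptor fields and the DESC_RATE_MCS0 value as simplified assumptions:

/*
 * Sketch of the RX descriptor -> rx_status mapping after the RX_ENC_*
 * conversion. Field names and DESC_RATE_MCS0 are stand-ins for the
 * rtl8xxxu definitions.
 */
#include <stdio.h>

enum rx_encoding { RX_ENC_LEGACY, RX_ENC_HT };
enum rate_bw { BW_20, BW_40 };

struct rx_status {
        enum rx_encoding encoding;
        enum rate_bw bw;
        int rate_idx;
};

#define DESC_RATE_MCS0 12       /* assumed first MCS index */

static void fill_rx_status(struct rx_status *s, int bw40, int rxmcs)
{
        s->bw = bw40 ? BW_40 : BW_20;
        if (rxmcs >= DESC_RATE_MCS0) {
                s->encoding = RX_ENC_HT;
                s->rate_idx = rxmcs - DESC_RATE_MCS0;   /* MCS number */
        } else {
                s->encoding = RX_ENC_LEGACY;
                s->rate_idx = rxmcs;                    /* legacy rate */
        }
}

int main(void)
{
        struct rx_status s;

        fill_rx_status(&s, 1, DESC_RATE_MCS0 + 7);
        printf("HT=%d bw40=%d mcs=%d\n",
               s.encoding == RX_ENC_HT, s.bw == BW_40, s.rate_idx);
        return 0;
}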
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index ffa1f438424d..57e633dbf9a9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -44,7 +44,7 @@ static struct coex_dm_8192e_2ant *coex_dm = &glcoex_dm_8192e_2ant;
static struct coex_sta_8192e_2ant glcoex_sta_8192e_2ant;
static struct coex_sta_8192e_2ant *coex_sta = &glcoex_sta_8192e_2ant;
-static const char *const GLBtInfoSrc8192e2Ant[] = {
+static const char *const glbt_info_src_8192e_2ant[] = {
"BT Info[wifi fw]",
"BT Info[bt rsp]",
"BT Info[bt auto report]",
@@ -57,31 +57,31 @@ static u32 glcoex_ver_8192e_2ant = 0x34;
* local function proto type if needed
**************************************************************/
/**************************************************************
- * local function start with halbtc8192e2ant_
+ * local function start with btc8192e2ant_
**************************************************************/
-static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist,
- u8 level_num, u8 rssi_thresh,
- u8 rssi_thresh1)
+static u8 btc8192e2ant_bt_rssi_state(struct btc_coexist *btcoexist,
+ u8 level_num, u8 rssi_thresh,
+ u8 rssi_thresh1)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- int btrssi = 0;
- u8 btrssi_state = coex_sta->pre_bt_rssi_state;
+ int bt_rssi = 0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
- btrssi = coex_sta->bt_rssi;
+ bt_rssi = coex_sta->bt_rssi;
if (level_num == 2) {
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- if (btrssi >=
+ if (bt_rssi >=
(rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
- btrssi_state = BTC_RSSI_STATE_HIGH;
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
else
- btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
} else {
- if (btrssi < rssi_thresh)
- btrssi_state = BTC_RSSI_STATE_LOW;
+ if (bt_rssi < rssi_thresh)
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
else
- btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
@@ -89,62 +89,63 @@ static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist,
"[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
+
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- if (btrssi >=
+ if (bt_rssi >=
(rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
- btrssi_state = BTC_RSSI_STATE_MEDIUM;
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
else
- btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
(coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_STAY_MEDIUM)) {
- if (btrssi >= (rssi_thresh1 +
+ if (bt_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
- btrssi_state = BTC_RSSI_STATE_HIGH;
- else if (btrssi < rssi_thresh)
- btrssi_state = BTC_RSSI_STATE_LOW;
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ else if (bt_rssi < rssi_thresh)
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
else
- btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
} else {
- if (btrssi < rssi_thresh1)
- btrssi_state = BTC_RSSI_STATE_MEDIUM;
+ if (bt_rssi < rssi_thresh1)
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
else
- btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
}
}
- coex_sta->pre_bt_rssi_state = btrssi_state;
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
- return btrssi_state;
+ return bt_rssi_state;
}
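btc8192e2ant_bt_rssi_state() implements hysteresis: climbing out of LOW requires the threshold plus a tolerance, while falling back uses the bare threshold, so a signal hovering near the threshold cannot flap between states. A runnable two-level model (the STAY_* states are collapsed for brevity and the tolerance value is an assumption, not the real BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT):

/*
 * Runnable model of the two-level RSSI hysteresis used above.
 */
#include <stdio.h>

#define TOL 2   /* assumed hysteresis tolerance */

enum rssi_state { RSSI_LOW, RSSI_HIGH };

static enum rssi_state classify(enum rssi_state prev, int rssi, int thresh)
{
        if (prev == RSSI_LOW)
                return (rssi >= thresh + TOL) ? RSSI_HIGH : RSSI_LOW;
        return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}

int main(void)
{
        enum rssi_state st = RSSI_LOW;
        int samples[] = { 34, 36, 37, 36, 34, 33 };
        int i;

        for (i = 0; i < 6; i++) {
                st = classify(st, samples[i], 35);
                printf("rssi=%d -> %s\n", samples[i],
                       st == RSSI_HIGH ? "HIGH" : "LOW");
        }
        return 0;
}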
-static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
- u8 index, u8 level_num, u8 rssi_thresh,
- u8 rssi_thresh1)
+static u8 btc8192e2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num, u8 rssi_thresh,
+ u8 rssi_thresh1)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- int wifirssi = 0;
- u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index];
+ int wifi_rssi = 0;
+ u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
- btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
if (level_num == 2) {
if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_LOW)) {
- if (wifirssi >= (rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
- wifirssi_state = BTC_RSSI_STATE_HIGH;
+ if (wifi_rssi >=
+ (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
else
- wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
} else {
- if (wifirssi < rssi_thresh)
- wifirssi_state = BTC_RSSI_STATE_LOW;
+ if (wifi_rssi < rssi_thresh)
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
else
- wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
@@ -157,36 +158,37 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_LOW)) {
- if (wifirssi >= (rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
- wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+ if (wifi_rssi >=
+ (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
else
- wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_MEDIUM)) {
- if (wifirssi >= (rssi_thresh1 +
+ if (wifi_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
- wifirssi_state = BTC_RSSI_STATE_HIGH;
- else if (wifirssi < rssi_thresh)
- wifirssi_state = BTC_RSSI_STATE_LOW;
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ else if (wifi_rssi < rssi_thresh)
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
else
- wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
} else {
- if (wifirssi < rssi_thresh1)
- wifirssi_state = BTC_RSSI_STATE_MEDIUM;
+ if (wifi_rssi < rssi_thresh1)
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
else
- wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
}
}
- coex_sta->pre_wifi_rssi_state[index] = wifirssi_state;
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
- return wifirssi_state;
+ return wifi_rssi_state;
}
-static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
+static void btc8192e2ant_monitor_bt_enable_disable(struct btc_coexist
+ *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
static bool pre_bt_disabled;
@@ -236,57 +238,57 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
}
}
-static u32 halbtc8192e2ant_decidera_mask(struct btc_coexist *btcoexist,
- u8 sstype, u32 ra_masktype)
+static u32 btc8192e2ant_decide_ra_mask(struct btc_coexist *btcoexist,
+ u8 ss_type, u32 ra_mask_type)
{
- u32 disra_mask = 0x0;
+ u32 dis_ra_mask = 0x0;
- switch (ra_masktype) {
+ switch (ra_mask_type) {
case 0: /* normal mode */
- if (sstype == 2)
- disra_mask = 0x0; /* enable 2ss */
+ if (ss_type == 2)
+ dis_ra_mask = 0x0; /* enable 2ss */
else
- disra_mask = 0xfff00000;/* disable 2ss */
+ dis_ra_mask = 0xfff00000; /* disable 2ss */
break;
case 1: /* disable cck 1/2 */
- if (sstype == 2)
- disra_mask = 0x00000003;/* enable 2ss */
+ if (ss_type == 2)
+ dis_ra_mask = 0x00000003; /* enable 2ss */
else
- disra_mask = 0xfff00003;/* disable 2ss */
+ dis_ra_mask = 0xfff00003; /* disable 2ss */
break;
case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
- if (sstype == 2)
- disra_mask = 0x0001f1f7;/* enable 2ss */
+ if (ss_type == 2)
+ dis_ra_mask = 0x0001f1f7; /* enable 2ss */
else
- disra_mask = 0xfff1f1f7;/* disable 2ss */
+ dis_ra_mask = 0xfff1f1f7; /* disable 2ss */
break;
default:
break;
}
- return disra_mask;
+ return dis_ra_mask;
}
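The returned value is a rate-disable bitmap: the upper 0xfff00000 bits knock out the two-spatial-stream MCS rates whenever only one stream is allowed, and the low bits optionally disable CCK/OFDM/low-MCS rates on top. A condensed sketch reusing the constants above (their bit-to-rate mapping is the rate-adaptation firmware's convention):

/*
 * Condensed model of btc8192e2ant_decide_ra_mask(); mask constants are
 * copied from the code above.
 */
#include <stdio.h>

static unsigned int decide_ra_mask(int ss_type, int ra_mask_type)
{
        unsigned int two_ss = (ss_type == 2) ? 0x0 : 0xfff00000;

        switch (ra_mask_type) {
        case 0: return two_ss;                  /* normal mode */
        case 1: return two_ss | 0x00000003;     /* also disable cck 1/2 */
        case 2: return two_ss | 0x0001f1f7;     /* cck/ofdm/low mcs too */
        default: return 0;
        }
}

int main(void)
{
        printf("1ss, type2: 0x%08x\n", decide_ra_mask(1, 2));
        return 0;
}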
-static void halbtc8192e2ant_Updatera_mask(struct btc_coexist *btcoexist,
- bool force_exec, u32 dis_ratemask)
+static void btc8192e2ant_update_ra_mask(struct btc_coexist *btcoexist,
+ bool force_exec, u32 dis_rate_mask)
{
- coex_dm->curra_mask = dis_ratemask;
+ coex_dm->cur_ra_mask = dis_rate_mask;
- if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
- &coex_dm->curra_mask);
- coex_dm->prera_mask = coex_dm->curra_mask;
+ if (force_exec || (coex_dm->pre_ra_mask != coex_dm->cur_ra_mask))
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK,
+ &coex_dm->cur_ra_mask);
+ coex_dm->pre_ra_mask = coex_dm->cur_ra_mask;
}
-static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
+static void btc8192e2ant_auto_rate_fallback_retry(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
{
- bool wifi_under_bmode = false;
+ bool wifi_under_b_mode = false;
- coex_dm->cur_arfrtype = type;
+ coex_dm->cur_arfr_type = type;
- if (force_exec || (coex_dm->pre_arfrtype != coex_dm->cur_arfrtype)) {
- switch (coex_dm->cur_arfrtype) {
+ if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) {
+ switch (coex_dm->cur_arfr_type) {
case 0: /* normal mode */
btcoexist->btc_write_4byte(btcoexist, 0x430,
coex_dm->backup_arfr_cnt1);
@@ -296,8 +298,8 @@ static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
case 1:
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_UNDER_B_MODE,
- &wifi_under_bmode);
- if (wifi_under_bmode) {
+ &wifi_under_b_mode);
+ if (wifi_under_b_mode) {
btcoexist->btc_write_4byte(btcoexist, 0x430,
0x0);
btcoexist->btc_write_4byte(btcoexist, 0x434,
@@ -314,46 +316,45 @@ static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist,
}
}
- coex_dm->pre_arfrtype = coex_dm->cur_arfrtype;
+ coex_dm->pre_arfr_type = coex_dm->cur_arfr_type;
}
-static void halbtc8192e2ant_retrylimit(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
+static void btc8192e2ant_retry_limit(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
{
- coex_dm->cur_retrylimit_type = type;
+ coex_dm->cur_retry_limit_type = type;
- if (force_exec || (coex_dm->pre_retrylimit_type !=
- coex_dm->cur_retrylimit_type)) {
- switch (coex_dm->cur_retrylimit_type) {
+ if (force_exec || (coex_dm->pre_retry_limit_type !=
+ coex_dm->cur_retry_limit_type)) {
+ switch (coex_dm->cur_retry_limit_type) {
case 0: /* normal mode */
- btcoexist->btc_write_2byte(btcoexist, 0x42a,
- coex_dm->backup_retrylimit);
- break;
+ btcoexist->btc_write_2byte(btcoexist, 0x42a,
+ coex_dm->backup_retry_limit);
+ break;
case 1: /* retry limit = 8 */
- btcoexist->btc_write_2byte(btcoexist, 0x42a,
- 0x0808);
- break;
+ btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
+ break;
default:
- break;
+ break;
}
}
- coex_dm->pre_retrylimit_type = coex_dm->cur_retrylimit_type;
+ coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type;
}
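retry_limit, ampdu_maxtime, dac_swing and the coex-table setters all share one caching pattern: record the requested value, skip the hardware write when it matches the previous one, and let force_exec override the comparison. The pattern in isolation, with a printf standing in for the btc_write_* register accessors:

/*
 * The pre/cur caching pattern shared by the btc8192e2ant_* setters:
 * hardware is touched only on change, unless force_exec is set.
 */
#include <stdbool.h>
#include <stdio.h>

struct cached_reg {
        unsigned int pre;
        unsigned int cur;
};

static void write_hw(unsigned int val)
{
        printf("hw write: 0x%x\n", val);        /* stands in for btc_write_* */
}

static void set_cached(struct cached_reg *r, bool force_exec,
                       unsigned int val)
{
        r->cur = val;
        if (force_exec || r->pre != r->cur)
                write_hw(r->cur);
        r->pre = r->cur;
}

int main(void)
{
        struct cached_reg r = { 0, 0 };

        set_cached(&r, false, 0x0808);  /* writes */
        set_cached(&r, false, 0x0808);  /* skipped: unchanged */
        set_cached(&r, true, 0x0808);   /* forced write */
        return 0;
}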
-static void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
+static void btc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
{
- coex_dm->cur_ampdutime_type = type;
+ coex_dm->cur_ampdu_time_type = type;
- if (force_exec || (coex_dm->pre_ampdutime_type !=
- coex_dm->cur_ampdutime_type)) {
- switch (coex_dm->cur_ampdutime_type) {
+ if (force_exec || (coex_dm->pre_ampdu_time_type !=
+ coex_dm->cur_ampdu_time_type)) {
+ switch (coex_dm->cur_ampdu_time_type) {
case 0: /* normal mode */
btcoexist->btc_write_1byte(btcoexist, 0x456,
coex_dm->backup_ampdu_maxtime);
break;
- case 1: /* AMPDU timw = 0x38 * 32us */
+ case 1: /* AMPDU time = 0x38 * 32us */
btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
break;
default:
@@ -361,30 +362,30 @@ static void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist,
}
}
- coex_dm->pre_ampdutime_type = coex_dm->cur_ampdutime_type;
+ coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type;
}
-static void halbtc8192e2ant_limited_tx(struct btc_coexist *btcoexist,
- bool force_exec, u8 ra_masktype,
- u8 arfr_type, u8 retrylimit_type,
- u8 ampdutime_type)
+static void btc8192e2ant_limited_tx(struct btc_coexist *btcoexist,
+ bool force_exec, u8 ra_mask_type,
+ u8 arfr_type, u8 retry_limit_type,
+ u8 ampdu_time_type)
{
- u32 disra_mask = 0x0;
+ u32 dis_ra_mask = 0x0;
- coex_dm->curra_masktype = ra_masktype;
- disra_mask = halbtc8192e2ant_decidera_mask(btcoexist,
- coex_dm->cur_sstype,
- ra_masktype);
- halbtc8192e2ant_Updatera_mask(btcoexist, force_exec, disra_mask);
-btc8192e2ant_autorate_fallback_retry(btcoexist, force_exec, arfr_type);
- halbtc8192e2ant_retrylimit(btcoexist, force_exec, retrylimit_type);
- halbtc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdutime_type);
+ coex_dm->cur_ra_mask_type = ra_mask_type;
+ dis_ra_mask =
+ btc8192e2ant_decide_ra_mask(btcoexist, coex_dm->cur_ss_type,
+ ra_mask_type);
+ btc8192e2ant_update_ra_mask(btcoexist, force_exec, dis_ra_mask);
+ btc8192e2ant_auto_rate_fallback_retry(btcoexist, force_exec, arfr_type);
+ btc8192e2ant_retry_limit(btcoexist, force_exec, retry_limit_type);
+ btc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdu_time_type);
}
-static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
- bool force_exec, bool rej_ap_agg_pkt,
- bool bt_ctrl_agg_buf_size,
- u8 agg_buf_size)
+static void btc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
+ bool force_exec, bool rej_ap_agg_pkt,
+ bool bt_ctrl_agg_buf_size,
+ u8 agg_buf_size)
{
bool reject_rx_agg = rej_ap_agg_pkt;
bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
@@ -406,7 +407,7 @@ static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
}
-static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+static void btc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
@@ -417,11 +418,11 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
reg_hp_tx = u32tmp & MASKLWORD;
- reg_hp_rx = (u32tmp & MASKHWORD)>>16;
+ reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
reg_lp_tx = u32tmp & MASKLWORD;
- reg_lp_rx = (u32tmp & MASKHWORD)>>16;
+ reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
coex_sta->high_priority_tx = reg_hp_tx;
coex_sta->high_priority_rx = reg_hp_rx;
@@ -439,14 +440,14 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
}
-static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
+static void btc8192e2ant_query_bt_info(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
coex_sta->c2h_bt_info_req_sent = true;
- h2c_parameter[0] |= BIT0; /* trigger */
+ h2c_parameter[0] |= BIT0; /* trigger */
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
@@ -455,12 +456,12 @@ static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
-static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
+static void btc8192e2ant_update_bt_link_info(struct btc_coexist *btcoexist)
{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hson = false;
+ bool bt_hs_on = false;
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
bt_link_info->sco_exist = coex_sta->sco_exist;
@@ -469,7 +470,7 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
bt_link_info->hid_exist = coex_sta->hid_exist;
/* work around for HS mode. */
- if (bt_hson) {
+ if (bt_hs_on) {
bt_link_info->pan_exist = true;
bt_link_info->bt_link_exist = true;
}
@@ -511,16 +512,16 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
bt_link_info->hid_only = false;
}
-static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
+static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- bool bt_hson = false;
+ bool bt_hs_on = false;
u8 algorithm = BT_8192E_2ANT_COEX_ALGO_UNDEFINED;
- u8 numdiffprofile = 0;
+ u8 num_of_diff_profile = 0;
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -529,15 +530,15 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
}
if (bt_link_info->sco_exist)
- numdiffprofile++;
+ num_of_diff_profile++;
if (bt_link_info->hid_exist)
- numdiffprofile++;
+ num_of_diff_profile++;
if (bt_link_info->pan_exist)
- numdiffprofile++;
+ num_of_diff_profile++;
if (bt_link_info->a2dp_exist)
- numdiffprofile++;
+ num_of_diff_profile++;
- if (numdiffprofile == 1) {
+ if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"SCO only\n");
@@ -552,7 +553,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
"A2DP only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
- if (bt_hson) {
+ if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"PAN(HS) only\n");
@@ -567,7 +568,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
}
}
}
- } else if (numdiffprofile == 2) {
+ } else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -578,7 +579,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
"SCO + A2DP ==> SCO\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
- if (bt_hson) {
+ if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"SCO + PAN(HS)\n");
@@ -609,7 +610,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
}
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
- if (bt_hson) {
+ if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"HID + PAN(HS)\n");
@@ -623,7 +624,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
- if (bt_hson) {
+ if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"A2DP + PAN(HS)\n");
@@ -638,7 +639,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
}
}
}
- } else if (numdiffprofile == 3) {
+ } else if (num_of_diff_profile == 3) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
@@ -647,7 +648,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
- if (bt_hson) {
+ if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"SCO + HID + PAN(HS)\n");
@@ -661,7 +662,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
- if (bt_hson) {
+ if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"SCO + A2DP + PAN(HS)\n");
@@ -678,7 +679,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->hid_exist &&
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
- if (bt_hson) {
+ if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"HID + A2DP + PAN(HS)\n");
@@ -693,12 +694,12 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
}
}
}
- } else if (numdiffprofile >= 3) {
+ } else if (num_of_diff_profile >= 3) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
- if (bt_hson) {
+ if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"ErrorSCO+HID+A2DP+PAN(HS)\n");
@@ -717,8 +718,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
return algorithm;
}
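Algorithm selection counts the active BT profiles and then dispatches on the exact combination, special-casing high-speed (HS) links throughout. A skeleton that models only the shape of that decision, with most branches deliberately elided:

/*
 * Skeleton of btc8192e2ant_action_algorithm(): count profiles, then
 * dispatch on the combination. Only a few branches are modeled.
 */
#include <stdbool.h>
#include <stdio.h>

enum coex_algo { ALGO_UNDEFINED, ALGO_SCO, ALGO_A2DP, ALGO_PANEDR_HID };

static enum coex_algo pick_algo(bool sco, bool hid, bool pan, bool a2dp)
{
        int n = sco + hid + pan + a2dp;

        if (n == 0)
                return ALGO_UNDEFINED;          /* no BT link */
        if (n == 1)
                return sco ? ALGO_SCO :
                       a2dp ? ALGO_A2DP : ALGO_UNDEFINED; /* hid/pan elided */
        if (sco && hid)
                return ALGO_PANEDR_HID;         /* SCO + HID case */
        return ALGO_UNDEFINED;                  /* remaining combos elided */
}

int main(void)
{
        printf("%d\n", pick_algo(true, true, false, false));
        return 0;
}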
-static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
- u8 dac_swinglvl)
+static void btc8192e2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+ u8 dac_swing_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
@@ -726,81 +727,81 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
/* There are several type of dacswing
* 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
*/
- h2c_parameter[0] = dac_swinglvl;
+ h2c_parameter[0] = dac_swing_lvl;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
-static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
- u8 dec_btpwr_lvl)
+static void btc8192e2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+ u8 dec_bt_pwr_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
- h2c_parameter[0] = dec_btpwr_lvl;
+ h2c_parameter[0] = dec_bt_pwr_lvl;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
- dec_btpwr_lvl, h2c_parameter[0]);
+ dec_bt_pwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
-static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
- bool force_exec, u8 dec_btpwr_lvl)
+static void btc8192e2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool force_exec, u8 dec_bt_pwr_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], %s Dec BT power level = %d\n",
- force_exec ? "force to" : "", dec_btpwr_lvl);
- coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
+ force_exec ? "force to" : "", dec_bt_pwr_lvl);
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr_lvl;
if (!force_exec) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
}
- halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+ btc8192e2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
}
-static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
- bool enable_autoreport)
+static void btc8192e2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
+ bool enable_auto_report)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = 0;
- if (enable_autoreport)
+ if (enable_auto_report)
h2c_parameter[0] |= BIT0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
-static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
- bool force_exec,
- bool enable_autoreport)
+static void btc8192e2ant_bt_auto_report(struct btc_coexist *btcoexist,
+ bool force_exec,
+ bool enable_auto_report)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], %s BT Auto report = %s\n",
(force_exec ? "force to" : ""),
- ((enable_autoreport) ? "Enabled" : "Disabled"));
- coex_dm->cur_bt_auto_report = enable_autoreport;
+ ((enable_auto_report) ? "Enabled" : "Disabled"));
+ coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -811,21 +812,21 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
}
- halbtc8192e2ant_set_bt_autoreport(btcoexist,
- coex_dm->cur_bt_auto_report);
+ btc8192e2ant_set_bt_auto_report(btcoexist,
+ coex_dm->cur_bt_auto_report);
coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
}
-static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
- bool force_exec, u8 fw_dac_swinglvl)
+static void btc8192e2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swing_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swinglvl);
- coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -838,8 +839,8 @@ static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
return;
}
- halbtc8192e2ant_setfw_dac_swinglevel(btcoexist,
- coex_dm->cur_fw_dac_swing_lvl);
+ btc8192e2ant_set_fw_dac_swing_level(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
}
@@ -869,8 +870,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
}
}
-static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
- bool force_exec, bool rx_rf_shrink_on)
+static void btc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -896,8 +897,8 @@ static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
}
-static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
- u32 level)
+static void btc8192e2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+ u32 level)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8)level;
@@ -907,28 +908,28 @@ static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
-static void btc8192e2ant_setsw_full_swing(struct btc_coexist *btcoexist,
- bool sw_dac_swingon,
- u32 sw_dac_swinglvl)
+static void btc8192e2ant_set_sw_full_swing(struct btc_coexist *btcoexist,
+ bool sw_dac_swing_on,
+ u32 sw_dac_swing_lvl)
{
- if (sw_dac_swingon)
- halbtc8192e2ant_set_dac_swingreg(btcoexist, sw_dac_swinglvl);
+ if (sw_dac_swing_on)
+ btc8192e2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
else
- halbtc8192e2ant_set_dac_swingreg(btcoexist, 0x18);
+ btc8192e2ant_set_dac_swing_reg(btcoexist, 0x18);
}
-static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
- bool force_exec, bool dac_swingon,
- u32 dac_swinglvl)
+static void btc8192e2ant_dac_swing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swing_on,
+ u32 dac_swing_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl = 0x%x\n",
(force_exec ? "force to" : ""),
- ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
- coex_dm->cur_dac_swing_on = dac_swingon;
- coex_dm->cur_dac_swing_lvl = dac_swinglvl;
+ ((dac_swing_on) ? "ON" : "OFF"), dac_swing_lvl);
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -945,14 +946,14 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
return;
}
mdelay(30);
- btc8192e2ant_setsw_full_swing(btcoexist, dac_swingon, dac_swinglvl);
+ btc8192e2ant_set_sw_full_swing(btcoexist, dac_swing_on, dac_swing_lvl);
coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
}
-static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
- bool agc_table_en)
+static void btc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool agc_table_en)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -978,8 +979,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
}
}
-static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
- bool force_exec, bool agc_table_en)
+static void btc8192e2ant_agc_table(struct btc_coexist *btcoexist,
+ bool force_exec, bool agc_table_en)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -998,14 +999,14 @@ static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
}
- halbtc8192e2ant_set_agc_table(btcoexist, agc_table_en);
+ btc8192e2ant_set_agc_table(btcoexist, agc_table_en);
coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
}
-static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
- u32 val0x6c0, u32 val0x6c4,
- u32 val0x6c8, u8 val0x6cc)
+static void btc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1026,10 +1027,9 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
-static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
- bool force_exec,
- u32 val0x6c0, u32 val0x6c4,
- u32 val0x6c8, u8 val0x6cc)
+static void btc8192e2ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1064,8 +1064,8 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
(coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
return;
}
- halbtc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
- val0x6c8, val0x6cc);
+ btc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8,
+ val0x6cc);
coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
@@ -1073,37 +1073,37 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
}
-static void btc8192e2ant_coex_tbl_w_type(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
+static void btc8192e2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
{
switch (type) {
case 0:
- halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5a5a5a5a, 0xffffff, 0x3);
+ btc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5a5a5a5a, 0xffffff, 0x3);
break;
case 1:
- halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
- 0x5a5a5a5a, 0xffffff, 0x3);
+ btc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffffff, 0x3);
break;
case 2:
- halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5ffb5ffb, 0xffffff, 0x3);
+ btc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5ffb5ffb, 0xffffff, 0x3);
break;
case 3:
- halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
- 0x5fdb5fdb, 0xffffff, 0x3);
+ btc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+ 0x5fdb5fdb, 0xffffff, 0x3);
break;
case 4:
- halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
- 0x5ffb5ffb, 0xffffff, 0x3);
+ btc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff,
+ 0x5ffb5ffb, 0xffffff, 0x3);
break;
default:
break;
}
}
-static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
- bool enable)
+static void btc8192e2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool enable)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
@@ -1118,8 +1118,8 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
-static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
- bool force_exec, bool enable)
+static void btc8192e2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1140,12 +1140,12 @@ static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
coex_dm->cur_ignore_wlan_act)
return;
}
- halbtc8192e2ant_set_fw_ignore_wlanact(btcoexist, enable);
+ btc8192e2ant_set_fw_ignore_wlan_act(btcoexist, enable);
coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
}
-static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
+static void btc8192e2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
u8 byte2, u8 byte3, u8 byte4, u8 byte5)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1173,24 +1173,24 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
-static void btc8192e2ant_sw_mec1(struct btc_coexist *btcoexist,
- bool shrink_rx_lpf, bool low_penalty_ra,
- bool limited_dig, bool btlan_constrain)
+static void btc8192e2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool btlan_constrain)
{
- halbtc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+ btc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
}
-static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist,
- bool agc_table_shift, bool adc_backoff,
- bool sw_dac_swing, u32 dac_swinglvl)
+static void btc8192e2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swing_lvl)
{
- halbtc8192e2ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift);
- halbtc8192e2ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing,
- dac_swinglvl);
+ btc8192e2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
+ btc8192e2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+ dac_swing_lvl);
}
-static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
- bool force_exec, bool turn_on, u8 type)
+static void btc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
+ bool force_exec, bool turn_on, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1217,91 +1217,91 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
switch (type) {
case 1:
default:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
0x1a, 0xe1, 0x90);
break;
case 2:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
0x12, 0xe1, 0x90);
break;
case 3:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
0x3, 0xf1, 0x90);
break;
case 4:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
0x3, 0xf1, 0x90);
break;
case 5:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
0x1a, 0x60, 0x90);
break;
case 6:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
0x12, 0x60, 0x90);
break;
case 7:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
0x3, 0x70, 0x90);
break;
case 8:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xa3, 0x10,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
0x3, 0x70, 0x90);
break;
case 9:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
0x1a, 0xe1, 0x10);
break;
case 10:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
0x12, 0xe1, 0x10);
break;
case 11:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
0x3, 0xf1, 0x10);
break;
case 12:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
0x3, 0xf1, 0x10);
break;
case 13:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
0x1a, 0xe0, 0x10);
break;
case 14:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
0x12, 0xe0, 0x10);
break;
case 15:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
0x3, 0xf0, 0x10);
break;
case 16:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
0x3, 0xf0, 0x10);
break;
case 17:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0x61, 0x20,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20,
0x03, 0x10, 0x10);
break;
case 18:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x5,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
0x5, 0xe1, 0x90);
break;
case 19:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
0x25, 0xe1, 0x90);
break;
case 20:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
0x25, 0x60, 0x90);
break;
case 21:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x15,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
0x03, 0x70, 0x90);
break;
case 71:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
0x1a, 0xe1, 0x90);
break;
}
@@ -1310,12 +1310,12 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
switch (type) {
default:
case 0:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0, 0x0,
0x0, 0x0);
btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4);
break;
case 1:
- halbtc8192e2ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0,
+ btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
0x8, 0x0);
mdelay(5);
btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20);
@@ -1328,22 +1328,22 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
}
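Every PS-TDMA case above reduces to packing five bytes into a single H2C command at opcode 0x60; the byte semantics are firmware-defined and not spelled out here. A packing sketch with a stand-in for btc_fill_h2c():

/*
 * Sketch of the 5-byte PS-TDMA H2C packing done by
 * btc8192e2ant_set_fw_ps_tdma(). Byte meanings are firmware-internal.
 */
#include <stdio.h>

static void fill_h2c(unsigned char op, int len, const unsigned char *p)
{
        int i;

        printf("h2c 0x%02x:", op);      /* stands in for btc_fill_h2c() */
        for (i = 0; i < len; i++)
                printf(" %02x", p[i]);
        printf("\n");
}

static void set_fw_ps_tdma(unsigned char b1, unsigned char b2,
                           unsigned char b3, unsigned char b4,
                           unsigned char b5)
{
        unsigned char h2c[5] = { b1, b2, b3, b4, b5 };

        fill_h2c(0x60, 5, h2c);
}

int main(void)
{
        set_fw_ps_tdma(0xe3, 0x1a, 0x1a, 0xe1, 0x90);   /* tdma type 1 */
        return 0;
}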
-static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
- u8 sstype)
+static void btc8192e2ant_set_switch_ss_type(struct btc_coexist *btcoexist,
+ u8 ss_type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 mimops = BTC_MIMO_PS_DYNAMIC;
- u32 disra_mask = 0x0;
+ u32 dis_ra_mask = 0x0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], REAL set SS Type = %d\n", sstype);
+ "[BTCoex], REAL set SS Type = %d\n", ss_type);
- disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
- coex_dm->curra_masktype);
- halbtc8192e2ant_Updatera_mask(btcoexist, FORCE_EXEC, disra_mask);
+ dis_ra_mask = btc8192e2ant_decide_ra_mask(btcoexist, ss_type,
+ coex_dm->cur_ra_mask_type);
+ btc8192e2ant_update_ra_mask(btcoexist, FORCE_EXEC, dis_ra_mask);
- if (sstype == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ if (ss_type == 1) {
+ btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
/* switch ofdm path */
btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x11);
btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x1);
@@ -1352,8 +1352,8 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x1);
btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x81);
mimops = BTC_MIMO_PS_STATIC;
- } else if (sstype == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+ } else if (ss_type == 2) {
+ btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x33);
btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x3);
btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81121313);
@@ -1365,89 +1365,89 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
btcoexist->btc_set(btcoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimops);
}
-static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
- bool force_exec, u8 new_sstype)
+static void btc8192e2ant_switch_ss_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 new_ss_type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], %s Switch SS Type = %d\n",
- (force_exec ? "force to" : ""), new_sstype);
- coex_dm->cur_sstype = new_sstype;
+ (force_exec ? "force to" : ""), new_ss_type);
+ coex_dm->cur_ss_type = new_ss_type;
if (!force_exec) {
- if (coex_dm->pre_sstype == coex_dm->cur_sstype)
+ if (coex_dm->pre_ss_type == coex_dm->cur_ss_type)
return;
}
- halbtc8192e2ant_set_switch_sstype(btcoexist, coex_dm->cur_sstype);
+ btc8192e2ant_set_switch_ss_type(btcoexist, coex_dm->cur_ss_type);
- coex_dm->pre_sstype = coex_dm->cur_sstype;
+ coex_dm->pre_ss_type = coex_dm->cur_ss_type;
}
-static void halbtc8192e2ant_coex_alloff(struct btc_coexist *btcoexist)
+static void btc8192e2ant_coex_all_off(struct btc_coexist *btcoexist)
{
/* fw all off */
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
/* sw all off */
- btc8192e2ant_sw_mec1(btcoexist, false, false, false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
/* hw all off */
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
}
-static void halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+static void btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
/* force to reset coex mechanism */
- halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, FORCE_EXEC, 6);
- halbtc8192e2ant_dec_btpwr(btcoexist, FORCE_EXEC, 0);
+ btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ btc8192e2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0);
- btc8192e2ant_coex_tbl_w_type(btcoexist, FORCE_EXEC, 0);
- halbtc8192e2ant_switch_sstype(btcoexist, FORCE_EXEC, 2);
+ btc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+ btc8192e2ant_switch_ss_type(btcoexist, FORCE_EXEC, 2);
- btc8192e2ant_sw_mec1(btcoexist, false, false, false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
}
-static void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
{
bool low_pwr_disable = true;
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8192e2ant_sw_mec1(btcoexist, false, false, false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
}
-static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
+static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool common = false, wifi_connected = false, wifi_busy = false;
- bool bt_hson = false, low_pwr_disable = false;
+ bool bt_hs_on = false, low_pwr_disable = false;
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
if (bt_link_info->sco_exist || bt_link_info->hid_exist)
- halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0);
+ btc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0);
else
- halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ btc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
if (!wifi_connected) {
low_pwr_disable = false;
@@ -1461,26 +1461,24 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
coex_dm->bt_status) ||
(BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- halbtc8192e2ant_switch_sstype(btcoexist,
- NORMAL_EXEC, 2);
- btc8192e2ant_coex_tbl_w_type(btcoexist,
- NORMAL_EXEC, 1);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 0);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
} else {
- halbtc8192e2ant_switch_sstype(btcoexist,
- NORMAL_EXEC, 1);
- btc8192e2ant_coex_tbl_w_type(btcoexist,
- NORMAL_EXEC, 0);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 1);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
}
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8192e2ant_sw_mec1(btcoexist, false, false, false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
common = true;
} else {
@@ -1494,20 +1492,18 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Wifi connected + BT non connected-idle!!\n");
- halbtc8192e2ant_switch_sstype(btcoexist,
- NORMAL_EXEC, 2);
- btc8192e2ant_coex_tbl_w_type(btcoexist,
- NORMAL_EXEC, 1);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 0);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
- NORMAL_EXEC, 6);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist,
+ NORMAL_EXEC, 6);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
common = true;
} else if (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE ==
@@ -1517,25 +1513,25 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- if (bt_hson)
+ if (bt_hs_on)
return false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Wifi connected + BT connected-idle!!\n");
- halbtc8192e2ant_switch_sstype(btcoexist,
- NORMAL_EXEC, 2);
- btc8192e2ant_coex_tbl_w_type(btcoexist,
- NORMAL_EXEC, 1);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 0);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
- NORMAL_EXEC, 6);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
-
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_switch_ss_type(btcoexist,
+ NORMAL_EXEC, 2);
+ btc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ false, 0);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist,
+ NORMAL_EXEC, 6);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
common = true;
} else {
@@ -1552,20 +1548,21 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Wifi Connected-Idle + BT Busy!!\n");
- halbtc8192e2ant_switch_sstype(btcoexist,
- NORMAL_EXEC, 1);
- btc8192e2ant_coex_tbl_w_type(btcoexist,
- NORMAL_EXEC, 2);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 21);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist,
- NORMAL_EXEC, 6);
- halbtc8192e2ant_dec_btpwr(btcoexist,
- NORMAL_EXEC, 0);
- btc8192e2ant_sw_mec1(btcoexist, false,
- false, false, false);
- btc8192e2ant_sw_mec2(btcoexist, false,
- false, false, 0x18);
+ btc8192e2ant_switch_ss_type(btcoexist,
+ NORMAL_EXEC, 1);
+ btc8192e2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC,
+ 2);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 21);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist,
+ NORMAL_EXEC, 6);
+ btc8192e2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC, 0);
+ btc8192e2ant_sw_mechanism1(btcoexist, false,
+ false, false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false,
+ false, false, 0x18);
common = true;
}
}
@@ -1573,588 +1570,9 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
return common;
}
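
The three btc8192e_int* helpers removed below are long if/else ladders that step the current PS-TDMA case to a neighbouring case depending on TxPause and the tuning result. The same mapping can be expressed as data; the transitions in this sketch are copied from the result == 1, TxPause == 1 branch of btc8192e_int1 below, while the struct and function names are illustrative:

	/* Table-driven sketch of one btc8192e_int1 branch (TxPause == 1,
	 * result == 1); transitions mirror the ladder below. */
	struct tdma_step { u8 cur; u8 next; };

	static const struct tdma_step int1_pause_shrink[] = {
		{ 8, 7 }, { 7, 6 }, { 6, 5 },
		{ 16, 15 }, { 15, 14 }, { 14, 13 },
	};

	static u8 tdma_next_case(const struct tdma_step *tbl, int n, u8 cur)
	{
		int i;

		for (i = 0; i < n; i++)
			if (tbl[i].cur == cur)
				return tbl[i].next;
		return cur;	/* no rule: keep the current case */
	}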
-static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
- int result)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (tx_pause) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
-
- if (coex_dm->cur_ps_tdma == 71) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- }
- if (coex_dm->cur_ps_tdma == 9) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 13);
- coex_dm->tdma_adj_type = 13;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
-
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 13);
- coex_dm->tdma_adj_type = 13;
- }
- }
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 71);
- coex_dm->tdma_adj_type = 71;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- }
- if (coex_dm->cur_ps_tdma == 13) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
-
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 71) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
- } else if (coex_dm->cur_ps_tdma == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
- } else if (coex_dm->cur_ps_tdma == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 71);
- coex_dm->tdma_adj_type = 71;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- }
- }
- }
-}
-
-static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
- int result)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (tx_pause) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
- if (coex_dm->cur_ps_tdma == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- }
- if (coex_dm->cur_ps_tdma == 9) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- }
- }
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- }
- if (coex_dm->cur_ps_tdma == 13) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- }
- }
- }
-}
-
-static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
- int result)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (tx_pause) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
- if (coex_dm->cur_ps_tdma == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- }
- if (coex_dm->cur_ps_tdma == 9) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- }
- }
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- }
- if (coex_dm->cur_ps_tdma == 13) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- }
- }
- }
-}
-
-static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
- bool sco_hid, bool tx_pause,
- u8 max_interval)
+static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+ bool sco_hid, bool tx_pause,
+ u8 max_interval)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
static int up, dn, m, n, wait_cnt;
@@ -2174,72 +1592,72 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 13);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
coex_dm->tdma_adj_type = 13;
} else if (max_interval == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 14);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
coex_dm->tdma_adj_type = 14;
} else {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 15);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
coex_dm->tdma_adj_type = 15;
}
} else {
if (max_interval == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 9);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
coex_dm->tdma_adj_type = 9;
} else if (max_interval == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 10);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
coex_dm->tdma_adj_type = 10;
} else {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 11);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
coex_dm->tdma_adj_type = 11;
}
}
} else {
if (tx_pause) {
if (max_interval == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 5);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
coex_dm->tdma_adj_type = 5;
} else if (max_interval == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 6);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
coex_dm->tdma_adj_type = 6;
} else {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 7);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
coex_dm->tdma_adj_type = 7;
}
} else {
if (max_interval == 1) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 1);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 1);
coex_dm->tdma_adj_type = 1;
} else if (max_interval == 2) {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 2);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
coex_dm->tdma_adj_type = 2;
} else {
- halbtc8192e2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 3);
+ btc8192e2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
coex_dm->tdma_adj_type = 3;
}
}
@@ -2322,12 +1740,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], max Interval = %d\n", max_interval);
- if (max_interval == 1)
- btc8192e_int1(btcoexist, tx_pause, result);
- else if (max_interval == 2)
- btc8192e_int2(btcoexist, tx_pause, result);
- else if (max_interval == 3)
- btc8192e_int3(btcoexist, tx_pause, result);
}
/* if current PsTdma not match with
@@ -2348,9 +1760,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (!scan && !link && !roam)
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true,
- coex_dm->tdma_adj_type);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, coex_dm->tdma_adj_type);
else
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
@@ -2358,583 +1769,578 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
}
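
btc8192e2ant_tdma_duration_adjust keeps static up/dn counters across calls (the "static int up, dn, m, n, wait_cnt" above) and condenses them into a -1/0/+1 trend that selects the next PS-TDMA case — the role the deleted btc8192e_int* dispatch used to play for max_interval 1-3. A schematic of that hysteresis, with illustrative counter thresholds rather than the driver's exact tuning:

	/* Schematic of the up/dn trend logic; the thresholds here are
	 * illustrative, not the driver's exact tuning. */
	static int tdma_trend(u32 bt_retry_cnt, int *up, int *dn)
	{
		int result = 0;

		if (bt_retry_cnt == 0) {	/* BT link is clean */
			(*up)++;
			*dn = 0;
			if (*up >= 3) {		/* sustained: more wifi air */
				*up = 0;
				result = 1;
			}
		} else {			/* BT is retrying */
			(*dn)++;
			*up = 0;
			if (*dn >= 3) {		/* sustained: more BT air */
				*dn = 0;
				result = -1;
			}
		}
		return result;
	}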
/* SCO only or SCO+PAN(HS) */
-static void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_sco(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
}
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x6);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x6);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x6);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x6);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
}
}
}
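
btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42) above reads as a three-level classifier with stay-states: the reading must clear a threshold plus a hysteresis margin before the reported state moves up, so the coex policy does not flap between TDMA cases on a noisy RSSI. A stand-alone model of that behaviour, assuming 34/42 are the two thresholds (the enum, margin handling, and function name are illustrative):

	/* Stand-alone model of three-level RSSI hysteresis. */
	enum rssi_state { RSSI_LOW, RSSI_MEDIUM, RSSI_HIGH };

	static enum rssi_state rssi_classify(enum rssi_state prev, int rssi,
					     int thresh0, int thresh1,
					     int margin)
	{
		switch (prev) {
		case RSSI_LOW:
			if (rssi >= thresh1 + margin)
				return RSSI_HIGH;
			if (rssi >= thresh0 + margin)
				return RSSI_MEDIUM;
			return RSSI_LOW;
		case RSSI_MEDIUM:
			if (rssi >= thresh1 + margin)
				return RSSI_HIGH;
			if (rssi < thresh0)
				return RSSI_LOW;
			return RSSI_MEDIUM;
		default: /* RSSI_HIGH */
			if (rssi < thresh0)
				return RSSI_LOW;
			if (rssi < thresh1)
				return RSSI_MEDIUM;
			return RSSI_HIGH;
		}
	}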
-static void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
}
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x6);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x6);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x6);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x6);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x6);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x6);
}
}
}
-static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_hid(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
}
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
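
The BT-RSSI branch in each of these action handlers reduces to a small per-state policy: a BT power decrement plus a PS-TDMA case. The values below are taken from the HID handler above, in low/medium/high order; the struct name is illustrative:

	/* action_hid's BT-RSSI -> policy mapping, expressed as data. */
	struct rssi_policy { u8 dec_bt_pwr; u8 ps_tdma_case; };

	static const struct rssi_policy hid_policy[3] = {
		{ 0, 13 },	/* low / stay-low */
		{ 2,  9 },	/* medium / stay-medium */
		{ 4,  9 },	/* high / stay-high */
	};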
/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
-static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
bool long_dist = false;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- if ((btrssi_state == BTC_RSSI_STATE_LOW ||
- btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
- (wifirssi_state == BTC_RSSI_STATE_LOW ||
- wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW ||
+ bt_rssi_state == BTC_RSSI_STATE_STAY_LOW) &&
+ (wifi_rssi_state == BTC_RSSI_STATE_LOW ||
+ wifi_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
long_dist = true;
}
if (long_dist) {
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true,
- 0x4);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true,
+ 0x4);
} else {
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
- 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+ 0x8);
}
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
if (long_dist)
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
else
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
if (long_dist) {
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17);
coex_dm->auto_tdma_adjust = false;
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
} else {
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
- true, 1);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
- false, 1);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
- false, 1);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ true, 1);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ false, 1);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false,
+ false, 1);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
}
}
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
-static void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
- false, 2);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
- false, 2);
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 2);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 2);
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
}
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- true, 0x6);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x6);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- true, 0x6);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x6);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- true, 0x6);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x6);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- true, 0x6);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x6);
}
}
}
-static void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
}
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
/* PAN(HS) only */
-static void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
}
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
/* PAN(EDR)+A2DP */
-static void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
- false, 3);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, false,
- false, 3);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 3);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 3);
}
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, false,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
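/* Illustrative sketch (hypothetical helper, and the meanings of the
 * fixed boolean arguments are not spelled out in this patch): each
 * handler's HT40/RSSI ladder reduces to two booleans, the bandwidth
 * drives the first sw_mechanism1 argument and a high WiFi RSSI drives
 * the first sw_mechanism2 argument; the HID-carrying handlers
 * additionally pass true as the second sw_mechanism1 argument.
 */
static void example_sw_mechanism(struct btc_coexist *btcoexist,
				 u32 wifi_bw, u8 wifi_rssi_state)
{
	bool ht40 = (wifi_bw == BTC_WIFI_BW_HT40);
	bool wifi_rssi_high =
		(wifi_rssi_state == BTC_RSSI_STATE_HIGH ||
		 wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH);

	btc8192e2ant_sw_mechanism1(btcoexist, ht40, false, false, false);
	btc8192e2ant_sw_mechanism2(btcoexist, wifi_rssi_high, false,
				   false, 0x18);
}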
-static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
- halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
}
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
@@ -2942,125 +2348,125 @@ static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
/* HID+A2DP+PAN(EDR) */
static void btc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+ btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3);
}
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
-static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+static void btc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
{
- u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
+ u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_bw;
- wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
- btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
+ wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42);
- halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
- halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1);
+ btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3);
+ btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
- if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2);
- } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
- } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
- (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
- halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ if ((bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4);
+ btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2);
}
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, true, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, true, false,
- false, 0x18);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8192e2ant_sw_mec1(btcoexist, false, true,
- false, false);
- btc8192e2ant_sw_mec2(btcoexist, false, false,
- false, 0x18);
+ btc8192e2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8192e2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
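/* Illustrative sketch of the tdma_duration_adjust() calls above
 * (hypothetical wrapper; "hid_in_mix" is an assumed reading of the
 * first flag, which the HID-carrying handlers set to true): the second
 * flag is true only when BT is weak enough that its Tx should yield,
 * and the last argument selects the maximum TDMA interval case (2 or 3
 * in the handlers above).
 */
static void example_tdma_adjust(struct btc_coexist *btcoexist,
				bool hid_in_mix, u8 bt_rssi_state,
				u8 max_interval)
{
	bool bt_weak = (bt_rssi_state == BTC_RSSI_STATE_LOW ||
			bt_rssi_state == BTC_RSSI_STATE_STAY_LOW);

	btc8192e2ant_tdma_duration_adjust(btcoexist, hid_in_mix, bt_weak,
					  max_interval);
}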
-static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+static void btc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 algorithm = 0;
@@ -3080,12 +2486,12 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
return;
}
- algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
+ algorithm = btc8192e2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BT is under inquiry/page scan !!\n");
- halbtc8192e2ant_action_bt_inquiry(btcoexist);
+ btc8192e2ant_action_bt_inquiry(btcoexist);
return;
}
@@ -3093,7 +2499,7 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
- if (halbtc8192e2ant_is_common_action(btcoexist)) {
+ if (btc8192e2ant_is_common_action(btcoexist)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
@@ -3109,47 +2515,47 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
case BT_8192E_2ANT_COEX_ALGO_SCO:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = SCO\n");
- halbtc8192e2ant_action_sco(btcoexist);
+ btc8192e2ant_action_sco(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
- halbtc8192e2ant_action_sco_pan(btcoexist);
+ btc8192e2ant_action_sco_pan(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = HID\n");
- halbtc8192e2ant_action_hid(btcoexist);
+ btc8192e2ant_action_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = A2DP\n");
- halbtc8192e2ant_action_a2dp(btcoexist);
+ btc8192e2ant_action_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
- halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
+ btc8192e2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = PAN(EDR)\n");
- halbtc8192e2ant_action_pan_edr(btcoexist);
+ btc8192e2ant_action_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANHS:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = HS mode\n");
- halbtc8192e2ant_action_pan_hs(btcoexist);
+ btc8192e2ant_action_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = PAN+A2DP\n");
- halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
+ btc8192e2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = PAN(EDR)+HID\n");
- halbtc8192e2ant_action_pan_edr_hid(btcoexist);
+ btc8192e2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3159,20 +2565,20 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = HID+A2DP\n");
- halbtc8192e2ant_action_hid_a2dp(btcoexist);
+ btc8192e2ant_action_hid_a2dp(btcoexist);
break;
default:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"Action 2-Ant, algorithm = unknown!!\n");
- /* halbtc8192e2ant_coex_alloff(btcoexist); */
+ /* btc8192e2ant_coex_all_off(btcoexist); */
break;
}
coex_dm->pre_algorithm = coex_dm->cur_algorithm;
}
}
-static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
- bool backup)
+static void btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
+ bool backup)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u16 u16tmp = 0;
@@ -3191,7 +2597,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
0x430);
coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist,
0x434);
- coex_dm->backup_retrylimit = btcoexist->btc_read_2byte(
+ coex_dm->backup_retry_limit = btcoexist->btc_read_2byte(
btcoexist,
0x42a);
coex_dm->backup_ampdu_maxtime = btcoexist->btc_read_1byte(
@@ -3209,7 +2615,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
else
btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30030004);
- btc8192e2ant_coex_tbl_w_type(btcoexist, FORCE_EXEC, 0);
+ btc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
/* antenna switch control parameter */
btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);
@@ -3232,7 +2638,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
u16tmp |= BIT9;
btcoexist->btc_write_2byte(btcoexist, 0x40, u16tmp);
- /* enable PTA I2C mailbox */
+ /* enable PTA I2C mailbox */
u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x101);
u8tmp |= BIT4;
btcoexist->btc_write_1byte(btcoexist, 0x101, u8tmp);
@@ -3247,29 +2653,25 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
btcoexist->btc_write_1byte(btcoexist, 0x7, u8tmp);
}
-/*************************************************************
- * work around function start with wa_halbtc8192e2ant_
- *************************************************************/
-
/************************************************************
- * extern function start with EXhalbtc8192e2ant_
+ * extern function start with ex_btc8192e2ant_
************************************************************/
-void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
- halbtc8192e2ant_init_hwconfig(btcoexist, true);
+ btc8192e2ant_init_hwconfig(btcoexist, true);
}
-void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Coex Mechanism Init!!\n");
- halbtc8192e2ant_init_coex_dm(btcoexist);
+ btc8192e2ant_init_coex_dm(btcoexist);
}
-void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -3278,8 +2680,8 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
u16 u16tmp[4];
u32 u32tmp[4];
bool roam = false, scan = false, link = false, wifi_under_5g = false;
- bool bt_hson = false, wifi_busy = false;
- int wifirssi = 0, bt_hs_rssi = 0;
+ bool bt_hs_on = false, wifi_busy = false;
+ int wifi_rssi = 0, bt_hs_rssi = 0;
u32 wifi_bw, wifi_traffic_dir;
u8 wifi_dot11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
@@ -3316,21 +2718,21 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
fw_ver, bt_patch_ver, bt_patch_ver);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
"Dot11 channel / HsMode(HsChnl)",
- wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
+ wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
"H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
- btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -3377,7 +2779,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
if (coex_sta->bt_info_c2h_cnt[i]) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s = %7ph(%d)",
- GLBtInfoSrc8192e2Ant[i],
+ glbt_info_src_8192e_2ant[i],
coex_sta->bt_info_c2h[i],
coex_sta->bt_info_c2h_cnt[i]);
}
@@ -3390,7 +2792,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type",
- coex_dm->cur_sstype);
+ coex_dm->cur_ss_type);
/* Sw mechanism */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
@@ -3429,7 +2831,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
"backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
- coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
+ coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
coex_dm->backup_ampdu_maxtime);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
@@ -3485,12 +2887,12 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
"0x774(lp rx[31:16]/tx[15:0])",
coex_sta->low_priority_rx, coex_sta->low_priority_tx);
#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1)
- halbtc8192e2ant_monitor_bt_ctr(btcoexist);
+ btc8192e2ant_monitor_bt_ctr(btcoexist);
#endif
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
}
-void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3498,7 +2900,7 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
- halbtc8192e2ant_coex_alloff(btcoexist);
+ btc8192e2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS LEAVE notify\n");
@@ -3506,7 +2908,7 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3521,7 +2923,7 @@ void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3533,7 +2935,7 @@ void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
"[BTCoex], SCAN FINISH notify\n");
}
-void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3545,8 +2947,8 @@ void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
"[BTCoex], CONNECT FINISH notify\n");
}
-void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
- u8 type)
+void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
@@ -3591,8 +2993,8 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
-void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
- u8 type)
+void ex_btc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3601,8 +3003,8 @@ void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
"[BTCoex], DHCP Packet notify\n");
}
-void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
- u8 *tmp_buf, u8 length)
+void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 bt_info = 0;
@@ -3633,7 +3035,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
- coex_sta->bt_retry_cnt = /* [3:0] */
+ /* [3:0] */
+ coex_sta->bt_retry_cnt =
coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
coex_sta->bt_rssi =
@@ -3651,11 +3054,11 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
- ex_halbtc8192e2ant_media_status_notify(
+ ex_btc8192e2ant_media_status_notify(
btcoexist,
BTC_MEDIA_CONNECT);
else
- ex_halbtc8192e2ant_media_status_notify(
+ ex_btc8192e2ant_media_status_notify(
btcoexist,
BTC_MEDIA_DISCONNECT);
}
@@ -3665,9 +3068,9 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
!btcoexist->stop_coex_dm) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"bit3, BT NOT ignore Wlan active!\n");
- halbtc8192e2ant_IgnoreWlanAct(btcoexist,
- FORCE_EXEC,
- false);
+ btc8192e2ant_ignore_wlan_act(btcoexist,
+ FORCE_EXEC,
+ false);
}
} else {
/* BT already NOT ignore Wlan active,
@@ -3679,8 +3082,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT4)) {
/* BT auto report already enabled, do nothing */
} else {
- halbtc8192e2ant_bt_autoreport(btcoexist, FORCE_EXEC,
- true);
+ btc8192e2ant_bt_auto_report(btcoexist, FORCE_EXEC,
+ true);
}
#endif
}
@@ -3718,9 +3121,9 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->sco_exist = false;
}
- halbtc8192e2ant_update_btlink_info(btcoexist);
+ btc8192e2ant_update_bt_link_info(btcoexist);
- if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
+ if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BT Non-Connected idle!!!\n");
@@ -3728,12 +3131,12 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
- } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
- (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
+ } else if ((bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
- } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
+ } else if (bt_info & BT_INFO_8192E_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
@@ -3758,12 +3161,7 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_dm->limited_dig = limited_dig;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
- halbtc8192e2ant_run_coexist_mechanism(btcoexist);
-}
-
-void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
- u8 type)
-{
+ btc8192e2ant_run_coexist_mechanism(btcoexist);
}
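/* Illustrative decode of the C2H payload handled above (hypothetical
 * helper; the offset and mask are the ones the code uses): byte 2 of
 * the BT info response carries the retry count in its low nibble.
 */
static inline u8 example_bt_retry_cnt(const u8 *bt_info_c2h)
{
	return bt_info_c2h[2] & 0xf;	/* bits [3:0] */
}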
void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
@@ -3772,11 +3170,11 @@ void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
- halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
- ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+ btc8192e2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ ex_btc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
}
-void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
static u8 dis_ver_info_cnt;
@@ -3810,12 +3208,12 @@ void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
}
#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
- halbtc8192e2ant_querybt_info(btcoexist);
- halbtc8192e2ant_monitor_bt_ctr(btcoexist);
- btc8192e2ant_monitor_bt_enable_dis(btcoexist);
+ btc8192e2ant_query_bt_info(btcoexist);
+ btc8192e2ant_monitor_bt_ctr(btcoexist);
+ btc8192e2ant_monitor_bt_enable_disable(btcoexist);
#else
- if (halbtc8192e2ant_iswifi_status_changed(btcoexist) ||
+ if (btc8192e2ant_is_wifi_status_changed(btcoexist) ||
coex_dm->auto_tdma_adjust)
- halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+ btc8192e2ant_run_coexist_mechanism(btcoexist);
#endif
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
index 75e1f7d0db06..fc0fa87ec404 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
@@ -116,7 +116,7 @@ struct coex_dm_8192e_2ant {
u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */
u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */
- u16 backup_retrylimit;
+ u16 backup_retry_limit;
u8 backup_ampdu_maxtime;
/* algorithm related */
@@ -125,18 +125,18 @@ struct coex_dm_8192e_2ant {
u8 bt_status;
u8 wifi_chnl_info[3];
- u8 pre_sstype;
- u8 cur_sstype;
+ u8 pre_ss_type;
+ u8 cur_ss_type;
- u32 prera_mask;
- u32 curra_mask;
- u8 curra_masktype;
- u8 pre_arfrtype;
- u8 cur_arfrtype;
- u8 pre_retrylimit_type;
- u8 cur_retrylimit_type;
- u8 pre_ampdutime_type;
- u8 cur_ampdutime_type;
+ u32 pre_ra_mask;
+ u32 cur_ra_mask;
+ u8 cur_ra_mask_type;
+ u8 pre_arfr_type;
+ u8 cur_arfr_type;
+ u8 pre_retry_limit_type;
+ u8 cur_retry_limit_type;
+ u8 pre_ampdu_time_type;
+ u8 cur_ampdu_time_type;
};
struct coex_sta_8192e_2ant {
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index d67bbfb6ad8e..2003c8c51dcc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -45,7 +45,7 @@ static struct coex_dm_8723b_1ant *coex_dm = &glcoex_dm_8723b_1ant;
static struct coex_sta_8723b_1ant glcoex_sta_8723b_1ant;
static struct coex_sta_8723b_1ant *coex_sta = &glcoex_sta_8723b_1ant;
-static const char *const GLBtInfoSrc8723b1Ant[] = {
+static const char *const glbt_info_src_8723b_1ant[] = {
"BT Info[wifi fw]",
"BT Info[bt rsp]",
"BT Info[bt auto report]",
@@ -60,188 +60,6 @@ static u32 glcoex_ver_8723b_1ant = 0x47;
/***************************************************************
* local function start with halbtc8723b1ant_
***************************************************************/
-static u8 halbtc8723b1ant_bt_rssi_state(struct btc_coexist *btcoexist,
- u8 level_num, u8 rssi_thresh,
- u8 rssi_thresh1)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- s32 bt_rssi = 0;
- u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
-
- bt_rssi = coex_sta->bt_rssi;
-
- if (level_num == 2) {
- if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
- (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- if (bt_rssi >= rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
- bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
- } else {
- bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
- }
- } else {
- if (bt_rssi < rssi_thresh) {
- bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
- } else {
- bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
- }
- }
- } else if (level_num == 3) {
- if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
- return coex_sta->pre_bt_rssi_state;
- }
-
- if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
- (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- if (bt_rssi >= rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
- bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
- } else {
- bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
- }
- } else if ((coex_sta->pre_bt_rssi_state ==
- BTC_RSSI_STATE_MEDIUM) ||
- (coex_sta->pre_bt_rssi_state ==
- BTC_RSSI_STATE_STAY_MEDIUM)) {
- if (bt_rssi >= rssi_thresh1 +
- BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
- bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
- } else if (bt_rssi < rssi_thresh) {
- bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
- } else {
- bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Medium\n");
- }
- } else {
- if (bt_rssi < rssi_thresh1) {
- bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
- } else {
- bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
- }
- }
- }
-
- coex_sta->pre_bt_rssi_state = bt_rssi_state;
-
- return bt_rssi_state;
-}
-
-static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
- u8 index, u8 level_num,
- u8 rssi_thresh, u8 rssi_thresh1)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- s32 wifi_rssi = 0;
- u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
-
- btcoexist->btc_get(btcoexist,
- BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
-
- if (level_num == 2) {
- if ((coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_LOW) ||
- (coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_STAY_LOW)) {
- if (wifi_rssi >= rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
- wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
- } else {
- wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
- }
- } else {
- if (wifi_rssi < rssi_thresh) {
- wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
- } else {
- wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
- }
- }
- } else if (level_num == 3) {
- if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
- return coex_sta->pre_wifi_rssi_state[index];
- }
-
- if ((coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_LOW) ||
- (coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_STAY_LOW)) {
- if (wifi_rssi >= rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
- wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
- } else {
- wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
- }
- } else if ((coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_MEDIUM) ||
- (coex_sta->pre_wifi_rssi_state[index] ==
- BTC_RSSI_STATE_STAY_MEDIUM)) {
- if (wifi_rssi >= rssi_thresh1 +
- BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
- wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
- } else if (wifi_rssi < rssi_thresh) {
- wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
- } else {
- wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Medium\n");
- }
- } else {
- if (wifi_rssi < rssi_thresh1) {
- wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
- } else {
- wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
- }
- }
- }
-
- coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
-
- return wifi_rssi_state;
-}
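/* The two-level case of the RSSI helpers deleted above, compacted for
 * illustration (the tolerance constant is the one the deleted code
 * used). Crossing upward requires thresh + tolerance while crossing
 * downward requires only thresh, which is what keeps the reported
 * state from flapping around the threshold:
 */
static u8 example_rssi_state_2level(u8 prev, s32 rssi, u8 thresh)
{
	bool was_low = (prev == BTC_RSSI_STATE_LOW ||
			prev == BTC_RSSI_STATE_STAY_LOW);

	if (was_low)
		return rssi >= thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT ?
			BTC_RSSI_STATE_HIGH : BTC_RSSI_STATE_STAY_LOW;

	return rssi < thresh ?
		BTC_RSSI_STATE_LOW : BTC_RSSI_STATE_STAY_HIGH;
}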
static void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist,
bool force_exec, u32 dis_rate_mask)
@@ -249,7 +67,7 @@ static void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist,
coex_dm->curra_mask = dis_rate_mask;
if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask))
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK,
&coex_dm->curra_mask);
coex_dm->prera_mask = coex_dm->curra_mask;
@@ -326,15 +144,14 @@ static void halbtc8723b1ant_ampdu_maxtime(struct btc_coexist *btcoexist,
coex_dm->cur_ampdu_time_type)) {
switch (coex_dm->cur_ampdu_time_type) {
case 0: /* normal mode */
- btcoexist->btc_write_1byte(btcoexist, 0x456,
- coex_dm->backup_ampdu_max_time);
- break;
+ btcoexist->btc_write_1byte(btcoexist, 0x456,
+ coex_dm->backup_ampdu_max_time);
+ break;
case 1: /* AMPDU time = 0x38 * 32us */
- btcoexist->btc_write_1byte(btcoexist,
- 0x456, 0x38);
- break;
+ btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
+ break;
default:
- break;
+ break;
}
}
@@ -354,7 +171,7 @@ static void halbtc8723b1ant_limited_tx(struct btc_coexist *btcoexist,
halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
0x00000003);
break;
- /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/
+ /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
case 2:
halbtc8723b1ant_updatera_mask(btcoexist, force_exec,
0x0001f1f7);
@@ -426,7 +243,8 @@ static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
coex_sta->c2h_bt_info_req_sent = true;
- h2c_parameter[0] |= BIT0; /* trigger*/
+ /* trigger */
+ h2c_parameter[0] |= BIT0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
@@ -515,202 +333,6 @@ static void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist)
bt_link_info->hid_only = false;
}
-static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
- u8 numdiffprofile = 0;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
-
- if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No BT link exists!!!\n");
- return algorithm;
- }
-
- if (bt_link_info->sco_exist)
- numdiffprofile++;
- if (bt_link_info->hid_exist)
- numdiffprofile++;
- if (bt_link_info->pan_exist)
- numdiffprofile++;
- if (bt_link_info->a2dp_exist)
- numdiffprofile++;
-
- if (numdiffprofile == 1) {
- if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO only\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
- } else {
- if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = HID only\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
- } else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = A2DP only\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
- } else if (bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = PAN(HS) only\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_PANHS;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_PANEDR;
- }
- }
- }
- } else if (numdiffprofile == 2) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
- } else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
- } else if (bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- }
- } else {
- if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
- } else if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- } else if (bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
- }
- }
- }
- } else if (numdiffprofile == 3) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
- } else if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- } else if (bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
- algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- }
- } else {
- if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
- }
- }
- }
- } else if (numdiffprofile >= 3) {
- if (bt_link_info->sco_exist) {
- if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
- if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
- algorithm =
- BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
- }
- }
- }
- }
-
- return algorithm;
-}
-
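/* The selector removed above counts the active BT profiles, then maps
 * each combination (plus whether PAN runs in HS mode) to an algorithm
 * id. Sketch of its single-profile arm (illustrative helper; constants
 * and link-info fields are the ones the deleted code used):
 */
static u8 example_algo_one_profile(struct btc_bt_link_info *info,
				   bool bt_hs_on)
{
	if (info->sco_exist)
		return BT_8723B_1ANT_COEX_ALGO_SCO;
	if (info->hid_exist)
		return BT_8723B_1ANT_COEX_ALGO_HID;
	if (info->a2dp_exist)
		return BT_8723B_1ANT_COEX_ALGO_A2DP;
	if (info->pan_exist)
		return bt_hs_on ? BT_8723B_1ANT_COEX_ALGO_PANHS :
				  BT_8723B_1ANT_COEX_ALGO_PANEDR;
	return BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
}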
static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
@@ -721,11 +343,11 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
if (low_penalty_ra) {
h2c_parameter[1] |= BIT0;
- /*normal rate except MCS7/6/5, OFDM54/48/36 */
+ /* normal rate except MCS7/6/5, OFDM54/48/36 */
h2c_parameter[2] = 0x00;
- h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54 */
- h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48 */
- h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */
+ h2c_parameter[3] = 0xf7; /* MCS7 or OFDM54 */
+ h2c_parameter[4] = 0xf8; /* MCS6 or OFDM48 */
+ h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
}
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -846,8 +468,9 @@ static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
}
}
-static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
- bool enable)
+static void
+halbtc8723b1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool enable)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
@@ -882,7 +505,7 @@ static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
coex_dm->cur_ignore_wlan_act)
return;
}
- halbtc8723b1ant_SetFwIgnoreWlanAct(btcoexist, enable);
+ halbtc8723b1ant_set_fw_ignore_wlan_act(btcoexist, enable);
coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
}
@@ -944,9 +567,9 @@ static void halbtc8723b1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
}
-static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
- bool force_exec,
- u8 lps_val, u8 rpwm_val)
+static void halbtc8723b1ant_lps_rpwm(struct btc_coexist *btcoexist,
+ bool force_exec,
+ u8 lps_val, u8 rpwm_val)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -987,9 +610,9 @@ static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
-static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
- u8 ant_pos_type, bool init_hw_cfg,
- bool wifi_off)
+static void halbtc8723b1ant_set_ant_path(struct btc_coexist *btcoexist,
+ u8 ant_pos_type, bool init_hw_cfg,
+ bool wifi_off)
{
struct btc_board_info *board_info = &btcoexist->board_info;
u32 fw_ver = 0, u32tmp = 0;
@@ -1028,7 +651,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
if (use_ext_switch) {
if (init_hw_cfg) {
/* 0x4c[23] = 0, 0x4c[24] = 1
- * Antenna control by WL/BT
+ * Antenna control by WL/BT
*/
u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u32tmp &= ~BIT23;
@@ -1037,35 +660,36 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
if (board_info->btdm_ant_pos ==
BTC_ANTENNA_AT_MAIN_PORT) {
- /* Main Ant to BT for IPS case 0x4c[23] = 1 */
+ /* Main Ant to BT for IPS case 0x4c[23] = 1 */
btcoexist->btc_write_1byte_bitmask(btcoexist,
0x64, 0x1,
0x1);
- /*tell firmware "no antenna inverse"*/
+ /* tell firmware "no antenna inverse" */
h2c_parameter[0] = 0;
h2c_parameter[1] = 1; /*ext switch type*/
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
} else {
- /*Aux Ant to BT for IPS case 0x4c[23] = 1 */
+ /* Aux Ant to BT for IPS case 0x4c[23] = 1 */
btcoexist->btc_write_1byte_bitmask(btcoexist,
0x64, 0x1,
0x0);
- /*tell firmware "antenna inverse"*/
+ /* tell firmware "antenna inverse" */
h2c_parameter[0] = 1;
- h2c_parameter[1] = 1; /*ext switch type*/
+ h2c_parameter[1] = 1; /* ext switch type */
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
}
}
- /* fixed internal switch first*/
- /* fixed internal switch S1->WiFi, S0->BT*/
+ /* fixed internal switch first
+ * fixed internal switch S1->WiFi, S0->BT
+ */
if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
- else/* fixed internal switch S0->WiFi, S1->BT*/
+ else /* fixed internal switch S0->WiFi, S1->BT */
btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
/* ext switch setting */
@@ -1108,7 +732,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
} else {
if (init_hw_cfg) {
- /* 0x4c[23] = 1, 0x4c[24] = 0 Antenna control by 0x64*/
+ /* 0x4c[23] = 1, 0x4c[24] = 0 Antenna control by 0x64 */
u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u32tmp |= BIT23;
u32tmp &= ~BIT24;
@@ -1116,41 +740,42 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
if (board_info->btdm_ant_pos ==
BTC_ANTENNA_AT_MAIN_PORT) {
- /*Main Ant to WiFi for IPS case 0x4c[23] = 1*/
+ /* Main Ant to WiFi for IPS case 0x4c[23] = 1 */
btcoexist->btc_write_1byte_bitmask(btcoexist,
0x64, 0x1,
0x0);
- /*tell firmware "no antenna inverse"*/
+ /* tell firmware "no antenna inverse" */
h2c_parameter[0] = 0;
- h2c_parameter[1] = 0; /*internal switch type*/
+ h2c_parameter[1] = 0; /* internal switch type */
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
} else {
- /*Aux Ant to BT for IPS case 0x4c[23] = 1*/
+ /* Aux Ant to BT for IPS case 0x4c[23] = 1 */
btcoexist->btc_write_1byte_bitmask(btcoexist,
0x64, 0x1,
0x1);
- /*tell firmware "antenna inverse"*/
+ /* tell firmware "antenna inverse" */
h2c_parameter[0] = 1;
- h2c_parameter[1] = 0; /*internal switch type*/
+ h2c_parameter[1] = 0; /* internal switch type */
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
}
}
- /* fixed external switch first*/
- /*Main->WiFi, Aux->BT*/
+ /* fixed external switch first
+ * Main->WiFi, Aux->BT
+ */
if (board_info->btdm_ant_pos ==
BTC_ANTENNA_AT_MAIN_PORT)
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
0x3, 0x1);
- else/*Main->BT, Aux->WiFi */
+ else /* Main->BT, Aux->WiFi */
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
0x3, 0x2);
- /* internal switch setting*/
+ /* internal switch setting */
switch (ant_pos_type) {
case BTC_ANT_PATH_WIFI:
if (board_info->btdm_ant_pos ==
@@ -1365,7 +990,7 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
0x3, 0x14, 0x50);
break;
- /* SoftAP only with no sta associated,BT disable ,
+ /* SoftAP only with no sta associated, BT disabled,
* TDMA mode for power saving
* here softap mode screen off will cost 70-80mA for phone
*/
@@ -1376,24 +1001,29 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
}
} else {
switch (type) {
- case 8: /*PTA Control */
+ case 8: /* PTA Control */
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0,
0x0, 0x0, 0x0);
- halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA,
- false, false);
+ halbtc8723b1ant_set_ant_path(btcoexist,
+ BTC_ANT_PATH_PTA,
+ false, false);
break;
case 0:
- default: /*Software control, Antenna at BT side */
+ default:
+ /* Software control, Antenna at BT side */
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
0x0, 0x0, 0x0);
- halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
- false, false);
+ halbtc8723b1ant_set_ant_path(btcoexist,
+ BTC_ANT_PATH_BT,
+ false, false);
break;
- case 9: /*Software control, Antenna at WiFi side */
+ case 9:
+ /* Software control, Antenna at WiFi side */
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
0x0, 0x0, 0x0);
- halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_WIFI,
- false, false);
+ halbtc8723b1ant_set_ant_path(btcoexist,
+ BTC_ANT_PATH_WIFI,
+ false, false);
break;
}
}
@@ -1407,247 +1037,15 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
}
-static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool commom = false, wifi_connected = false;
- bool wifi_busy = false;
-
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
-
- if (!wifi_connected &&
- BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
- commom = true;
- } else if (wifi_connected &&
- (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
- commom = true;
- } else if (!wifi_connected &&
- (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
- coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
- commom = true;
- } else if (wifi_connected &&
- (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
- coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
- commom = true;
- } else if (!wifi_connected &&
- (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
- coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
- commom = true;
- } else {
- if (wifi_busy)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
- else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
-
- commom = false;
- }
-
- return commom;
-}
-
-static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
- u8 wifi_status)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static s32 up, dn, m, n, wait_count;
- /* 0: no change, +1: increase WiFi duration,
- * -1: decrease WiFi duration
- */
- s32 result;
- u8 retry_count = 0, bt_info_ext;
- bool wifi_busy = false;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
-
- if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
- wifi_busy = true;
- else
- wifi_busy = false;
-
- if ((BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
- wifi_status) ||
- (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifi_status) ||
- (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifi_status)) {
- if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
- coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 9) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
-
- up = 0;
- dn = 0;
- m = 1;
- n = 3;
- result = 0;
- wait_count = 0;
- }
- return;
- }
-
- if (!coex_dm->auto_tdma_adjust) {
- coex_dm->auto_tdma_adjust = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
-
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
- coex_dm->tdma_adj_type = 2;
-
- up = 0;
- dn = 0;
- m = 1;
- n = 3;
- result = 0;
- wait_count = 0;
- } else {
- /*accquire the BT TRx retry count from BT_Info byte2 */
- retry_count = coex_sta->bt_retry_cnt;
- bt_info_ext = coex_sta->bt_info_ext;
- result = 0;
- wait_count++;
- /* no retry in the last 2-second duration */
- if (retry_count == 0) {
- up++;
- dn--;
-
- if (dn <= 0)
- dn = 0;
-
- if (up >= n) {
- wait_count = 0;
- n = 3;
- up = 0;
- dn = 0;
- result = 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Increase wifi duration!!\n");
- }
- } else if (retry_count <= 3) {
- up--;
- dn++;
-
- if (up <= 0)
- up = 0;
-
- if (dn == 2) {
- if (wait_count <= 2)
- m++;
- else
- m = 1;
-
- if (m >= 20)
- m = 20;
-
- n = 3 * m;
- up = 0;
- dn = 0;
- wait_count = 0;
- result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
- }
- } else {
- if (wait_count == 1)
- m++;
- else
- m = 1;
-
- if (m >= 20)
- m = 20;
-
- n = 3 * m;
- up = 0;
- dn = 0;
- wait_count = 0;
- result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
- }
-
- if (result == -1) {
- if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
- ((coex_dm->cur_ps_tdma == 1) ||
- (coex_dm->cur_ps_tdma == 2))) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 1) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- }
- } else if (result == 1) {
- if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
- ((coex_dm->cur_ps_tdma == 1) ||
- (coex_dm->cur_ps_tdma == 2))) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
- }
- } else { /*no change */
- /*if busy / idle change */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex],********* TDMA(on, %d) ********\n",
- coex_dm->cur_ps_tdma);
- }
-
- if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
- coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) {
- /* recover to previous adjust type */
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- coex_dm->tdma_adj_type);
- }
- }
-}
-
-static void btc8723b1ant_pstdmachkpwrsave(struct btc_coexist *btcoexist,
- bool new_ps_state)
+static void halbtc8723b1ant_ps_tdma_chk_pwr_save(struct btc_coexist *btcoexist,
+ bool new_ps_state)
{
u8 lps_mode = 0x0;
btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
- if (lps_mode) { /* already under LPS state */
+ if (lps_mode) {
+ /* already under LPS state */
if (new_ps_state) {
/* keep state under LPS, do nothing. */
} else {
@@ -1655,7 +1053,8 @@ static void btc8723b1ant_pstdmachkpwrsave(struct btc_coexist *btcoexist,
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 0);
}
- } else { /* NO PS state */
+ } else {
+ /* NO PS state */
if (new_ps_state) {
/* will enter LPS state, turn off psTdma first */
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1681,18 +1080,18 @@ static void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist,
btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
break;
case BTC_PS_LPS_ON:
- btc8723b1ant_pstdmachkpwrsave(btcoexist, true);
- halbtc8723b1ant_LpsRpwm(btcoexist, NORMAL_EXEC, lps_val,
- rpwm_val);
- /* when coex force to enter LPS, do not enter 32k low power. */
+ halbtc8723b1ant_ps_tdma_chk_pwr_save(btcoexist, true);
+ halbtc8723b1ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
+ rpwm_val);
+ /* when coex forces LPS entry, do not enter 32k low power */
low_pwr_disable = true;
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- /* power save must executed before psTdma. */
+ /* power save must be executed before psTdma */
btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
break;
case BTC_PS_LPS_OFF:
- btc8723b1ant_pstdmachkpwrsave(btcoexist, false);
+ halbtc8723b1ant_ps_tdma_chk_pwr_save(btcoexist, false);
btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
break;
default:
@@ -1700,66 +1099,6 @@ static void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist,
}
}
-/***************************************************
- *
- * Software Coex Mechanism start
- *
- ***************************************************/
-/* SCO only or SCO+PAN(HS) */
-static void halbtc8723b1ant_action_sco(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
-static void halbtc8723b1ant_action_hid(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
-/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
-static void halbtc8723b1ant_action_a2dp(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-static void halbtc8723b1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-static void halbtc8723b1ant_action_pan_edr(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-/* PAN(HS) only */
-static void halbtc8723b1ant_action_pan_hs(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-/*PAN(EDR)+A2DP */
-static void halbtc8723b1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, false);
-}
-
-static void halbtc8723b1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
-/* HID+A2DP+PAN(EDR) */
-static void btc8723b1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
-static void halbtc8723b1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
-{
- halbtc8723b1ant_sw_mechanism(btcoexist, true);
-}
-
/*****************************************************
*
* Non-Software Coex Mechanism start
@@ -1826,11 +1165,11 @@ static void btc8723b1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
&wifi_connected);
/* tdma and coex table */
-
if (bt_link_info->sco_exist) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
- } else { /* HID */
+ } else {
+ /* HID */
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5);
}
@@ -1840,30 +1179,21 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
struct btc_coexist *btcoexist,
u8 wifi_status)
{
- u8 bt_rssi_state;
-
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bt_rssi_state = halbtc8723b1ant_bt_rssi_state(btcoexist, 2, 28, 0);
- if (bt_link_info->hid_only) { /*HID */
+ if (bt_link_info->hid_only) { /* HID */
btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status);
coex_dm->auto_tdma_adjust = false;
return;
- } else if (bt_link_info->a2dp_only) { /*A2DP */
- if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE == wifi_status) {
+ } else if (bt_link_info->a2dp_only) { /* A2DP */
+ if (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 8);
halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 2);
coex_dm->auto_tdma_adjust = false;
- } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b1ant_tdma_dur_adj_for_acl(btcoexist,
- wifi_status);
- halbtc8723b1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
- } else { /*for low BT RSSI */
+ } else { /* for low BT RSSI */
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 11);
halbtc8723b1ant_coex_table_with_type(btcoexist,
@@ -1871,18 +1201,18 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
coex_dm->auto_tdma_adjust = false;
}
} else if (bt_link_info->hid_exist &&
- bt_link_info->a2dp_exist) { /*HID+A2DP */
- halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ bt_link_info->a2dp_exist) { /* HID + A2DP */
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
coex_dm->auto_tdma_adjust = false;
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
- /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */
+ /* PAN(OPP,FTP), HID + PAN(OPP,FTP) */
} else if (bt_link_info->pan_only ||
(bt_link_info->hid_exist && bt_link_info->pan_exist)) {
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
coex_dm->auto_tdma_adjust = false;
- /*A2DP+PAN(OPP,FTP), HID+A2DP+PAN(OPP,FTP)*/
+ /* A2DP + PAN(OPP,FTP), HID + A2DP + PAN(OPP,FTP) */
} else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) ||
(bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
bt_link_info->pan_exist)) {
@@ -1907,57 +1237,59 @@ static void btc8723b1ant_action_wifi_not_conn(struct btc_coexist *btcoexist)
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
}
-static void btc8723b1ant_action_wifi_not_conn_scan(struct btc_coexist *btcoex)
+static void
+btc8723b1ant_action_wifi_not_conn_scan(struct btc_coexist *btcoexist)
{
- struct btc_bt_link_info *bt_link_info = &btcoex->bt_link_info;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- halbtc8723b1ant_power_save_state(btcoex, BTC_PS_WIFI_NATIVE,
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
/* tdma and coex table */
if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
- halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC,
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 22);
- halbtc8723b1ant_coex_table_with_type(btcoex,
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 1);
} else if (bt_link_info->pan_only) {
- halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC,
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 20);
- halbtc8723b1ant_coex_table_with_type(btcoex,
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 2);
} else {
- halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC,
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 20);
- halbtc8723b1ant_coex_table_with_type(btcoex,
+ halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 1);
}
} else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
(BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY ==
coex_dm->bt_status)){
- btc8723b1ant_act_bt_sco_hid_only_busy(btcoex,
+ btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN);
} else {
- halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 8);
- halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 2);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
}
}
-static void btc8723b1ant_act_wifi_not_conn_asso_auth(struct btc_coexist *btcoex)
+static void
+btc8723b1ant_act_wifi_not_conn_asso_auth(struct btc_coexist *btcoexist)
{
- struct btc_bt_link_info *bt_link_info = &btcoex->bt_link_info;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- halbtc8723b1ant_power_save_state(btcoex, BTC_PS_WIFI_NATIVE,
+ halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
if ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ||
(bt_link_info->sco_exist) || (bt_link_info->hid_only) ||
(bt_link_info->a2dp_only) || (bt_link_info->pan_only)) {
- halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 8);
- halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 7);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
} else {
- halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, true, 20);
- halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 1);
+ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
}
}
@@ -2109,75 +1441,6 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
}
}
-static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 algorithm = 0;
-
- algorithm = halbtc8723b1ant_action_algorithm(btcoexist);
- coex_dm->cur_algorithm = algorithm;
-
- if (!halbtc8723b1ant_is_common_action(btcoexist)) {
- switch (coex_dm->cur_algorithm) {
- case BT_8723B_1ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = SCO\n");
- halbtc8723b1ant_action_sco(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID\n");
- halbtc8723b1ant_action_hid(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = A2DP\n");
- halbtc8723b1ant_action_a2dp(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
- halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN(EDR)\n");
- halbtc8723b1ant_action_pan_edr(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HS mode\n");
- halbtc8723b1ant_action_pan_hs(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN+A2DP\n");
- halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
- halbtc8723b1ant_action_pan_edr_hid(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
- btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
- break;
- case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID+A2DP\n");
- halbtc8723b1ant_action_hid_a2dp(btcoexist);
- break;
- default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
- break;
- }
- coex_dm->pre_algorithm = coex_dm->cur_algorithm;
- }
-}
-
static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -2186,7 +1449,6 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
bool increase_scan_dev_num = false;
bool bt_ctrl_agg_buf_size = false;
u8 agg_buf_size = 5;
- u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
@@ -2238,16 +1500,12 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) {
halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
} else {
- if (wifi_connected) {
- wifi_rssi_state =
- halbtc8723b1ant_wifi_rssi_state(btcoexist,
- 1, 2, 30, 0);
+ if (wifi_connected)
halbtc8723b1ant_limited_tx(btcoexist,
NORMAL_EXEC, 1, 1, 1, 1);
- } else {
+ else
halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC,
0, 0, 0, 0);
- }
}
if (bt_link_info->sco_exist) {
@@ -2263,8 +1521,6 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
bt_ctrl_agg_buf_size, agg_buf_size);
- btc8723b1ant_run_sw_coex_mech(btcoexist);
-
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (coex_sta->c2h_bt_inquiry_page) {
@@ -2364,28 +1620,19 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
/* Enable counter statistics */
- /*0x76e[3] =1, WLAN_Act control by PTA */
+ /* 0x76e[3] = 1, WLAN_Act controlled by PTA */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
- /*Antenna config */
- halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, true, false);
+ /* Antenna config */
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, true, false);
/* PTA parameter */
halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
}
-static void halbtc8723b1ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist)
-{
- /* set wlan_act to low */
- btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
-}
-
/**************************************************************
- * work around function start with wa_halbtc8723b1ant_
- **************************************************************/
-/**************************************************************
- * extern function start with EXhalbtc8723b1ant_
+ * extern function start with ex_halbtc8723b1ant_
**************************************************************/
void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
@@ -2539,7 +1786,7 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
if (coex_sta->bt_info_c2h_cnt[i]) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s = %7ph(%d)",
- GLBtInfoSrc8723b1Ant[i],
+ glbt_info_src_8723b_1ant[i],
coex_sta->bt_info_c2h[i],
coex_sta->bt_info_c2h_cnt[i]);
}
@@ -2697,13 +1944,12 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
"[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
- halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
- false, true);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+ false, true);
/* set PTA control */
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
- halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS LEAVE notify\n");
@@ -2774,14 +2020,17 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
if (BTC_SCAN_START == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCAN START notify\n");
- if (!wifi_connected) /* non-connected scan */
+ if (!wifi_connected)
+ /* non-connected scan */
btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
- else /* wifi is connected */
+ else
+ /* wifi is connected */
btc8723b1ant_action_wifi_conn_scan(btcoexist);
} else if (BTC_SCAN_FINISH == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCAN FINISH notify\n");
- if (!wifi_connected) /* non-connected scan */
+ if (!wifi_connected)
+ /* non-connected scan */
btc8723b1ant_action_wifi_not_conn(btcoexist);
else
halbtc8723b1ant_action_wifi_connected(btcoexist);
@@ -2831,7 +2080,8 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
- if (!wifi_connected) /* non-connected scan */
+ if (!wifi_connected)
+ /* non-connected scan */
btc8723b1ant_action_wifi_not_conn(btcoexist);
else
halbtc8723b1ant_action_wifi_connected(btcoexist);
@@ -3020,7 +2270,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->a2dp_exist = false;
coex_sta->hid_exist = false;
coex_sta->sco_exist = false;
- } else { /* connection exists */
+ } else {
+ /* connection exists */
coex_sta->bt_link_exist = true;
if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
coex_sta->pan_exist = true;
@@ -3089,9 +2340,8 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
btcoexist->stop_coex_dm = true;
- halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true);
- halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
@@ -3111,13 +2361,12 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Pnp notify to SLEEP\n");
btcoexist->stop_coex_dm = true;
- halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
- true);
+ halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false,
+ true);
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
- halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Pnp notify to WAKE UP\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 12125966a911..2f3946be4ce2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -240,9 +240,33 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
return wifi_rssi_state;
}
+static
+void btc8723b2ant_limited_rx(struct btc_coexist *btcoexist, bool force_exec,
+ bool rej_ap_agg_pkt, bool bt_ctrl_agg_buf_size,
+ u8 agg_buf_size)
+{
+ bool reject_rx_agg = rej_ap_agg_pkt;
+ bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
+ u8 rx_agg_size = agg_buf_size;
+
+ /* ============================================ */
+ /* Rx Aggregation related setting */
+ /* ============================================ */
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ &reject_rx_agg);
+ /* decide whether BT controls the aggregation buf size */
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ &bt_ctrl_rx_agg_size);
+ /* aggregation buf size; only takes effect when BT controls the Rx
+ * aggregation size
+ */
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
+ /* real update aggregation setting */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
u32 reg_hp_tx = 0, reg_hp_rx = 0;
u32 reg_lp_tx = 0, reg_lp_rx = 0;
@@ -263,6 +287,17 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
+ if ((coex_sta->low_priority_tx > 1050) &&
+ (!coex_sta->c2h_bt_inquiry_page))
+ coex_sta->pop_event_cnt++;
+
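+ /* heuristic: sustained low-priority Rx that dominates Tx suggests
+ * the BT side is acting as slave
+ */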
+ if ((coex_sta->low_priority_rx >= 950) &&
+ (coex_sta->low_priority_rx >= coex_sta->low_priority_tx) &&
+ (!coex_sta->under_ips))
+ bt_link_info->slave_role = true;
+ else
+ bt_link_info->slave_role = false;
+
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
@@ -274,6 +309,43 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
}
+static void btc8723b2ant_monitor_wifi_ctr(struct btc_coexist *btcoexist)
+{
+ if (coex_sta->under_ips) {
+ coex_sta->crc_ok_cck = 0;
+ coex_sta->crc_ok_11g = 0;
+ coex_sta->crc_ok_11n = 0;
+ coex_sta->crc_ok_11n_agg = 0;
+
+ coex_sta->crc_err_cck = 0;
+ coex_sta->crc_err_11g = 0;
+ coex_sta->crc_err_11n = 0;
+ coex_sta->crc_err_11n_agg = 0;
+ } else {
+ coex_sta->crc_ok_cck =
+ btcoexist->btc_read_4byte(btcoexist, 0xf88);
+ coex_sta->crc_ok_11g =
+ btcoexist->btc_read_2byte(btcoexist, 0xf94);
+ coex_sta->crc_ok_11n =
+ btcoexist->btc_read_2byte(btcoexist, 0xf90);
+ coex_sta->crc_ok_11n_agg =
+ btcoexist->btc_read_2byte(btcoexist, 0xfb8);
+
+ coex_sta->crc_err_cck =
+ btcoexist->btc_read_4byte(btcoexist, 0xf84);
+ coex_sta->crc_err_11g =
+ btcoexist->btc_read_2byte(btcoexist, 0xf96);
+ coex_sta->crc_err_11n =
+ btcoexist->btc_read_2byte(btcoexist, 0xf92);
+ coex_sta->crc_err_11n_agg =
+ btcoexist->btc_read_2byte(btcoexist, 0xfba);
+ }
+
+ /* reset counter */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x1);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x0);
+}
+
static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -297,6 +369,8 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
static bool pre_bt_hs_on;
bool wifi_busy = false, under_4way = false, bt_hs_on = false;
bool wifi_connected = false;
+ u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ u8 tmp;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -320,6 +394,15 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
pre_bt_hs_on = bt_hs_on;
return true;
}
+
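+ /* a fresh RSSI high/low transition (not a "stay" state) also
+ * counts as a wifi status change
+ */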
+ tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ wifi_rssi_state =
+ btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, tmp, 0);
+
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_LOW))
+ return true;
}
return false;
@@ -327,11 +410,9 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
{
- /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool bt_hs_on = false;
-#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
@@ -345,21 +426,7 @@ static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
bt_link_info->pan_exist = true;
bt_link_info->bt_link_exist = true;
}
-#else /* profile from bt stack */
- bt_link_info->bt_link_exist = stack_info->bt_link_exist;
- bt_link_info->sco_exist = stack_info->sco_exist;
- bt_link_info->a2dp_exist = stack_info->a2dp_exist;
- bt_link_info->pan_exist = stack_info->pan_exist;
- bt_link_info->hid_exist = stack_info->hid_exist;
-
- /*for win-8 stack HID report error*/
- if (!stack_info->hid_exist)
- stack_info->hid_exist = coex_sta->hid_exist;
- /*sync BTInfo with BT firmware and stack*/
- /* when stack HID report error, here we use the info from bt fw.*/
- if (!stack_info->bt_link_exist)
- stack_info->bt_link_exist = coex_sta->bt_link_exist;
-#endif
+
/* check if Sco only */
if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
!bt_link_info->pan_exist && !bt_link_info->hid_exist)
@@ -584,44 +651,6 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
return algorithm;
}
-static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool ret = false;
- bool bt_hs_on = false, wifi_connected = false;
- s32 bt_hs_rssi = 0;
- u8 bt_rssi_state;
-
- if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
- return false;
- if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected))
- return false;
- if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
- return false;
-
- bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
-
- if (wifi_connected) {
- if (bt_hs_on) {
- if (bt_hs_rssi > 37) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Need to decrease bt power for HS mode!!\n");
- ret = true;
- }
- } else {
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
- ret = true;
- }
- }
- }
-
- return ret;
-}
-
static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
u8 dac_swing_lvl)
{
@@ -642,44 +671,40 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
}
static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
- bool dec_bt_pwr)
+ u8 dec_bt_pwr_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
- h2c_parameter[0] = 0;
-
- if (dec_bt_pwr)
- h2c_parameter[0] |= BIT1;
+ h2c_parameter[0] = dec_bt_pwr_lvl;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ "[BTCoex], decrease Bt Power Level : %u\n", dec_bt_pwr_lvl);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
- bool force_exec, bool dec_bt_pwr)
+ bool force_exec, u8 dec_bt_pwr_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s Dec BT power = %s\n",
- force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
- coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+ "[BTCoex], Dec BT power level = %u\n", dec_bt_pwr_lvl);
+ coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl;
if (!force_exec) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ "[BTCoex], PreDecBtPwrLvl=%d, CurDecBtPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr_lvl,
+ coex_dm->cur_dec_bt_pwr_lvl);
- if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl)
return;
}
- btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+ btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr_lvl);
- coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+ coex_dm->pre_dec_bt_pwr_lvl = coex_dm->cur_dec_bt_pwr_lvl;
}
static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
@@ -708,72 +733,21 @@ static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
}
-static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
- bool rx_rf_shrink_on)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (rx_rf_shrink_on) {
- /* Shrink RF Rx LPF corner */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
- 0xfffff, 0xffffc);
- } else {
- /* Resume RF Rx LPF corner */
- /* After initialized, we can use coex_dm->btRf0x1eBackup */
- if (btcoexist->initilized) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
- 0xfffff,
- coex_dm->bt_rf0x1e_backup);
- }
- }
-}
-
-static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
- bool force_exec, bool rx_rf_shrink_on)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
- "ON" : "OFF"));
- coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
-
- if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
-
- if (coex_dm->pre_rf_rx_lpf_shrink ==
- coex_dm->cur_rf_rx_lpf_shrink)
- return;
- }
- btc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
- coex_dm->cur_rf_rx_lpf_shrink);
-
- coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
-}
-
static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[6] = {0};
- h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
+ h2c_parameter[0] = 0x6; /* op_code, 0x6 = Retry_Penalty */
if (low_penalty_ra) {
h2c_parameter[1] |= BIT0;
- /*normal rate except MCS7/6/5, OFDM54/48/36*/
+ /* normal rate except MCS7/6/5, OFDM54/48/36 */
h2c_parameter[2] = 0x00;
- h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/
- h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/
- h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
+ h2c_parameter[3] = 0xf4; /* MCS7 or OFDM54 */
+ h2c_parameter[4] = 0xf5; /* MCS6 or OFDM48 */
+ h2c_parameter[5] = 0xf6; /* MCS5 or OFDM36 */
}
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -788,7 +762,6 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- /*return; */
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], %s turn LowPenaltyRA = %s\n",
(force_exec ? "force to" : ""), (low_penalty_ra ?
@@ -830,9 +803,9 @@ static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex,
btc8723b2ant_set_dac_swing_reg(btcoex, 0x18);
}
-static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
- bool force_exec, bool dac_swing_on,
- u32 dac_swing_lvl)
+void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swing_on,
+ u32 dac_swing_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -863,105 +836,6 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
}
-static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
- bool agc_table_en)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 rssi_adjust_val = 0;
-
- /* BB AGC Gain Table */
- if (agc_table_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BB Agc Table On!\n");
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BB Agc Table Off!\n");
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
- btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
- }
-
- /* RF Gain */
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
- if (agc_table_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Agc Table On!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
- 0xfffff, 0x38fff);
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
- 0xfffff, 0x38ffe);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Agc Table Off!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
- 0xfffff, 0x380c3);
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
- 0xfffff, 0x28ce6);
- }
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
-
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
-
- if (agc_table_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Agc Table On!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
- 0xfffff, 0x38fff);
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
- 0xfffff, 0x38ffe);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Agc Table Off!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
- 0xfffff, 0x380c3);
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
- 0xfffff, 0x28ce6);
- }
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
-
- /* set rssiAdjustVal for wifi module. */
- if (agc_table_en)
- rssi_adjust_val = 8;
- btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
- &rssi_adjust_val);
-}
-
-static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
- bool force_exec, bool agc_table_en)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- (agc_table_en ? "Enable" : "Disable"));
- coex_dm->cur_agc_table_en = agc_table_en;
-
- if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en,
- coex_dm->cur_agc_table_en);
-
- if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
- return;
- }
- btc8723b2ant_set_agc_table(btcoexist, agc_table_en);
-
- coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
-}
-
static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
@@ -1026,61 +900,73 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
}
-static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
+static void btc8723b2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
{
switch (type) {
case 0:
btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x55555555, 0xffff, 0x3);
+ 0x55555555, 0xffffff, 0x3);
break;
case 1:
btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x5afa5afa, 0xffff, 0x3);
+ 0x5afa5afa, 0xffffff, 0x3);
break;
case 2:
- btc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
- 0x5a5a5a5a, 0xffff, 0x3);
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5ada5ada,
+ 0x5ada5ada, 0xffffff, 0x3);
break;
case 3:
btc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
- 0xaaaaaaaa, 0xffff, 0x3);
+ 0xaaaaaaaa, 0xffffff, 0x3);
break;
case 4:
btc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
- 0xffffffff, 0xffff, 0x3);
+ 0xffffffff, 0xffffff, 0x3);
break;
case 5:
btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
- 0x5fff5fff, 0xffff, 0x3);
+ 0x5fff5fff, 0xffffff, 0x3);
break;
case 6:
btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
- 0x5a5a5a5a, 0xffff, 0x3);
+ 0x5a5a5a5a, 0xffffff, 0x3);
break;
case 7:
- btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
- 0x5afa5afa, 0xffff, 0x3);
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
break;
case 8:
- btc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
- 0x5aea5aea, 0xffff, 0x3);
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
break;
case 9:
- btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
- 0x5aea5aea, 0xffff, 0x3);
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
break;
case 10:
- btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
- 0x5aff5aff, 0xffff, 0x3);
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
break;
case 11:
- btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
- 0x5a5f5a5f, 0xffff, 0x3);
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
break;
case 12:
- btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
- 0x5f5f5f5f, 0xffff, 0x3);
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 13:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 14:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 15:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0xaaaaaaaa, 0xffffff, 0x3);
break;
default:
break;
@@ -1094,7 +980,7 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
u8 h2c_parameter[1] = {0};
if (enable)
- h2c_parameter[0] |= BIT0;/* function enable*/
+ h2c_parameter[0] |= BIT0; /* function enable */
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
@@ -1103,6 +989,33 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
+static void btc8723b2ant_set_lps_rpwm(struct btc_coexist *btcoexist,
+ u8 lps_val, u8 rpwm_val)
+{
+ u8 lps = lps_val;
+ u8 rpwm = rpwm_val;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps);
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
+}
+
+static void btc8723b2ant_lps_rpwm(struct btc_coexist *btcoexist,
+ bool force_exec, u8 lps_val, u8 rpwm_val)
+{
+ coex_dm->cur_lps = lps_val;
+ coex_dm->cur_rpwm = rpwm_val;
+
+ if (!force_exec) {
+ if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
+ (coex_dm->pre_rpwm == coex_dm->cur_rpwm))
+ return;
+ }
+ btc8723b2ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
+
+ coex_dm->pre_lps = coex_dm->cur_lps;
+ coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
@@ -1133,6 +1046,8 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[5];
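+
+ /* when A2DP and HID coexist, pass an extra hint to firmware in
+ * TDMA byte5 bit0 (firmware-defined behavior)
+ */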
+ if ((coex_sta->a2dp_exist) && (coex_sta->hid_exist))
+ byte5 = byte5 | 0x1;
h2c_parameter[0] = byte1;
h2c_parameter[1] = byte2;
@@ -1155,23 +1070,13 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
-static void btc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
- bool shrink_rx_lpf, bool low_penalty_ra,
- bool limited_dig, bool bt_lna_constrain)
+static void btc8723b2ant_sw_mechanism(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool bt_lna_constrain)
{
- btc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
btc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
-static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
- bool agc_table_shift, bool adc_backoff,
- bool sw_dac_swing, u32 dac_swing_lvl)
-{
- btc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
- btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
- dac_swing_lvl);
-}
-
static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
u8 antpos_type, bool init_hwcfg,
bool wifi_off)
@@ -1189,44 +1094,66 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
use_ext_switch = true;
if (init_hwcfg) {
- /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT */
- u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
- u32tmp &= ~BIT23;
- u32tmp |= BIT24;
- btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
-
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x39, 0x8, 0x1);
btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
- /* Force GNT_BT to low */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+ if (fw_ver >= 0x180000) {
+ /* Use H2C to set GNT_BT to High to avoid A2DP click */
+ h2c_parameter[0] = 1;
+ btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1,
+ h2c_parameter);
+ } else {
+ btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18);
+ }
+
+ btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
+
+ /* WiFi TRx Mask off */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+ 0x1, 0xfffff, 0x0);
if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
/* tell firmware "no antenna inverse" */
h2c_parameter[0] = 0;
- h2c_parameter[1] = 1; /* ext switch type */
- btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
- h2c_parameter);
- btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
} else {
/* tell firmware "antenna inverse" */
h2c_parameter[0] = 1;
- h2c_parameter[1] = 1; /* ext switch type */
- btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ }
+
+ if (use_ext_switch) {
+ /* ext switch type */
+ h2c_parameter[1] = 1;
+ } else {
+ /* int switch type */
+ h2c_parameter[1] = 0;
+ }
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter);
+ } else {
+ if (fw_ver >= 0x180000) {
+ /* Use H2C to set GNT_BT to "Control by PTA" */
+ h2c_parameter[0] = 0;
+ btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1,
h2c_parameter);
- btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+ } else {
+ btcoexist->btc_write_1byte(btcoexist, 0x765, 0x0);
}
}
/* ext switch setting */
if (use_ext_switch) {
+ if (init_hwcfg) {
+ /* 0x4c[23] = 0, 0x4c[24] = 1 Ant controlled by WL/BT */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp |= BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+ }
+
/* fixed internal switch S1->WiFi, S0->BT */
- if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
- btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
- else
- btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+ btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
switch (antpos_type) {
case BTC_ANT_WIFI_AT_MAIN:
@@ -1240,9 +1167,18 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
0x92c, 0x3, 0x2);
break;
}
- } else { /* internal switch */
- /* fixed ext switch */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
+ } else {
+ /* internal switch */
+ if (init_hwcfg) {
+ /* 0x4c[23] = 0, 0x4c[24] = 1 Ant controlled by WL/BT */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp |= BIT23;
+ u32tmp &= ~BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+ }
+
+ /* fixed ext switch, S1->Main, S0->Aux */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x0);
switch (antpos_type) {
case BTC_ANT_WIFI_AT_MAIN:
/* fixed internal switch S1->WiFi, S0->BT */
@@ -1260,6 +1196,17 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
bool turn_on, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ u8 wifi_rssi_state, bt_rssi_state;
+ s8 wifi_duration_adjust = 0x0;
+ u8 tdma_byte4_modify = 0x0;
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], %s turn %s PS TDMA, type=%d\n",
@@ -1268,6 +1215,15 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
+ if (!(BTC_RSSI_HIGH(wifi_rssi_state) &&
+ BTC_RSSI_HIGH(bt_rssi_state)) && turn_on) {
+ /* for WiFi RSSI low or BT RSSI low */
+ type = type + 100;
+ coex_dm->is_switch_to_1dot5_ant = true;
+ } else {
+ coex_dm->is_switch_to_1dot5_ant = false;
+ }
+
if (!force_exec) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
@@ -1280,83 +1236,131 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
return;
}
+
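+ /* shorten the wifi slot as the channel gets busier (more APs seen)
+ * or as the A2DP bit pool grows, trading wifi time for BT audio
+ */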
+ if (coex_sta->scan_ap_num <= 5) {
+ if (coex_sta->a2dp_bit_pool >= 45)
+ wifi_duration_adjust = -15;
+ else if (coex_sta->a2dp_bit_pool >= 35)
+ wifi_duration_adjust = -10;
+ else
+ wifi_duration_adjust = 5;
+ } else if (coex_sta->scan_ap_num <= 20) {
+ if (coex_sta->a2dp_bit_pool >= 45)
+ wifi_duration_adjust = -15;
+ else if (coex_sta->a2dp_bit_pool >= 35)
+ wifi_duration_adjust = -10;
+ else
+ wifi_duration_adjust = 0;
+ } else if (coex_sta->scan_ap_num <= 40) {
+ if (coex_sta->a2dp_bit_pool >= 45)
+ wifi_duration_adjust = -15;
+ else if (coex_sta->a2dp_bit_pool >= 35)
+ wifi_duration_adjust = -10;
+ else
+ wifi_duration_adjust = -5;
+ } else {
+ if (coex_sta->a2dp_bit_pool >= 45)
+ wifi_duration_adjust = -15;
+ else if (coex_sta->a2dp_bit_pool >= 35)
+ wifi_duration_adjust = -10;
+ else
+ wifi_duration_adjust = -10;
+ }
+
+ if ((bt_link_info->slave_role) && (bt_link_info->a2dp_exist))
+ /* 0x778 = 0x1 at wifi slot (no blocking BT Low-Pri pkts) */
+ tdma_byte4_modify = 0x1;
+
if (turn_on) {
switch (type) {
case 1:
default:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0xe1, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x3c,
+ 0x03, 0xf1, 0x90 | tdma_byte4_modify);
break;
case 2:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
- 0x12, 0xe1, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x2d,
+ 0x03, 0xf1, 0x90 | tdma_byte4_modify);
break;
case 3:
- /* This call breaks BT when wireless is active -
- * comment it out for now until a better fix is found:
- * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
- * 0x3, 0xf1, 0x90);
- */
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1,
+ 0x90 | tdma_byte4_modify);
break;
case 4:
btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
- 0x03, 0xf1, 0x90);
+ 0x03, 0xf1,
+ 0x90 | tdma_byte4_modify);
break;
case 5:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0x60, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x3c,
+ 0x3, 0x70, 0x90 | tdma_byte4_modify);
break;
case 6:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
- 0x12, 0x60, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x2d,
+ 0x3, 0x70, 0x90 | tdma_byte4_modify);
break;
case 7:
btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
- 0x3, 0x70, 0x90);
+ 0x3, 0x70,
+ 0x90 | tdma_byte4_modify);
break;
case 8:
btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
- 0x3, 0x70, 0x90);
+ 0x3, 0x70,
+ 0x90 | tdma_byte4_modify);
break;
case 9:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0xe1, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x3c + wifi_duration_adjust,
+ 0x03, 0xf1, 0x90 | tdma_byte4_modify);
break;
case 10:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
- 0x12, 0xe1, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x2d,
+ 0x03, 0xf1, 0x90 | tdma_byte4_modify);
break;
case 11:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
- 0xa, 0xe1, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1,
+ 0x90 | tdma_byte4_modify);
break;
case 12:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
- 0x5, 0xe1, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x3, 0xf1,
+ 0x90 | tdma_byte4_modify);
break;
case 13:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0x60, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x3c,
+ 0x3, 0x70, 0x90 | tdma_byte4_modify);
break;
case 14:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
- 0x12, 0x60, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x2d,
+ 0x3, 0x70, 0x90 | tdma_byte4_modify);
break;
case 15:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
- 0xa, 0x60, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70,
+ 0x90 | tdma_byte4_modify);
break;
case 16:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
- 0x5, 0x60, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x3, 0x70,
+ 0x90 | tdma_byte4_modify);
break;
case 17:
btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
0x2f, 0x60, 0x90);
break;
case 18:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
- 0x5, 0xe1, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, 0x5,
+ 0xe1, 0x90);
break;
case 19:
btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
@@ -1370,9 +1374,63 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
0x03, 0x70, 0x90);
break;
+
+ case 23:
+ case 123:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35,
+ 0x03, 0x71, 0x10);
+ break;
case 71:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0xe1, 0x90);
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xe3, 0x3c + wifi_duration_adjust,
+ 0x03, 0xf1, 0x90);
+ break;
+ case 101:
+ case 105:
+ case 113:
+ case 171:
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xd3, 0x3a + wifi_duration_adjust,
+ 0x03, 0x70, 0x50 | tdma_byte4_modify);
+ break;
+ case 102:
+ case 106:
+ case 110:
+ case 114:
+ btc8723b2ant_set_fw_ps_tdma(
+ btcoexist, 0xd3, 0x2d + wifi_duration_adjust,
+ 0x03, 0x70, 0x50 | tdma_byte4_modify);
+ break;
+ case 103:
+ case 107:
+ case 111:
+ case 115:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c,
+ 0x03, 0x70,
+ 0x50 | tdma_byte4_modify);
+ break;
+ case 104:
+ case 108:
+ case 112:
+ case 116:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x10,
+ 0x03, 0x70,
+ 0x50 | tdma_byte4_modify);
+ break;
+ case 109:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+ 0x03, 0xf1,
+ 0x90 | tdma_byte4_modify);
+ break;
+ case 121:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70,
+ 0x90 | tdma_byte4_modify);
+ break;
+ case 22:
+ case 122:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35,
+ 0x03, 0x71, 0x11);
break;
}
} else {
@@ -1398,62 +1456,202 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
}
+static void btc8723b2ant_ps_tdma_check_for_power_save_state(
+ struct btc_coexist *btcoexist, bool new_ps_state)
+{
+ u8 lps_mode = 0x0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+ if (lps_mode) {
+ /* already under LPS state */
+ if (new_ps_state) {
+ /* keep state under LPS, do nothing. */
+ } else {
+ /* will leave LPS state, turn off psTdma first */
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ }
+ } else {
+ /* NO PS state */
+ if (new_ps_state) {
+ /* will enter LPS state, turn off psTdma first */
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ } else {
+ /* keep state under NO PS state, do nothing. */
+ }
+ }
+}
+
+static void btc8723b2ant_power_save_state(struct btc_coexist *btcoexist,
+ u8 ps_type, u8 lps_val, u8 rpwm_val)
+{
+ bool low_pwr_disable = false;
+
+ switch (ps_type) {
+ case BTC_PS_WIFI_NATIVE:
+ /* recover to original 32k low power setting */
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ coex_sta->force_lps_on = false;
+ break;
+ case BTC_PS_LPS_ON:
+ btc8723b2ant_ps_tdma_check_for_power_save_state(btcoexist,
+ true);
+ btc8723b2ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
+ rpwm_val);
+ /* when coex forces LPS entry, do not enter 32k low power */
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ /* power save must be executed before psTdma */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ coex_sta->force_lps_on = true;
+ break;
+ case BTC_PS_LPS_OFF:
+ btc8723b2ant_ps_tdma_check_for_power_save_state(btcoexist,
+ false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ coex_sta->force_lps_on = false;
+ break;
+ default:
+ break;
+ }
+}
+
static void btc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
{
/* fw all off */
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
/* sw all off */
- btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
/* hw all off */
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
}
static void btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
/* force to reset coex mechanism */
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
btc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
- btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0);
- btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
+
+ coex_sta->pop_event_cnt = 0;
}
static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false;
bool low_pwr_disable = true;
+ bool scan = false, link = false, roam = false;
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
- if (wifi_connected) {
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ if (coex_sta->bt_abnormal_scan) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
+ } else if (scan || link || roam) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi link process + BT Inq/Page!!\n");
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+ } else if (wifi_connected) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT Inq/Page!!\n");
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else {
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
}
btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
+}
+
+static void
+btc8723b2ant_action_wifi_link_process(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u32 u32tmp;
+ u8 u8tmpa, u8tmpb;
- btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
- coex_dm->need_recover_0x948 = true;
- coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
- btc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_AUX,
- false, false);
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x76e = 0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
+}
+
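+/* returns true when the idle case is fully handled here (the "office
+ * environment" case: BT HID + A2DP with high WiFi RSSI), so the caller can
+ * treat it as a common action; returns false when the caller still has to
+ * run its own mechanism
+ */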
+static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u8 ap_num = 0;
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset - coex_dm->switch_thres_offset;
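+ /* note: switch_thres_offset is deducted twice here (and again for
+ * the BT threshold below), i.e. both switch thresholds are lowered
+ * by 2 * switch_thres_offset
+ */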
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+ tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset - coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+
+ /* office environment */
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) &&
+ (coex_sta->a2dp_exist)) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
+
+ btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ /* sw all off */
+ btc8723b2ant_sw_mechanism(btcoexist, false, false, false,
+ false);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ return true;
+ }
+
+ btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x18);
+ return false;
}
static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
@@ -1472,21 +1670,21 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
low_pwr_disable = false;
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
+ false, false, 0x8);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi non-connected idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
- false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
- 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false, false,
+ false);
common = true;
} else {
@@ -1496,23 +1694,23 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist,
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
+ false, false, 0x8);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi connected + BT non connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
0xb);
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
- false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
common = true;
} else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
@@ -1526,20 +1724,20 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
return false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi connected + BT connected-idle!!\n");
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
+ false, false, 0x8);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
0xb);
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
- false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
common = true;
} else {
@@ -1553,36 +1751,12 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
"[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- if (bt_hs_on)
- return false;
-
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
- 0x1, 0xfffff, 0x0);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC,
- 7);
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 21);
- btc8723b2ant_fw_dac_swing_lvl(btcoexist,
- NORMAL_EXEC,
- 0xb);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist,
- NORMAL_EXEC,
- true);
- else
- btc8723b2ant_dec_bt_pwr(btcoexist,
- NORMAL_EXEC,
- false);
- btc8723b2ant_sw_mechanism1(btcoexist, false,
- false, false,
- false);
- btc8723b2ant_sw_mechanism2(btcoexist, false,
- false, false,
- 0x18);
- common = true;
+ common =
+ btc8723b2ant_action_wifi_idle_process(
+ btcoexist);
}
}
}
@@ -1590,550 +1764,6 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
return common;
}
-static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
- s32 result)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- /* Set PS TDMA for max interval == 1 */
- if (tx_pause) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
-
- if (coex_dm->cur_ps_tdma == 71) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 1) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- }
-
- if (coex_dm->cur_ps_tdma == 9) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 13);
- coex_dm->tdma_adj_type = 13;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
-
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 16) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 13);
- coex_dm->tdma_adj_type = 13;
- }
- }
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
- coex_dm->tdma_adj_type = 71;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
- coex_dm->tdma_adj_type = 4;
- }
-
- if (coex_dm->cur_ps_tdma == 13) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
- coex_dm->tdma_adj_type = 12;
- }
-
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 71) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
- } else if (coex_dm->cur_ps_tdma == 1) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
- } else if (coex_dm->cur_ps_tdma == 1) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 71);
- coex_dm->tdma_adj_type = 71;
- } else if (coex_dm->cur_ps_tdma == 12) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- }
- }
- }
-}
-
-static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
- s32 result)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- /* Set PS TDMA for max interval == 2 */
- if (tx_pause) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
- if (coex_dm->cur_ps_tdma == 1) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
- coex_dm->tdma_adj_type = 8;
- }
- if (coex_dm->cur_ps_tdma == 9) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 16) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- }
- }
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
- coex_dm->tdma_adj_type = 4;
- }
- if (coex_dm->cur_ps_tdma == 13) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 1) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 12) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- }
- }
- }
-}
-
-static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
- s32 result)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- /* Set PS TDMA for max interval == 3 */
- if (tx_pause) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
- if (coex_dm->cur_ps_tdma == 1) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
- coex_dm->tdma_adj_type = 8;
- }
- if (coex_dm->cur_ps_tdma == 9) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 16) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- }
- }
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 6) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 7) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
- coex_dm->tdma_adj_type = 4;
- }
- if (coex_dm->cur_ps_tdma == 13) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 14) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 15) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 1) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 2) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 12) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 10) {
- btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- }
- }
- }
-}
-
static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
bool sco_hid, bool tx_pause,
u8 max_interval)
@@ -2157,34 +1787,44 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 13);
- coex_dm->tdma_adj_type = 13;
+ coex_dm->ps_tdma_du_adj_type = 13;
} else if (max_interval == 2) {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 14);
- coex_dm->tdma_adj_type = 14;
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
} else {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 15);
- coex_dm->tdma_adj_type = 15;
+ coex_dm->ps_tdma_du_adj_type = 15;
}
} else {
if (max_interval == 1) {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 9);
- coex_dm->tdma_adj_type = 9;
+ coex_dm->ps_tdma_du_adj_type = 9;
} else if (max_interval == 2) {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 10);
- coex_dm->tdma_adj_type = 10;
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
} else {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 11);
- coex_dm->tdma_adj_type = 11;
+ coex_dm->ps_tdma_du_adj_type = 11;
}
}
} else {
@@ -2193,34 +1833,44 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 5);
- coex_dm->tdma_adj_type = 5;
+ coex_dm->ps_tdma_du_adj_type = 5;
} else if (max_interval == 2) {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 6);
- coex_dm->tdma_adj_type = 6;
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
} else {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 7);
- coex_dm->tdma_adj_type = 7;
+ coex_dm->ps_tdma_du_adj_type = 7;
}
} else {
if (max_interval == 1) {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 1);
- coex_dm->tdma_adj_type = 1;
+ coex_dm->ps_tdma_du_adj_type = 1;
} else if (max_interval == 2) {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 2);
- coex_dm->tdma_adj_type = 2;
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
} else {
btc8723b2ant_ps_tdma(btcoexist,
NORMAL_EXEC,
true, 3);
- coex_dm->tdma_adj_type = 3;
+ coex_dm->ps_tdma_du_adj_type = 3;
}
}
}
@@ -2234,6 +1884,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
+
+ if ((coex_sta->low_priority_tx) > 1050 ||
+ (coex_sta->low_priority_rx) > 1250)
+ retry_count++;
+
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], retry_count = %d\n", retry_count);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -2250,6 +1905,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
if (up >= n) {
+ /* if the retry count is 0 for n*2 continuous
+ * seconds, enlarge the WiFi duration
+ */
wait_count = 0;
n = 3;
up = 0;
@@ -2266,12 +1924,20 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
if (dn == 2) {
+ /* if the retry count (sampled every 2 seconds)
+ * is >0 and <3 twice in a row, reduce the WiFi
+ * duration
+ */
if (wait_count <= 2)
+ /* avoid oscillating between the two levels */
m++;
else
m = 1;
if (m >= 20)
+ /* m is capped at 20, so the need to adjust the
+ * wifi duration is rechecked within a maximum
+ * interval of 120 seconds
+ */
m = 20;
n = 3 * m;
@@ -2283,12 +1949,20 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
"[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
}
} else {
+ /* once the retry count exceeds 3, reduce the
+ * WiFi duration
+ */
if (wait_count == 1)
+ /* to avoid oscillating between the two levels */
m++;
else
m = 1;
if (m >= 20)
+ /* m is capped at 20, so the need to adjust the
+ * wifi duration is rechecked within a maximum
+ * interval of 120 seconds
+ */
m = 20;
n = 3 * m;
@@ -2302,22 +1976,765 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], max Interval = %d\n", max_interval);
- if (max_interval == 1)
- set_tdma_int1(btcoexist, tx_pause, result);
- else if (max_interval == 2)
- set_tdma_int2(btcoexist, tx_pause, result);
- else if (max_interval == 3)
- set_tdma_int3(btcoexist, tx_pause, result);
+ if (max_interval == 1) {
+ if (tx_pause) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type =
+ 5;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type =
+ 13;
+ }
+ }
+ } else {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type = 71;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type =
+ 71;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type =
+ 9;
+ }
+ }
+ }
+ } else if (max_interval == 2) {
+ if (tx_pause) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ }
+ }
+ } else {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ }
+ }
+ }
+ } else if (max_interval == 3) {
+ if (tx_pause) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ }
+ }
+ } else {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ }
+ }
+ }
+ }
}
- /*if current PsTdma not match with the recorded one (when scan, dhcp..),
- *then we have to adjust it back to the previous recorded one.
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
+
+ /* if current PsTdma not match with the recorded one (scan, dhcp, ...),
+ * then we have to adjust it back to the previous recorded one.
*/
- if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
+ if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
bool scan = false, link = false, roam = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2325,7 +2742,7 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (!scan && !link && !roam)
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- coex_dm->tdma_adj_type);
+ coex_dm->ps_tdma_du_adj_type);
else
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
@@ -2335,58 +2752,55 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
/* SCO only or SCO+PAN(HS) */
static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state;
+ u8 wifi_rssi_state, bt_rssi_state;
u32 wifi_bw;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(
+ btcoexist, 2, BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset,
+ 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- /*for SCO quality at 11b/g mode*/
if (BTC_WIFI_BW_LEGACY == wifi_bw)
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 2);
- else /*for SCO quality & wifi performance balance at 11n mode*/
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 8);
+ /* for SCO quality at 11b/g mode */
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ else
+ /* for SCO quality & wifi performance balance at 11n mode */
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
- /*for voice quality */
+ /* for voice quality */
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- true, 0x4);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- true, 0x4);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- true, 0x4);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- true, 0x4);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
}
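+
+/* note: with sw_mechanism2 gone, the two RSSI sub-branches in each
+ * bandwidth case above now call btc8723b2ant_sw_mechanism() with identical
+ * arguments; only the HT40 vs. non-HT40 split still changes the call
+ */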
@@ -2395,26 +2809,32 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, bt_rssi_state;
u32 wifi_bw;
+ u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_LEGACY == wifi_bw) /*/for HID at 11b/g mode*/
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
- else /*for HID quality & wifi performance balance at 11n mode*/
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 9);
+ if (wifi_bw == BTC_WIFI_BW_LEGACY)
+ /* for HID at 11b/g mode */
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ else
+ /* for HID quality & wifi performance balance at 11n mode */
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 9);
+
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
@@ -2426,44 +2846,36 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
}
-/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
{
u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
u8 ap_num = 0;
+ u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist,
- 1, 2, 40, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, 40, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
@@ -2474,35 +2886,40 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
0x0);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
/* sw mechanism */
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- true, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- true, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
return;
}
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
@@ -2516,104 +2933,116 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
}
static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+ tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
/* sw mechanism */
- btcoexist->btc_get(btcoexist,
- BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
}
static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+ tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 10);
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
@@ -2626,109 +3055,109 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
}
-/*PAN(HS) only*/
+/* PAN(HS) only */
static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+ tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
-
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
}
-/*PAN(EDR)+A2DP*/
+/* PAN(EDR) + A2DP */
static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+ tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ else
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 12);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 12);
if (BTC_WIFI_BW_HT40 == wifi_bw)
btc8723b2ant_tdma_duration_adjust(btcoexist, false,
true, 3);
@@ -2736,74 +3165,80 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
btc8723b2ant_tdma_duration_adjust(btcoexist, false,
false, 3);
} else {
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
- btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
}
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, false,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, false,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, false,
+ false, false);
}
}
}
static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
-
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+ tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
if (BTC_WIFI_BW_HT40 == wifi_bw) {
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
3);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x780);
} else {
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
6);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
}
btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
} else {
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
@@ -2813,54 +3248,61 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
}
-/* HID+A2DP+PAN(EDR) */
+/* HID + A2DP + PAN(EDR) */
static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+ tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
@@ -2878,94 +3320,148 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
}
static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
u32 wifi_bw;
+ u8 ap_num = 0;
+ u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
- wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2,
+ tmp, 0);
+ tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+ coex_dm->switch_thres_offset;
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 3, tmp, 37);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true, 0x5);
btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (btc8723b_need_dec_pwr(btcoexist))
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
- else
- btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
-
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ /* ap_num drives the power/TDMA choices below, so fetch it here */
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
- btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ else if (BTC_RSSI_MEDIUM(bt_rssi_state))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ } else {
+ /* only in 802.11N mode do we need to dec BT power to level 4 */
+ if (BTC_RSSI_HIGH(bt_rssi_state)) {
+ /* need to check the number of APs */
+ if (ap_num < 10)
+ btc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC, 4);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC, 2);
+ } else if (BTC_RSSI_MEDIUM(bt_rssi_state)) {
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ } else {
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+ }
+ }
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
- btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
- else
- btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
+
+ if (BTC_RSSI_HIGH(bt_rssi_state)) {
+ if (ap_num < 10)
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ false, 1);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ false, 3);
+ } else {
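+ /* these match the limited-tx registers: AMPDU max time (0x456),
+ * retry limit 8 (0x42a), auto rate fallback (0x430/0x434)
+ */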
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 18);
+ btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
+ btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
+ btcoexist->btc_write_4byte(btcoexist, 0x430, 0x0);
+ btcoexist->btc_write_4byte(btcoexist, 0x434, 0x01010000);
+
+ if (ap_num < 10)
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ true, 1);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ true, 3);
+ }
/* sw mechanism */
if (BTC_WIFI_BW_HT40 == wifi_bw) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, true, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, true, true,
+ false, false);
}
} else {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, true, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
} else {
- btc8723b2ant_sw_mechanism1(btcoexist, false, true,
- false, false);
- btc8723b2ant_sw_mechanism2(btcoexist, false, false,
- false, 0x18);
+ btc8723b2ant_sw_mechanism(btcoexist, false, true,
+ false, false);
}
}
}
+static void btc8723b2ant_action_wifi_multi_port(struct btc_coexist *btcoexist)
+{
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ /* sw all off */
+ btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false);
+
+ /* hw all off */
+ btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+
+ btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+}
+
static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 algorithm = 0;
+ u32 num_of_wifi_link = 0;
+ u32 wifi_link_status = 0;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool miracast_plus_bt = false;
+ bool scan = false, link = false, roam = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], RunCoexistMechanism()===>\n");
@@ -2989,14 +3485,46 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
"[BTCoex], BT is under inquiry/page scan !!\n");
btc8723b2ant_action_bt_inquiry(btcoexist);
return;
- } else {
- if (coex_dm->need_recover_0x948) {
- coex_dm->need_recover_0x948 = false;
- btcoexist->btc_write_2byte(btcoexist, 0x948,
- coex_dm->backup_0x948);
- }
}
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (scan || link || roam) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], WiFi is under Link Process !!\n");
+ btc8723b2ant_action_wifi_link_process(btcoexist);
+ return;
+ }
+
+ /* for P2P */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+ &wifi_link_status);
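+ /* the upper 16 bits of wifi_link_status hold the number of links */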
+ num_of_wifi_link = wifi_link_status >> 16;
+
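+ /* two or more links, or an active P2P GO, is handled as multi-port */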
+ if ((num_of_wifi_link >= 2) ||
+ (wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
+ num_of_wifi_link, wifi_link_status);
+
+ if (bt_link_info->bt_link_exist)
+ miracast_plus_bt = true;
+ else
+ miracast_plus_bt = false;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+ &miracast_plus_bt);
+ btc8723b2ant_action_wifi_multi_port(btcoexist);
+
+ return;
+ }
+
+ miracast_plus_bt = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+ &miracast_plus_bt);
+
coex_dm->cur_algorithm = algorithm;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Algorithm = %d\n",
@@ -3077,19 +3605,37 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
static void btc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist)
{
+ bool is_in_mp_mode = false;
+ u8 h2c_parameter[2] = {0};
+ u32 fw_ver = 0;
+
/* set wlan_act to low */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
- /* Force GNT_BT to High */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
- /* BT select s0/s1 is controlled by BT */
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+
+ /* WiFi standby while GNT_BT 0 -> 1 */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ if (fw_ver >= 0x180000) {
+ /* Use H2C to set GNT_BT to HIGH */
+ h2c_parameter[0] = 1;
+ btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1, h2c_parameter);
+ } else {
+ btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18);
+ }
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_IS_IN_MP_MODE,
+ &is_in_mp_mode);
+ if (!is_in_mp_mode)
+ /* BT select s0/s1 is controlled by BT */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0);
+ else
+ /* BT select s0/s1 is controlled by WiFi */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
}
/*********************************************************************
- * work around function start with wa_btc8723b2ant_
- *********************************************************************/
-/*********************************************************************
- * extern function start with EXbtc8723b2ant_
+ * extern function start with ex_btc8723b2ant_
*********************************************************************/
void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
@@ -3107,19 +3653,90 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
u8tmp |= 0x5;
btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
- /*Antenna config */
+ /* Antenna config */
btc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN,
true, false);
+ coex_sta->dis_ver_info_cnt = 0;
+
/* PTA parameter */
- btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0);
+ btc8723b2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
/* Enable counter statistics */
- /*0x76e[3] =1, WLAN_Act control by PTA*/
- btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ /* 0x76e[3] = 1, WLAN_ACT controlled by PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
}
+void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u16 u16tmp = 0x0;
+ u32 value = 0;
+
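+ /* 0x67[5] = 1 : BT select s0/s1 is controlled by WiFi */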
+ btcoexist->btc_write_1byte(btcoexist, 0x67, 0x20);
+
+ /* enable BB via REG_SYS_FUNC_EN so that 0x948 can be written correctly */
+ u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x2);
+ btcoexist->btc_write_2byte(btcoexist, 0x2, u16tmp | BIT0 | BIT1);
+
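+ /* 0x948 is the antenna-path control; start from a cleared state */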
+ btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
+
+ if (btcoexist->chip_interface == BTC_INTF_USB) {
+ /* fixed at S0 for USB interface */
+ board_info->btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT;
+ } else {
+ /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
+ if (board_info->single_ant_path == 0) {
+ /* set to S1 */
+ board_info->btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
+ } else if (board_info->single_ant_path == 1) {
+ /* set to S0 */
+ board_info->btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT;
+ }
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ANTPOSREGRISTRY_CTRL,
+ &value);
+ }
+}
+
+void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u8 u8tmp = 0x4; /* Set BIT2 by default since it's 2ant case */
+
+ /**
+ * S0 or S1 setting and local register setting (by this, FW can get
+ * ant number, S0/S1, ... info)
+ *
+ * Local setting bit define
+ * BIT0: "0" : no antenna inverse; "1" : antenna inverse
+ * BIT1: "0" : internal switch; "1" : external switch
+ * BIT2: "0" : one antenna; "1" : two antennas
+ *
+ * NOTE: the default here is internal switch with no antenna inverse
+ * ==> BIT0 = 0 and BIT1 = 0; BIT2 is set above for the 2-ant case
+ */
+ if (btcoexist->chip_interface == BTC_INTF_USB) {
+ /* fixed at S0 for USB interface */
+ u8tmp |= 0x1; /* antenna inverse */
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0xfe08, u8tmp);
+ } else {
+ /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
+ if (board_info->single_ant_path == 0) {
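+ /* set to S1 (keep defaults; no antenna inverse) */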
+ } else if (board_info->single_ant_path == 1) {
+ /* set to S0 */
+ u8tmp |= 0x1; /* antenna inverse */
+ }
+
+ if (btcoexist->chip_interface == BTC_INTF_PCI)
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0x384,
+ u8tmp);
+ else if (btcoexist->chip_interface == BTC_INTF_SDIO)
+ btcoexist->btc_write_local_reg_1byte(btcoexist, 0x60,
+ u8tmp);
+ }
+}
+
void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3215,7 +3832,6 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
"uplink" : "downlink")));
-
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
"SCO/HID/PAN/A2DP",
bt_link_info->sco_exist, bt_link_info->hid_exist,
@@ -3265,7 +3881,7 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
ps_tdma_case, coex_dm->auto_tdma_adjust);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
coex_dm->cur_ignore_wlan_act);
/* Hw setting */
@@ -3396,6 +4012,12 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u32 u32tmp;
+ u8 u8tmpa, u8tmpb;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
if (BTC_SCAN_START == type)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3403,6 +4025,12 @@ void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
else if (BTC_SCAN_FINISH == type)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCAN FINISH notify\n");
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
+ &coex_sta->scan_ap_num);
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x76e=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
}
void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
@@ -3424,6 +4052,7 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 h2c_parameter[3] = {0};
u32 wifi_bw;
u8 wifi_central_chnl;
+ u8 ap_num = 0;
if (BTC_MEDIA_CONNECT == type)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3441,10 +4070,16 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
h2c_parameter[1] = wifi_central_chnl;
btcoexist->btc_get(btcoexist,
BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_HT40 == wifi_bw)
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
h2c_parameter[2] = 0x30;
- else
- h2c_parameter[2] = 0x20;
+ } else {
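+ /* for HT20, use the HT40 parameter when only a few APs are around */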
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
+ &ap_num);
+ if (ap_num < 10)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
}
coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
@@ -3492,7 +4127,7 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
if (i == 1)
bt_info = tmpbuf[i];
- if (i == length-1)
+ if (i == length - 1)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"0x%02x]\n", tmpbuf[i]);
else
@@ -3507,17 +4142,30 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
- coex_sta->bt_retry_cnt = /* [3:0]*/
+ coex_sta->bt_retry_cnt =
coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
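+ /* a nonzero retry count is counted as a pop event */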
+ if (coex_sta->bt_retry_cnt >= 1)
+ coex_sta->pop_event_cnt++;
+
coex_sta->bt_rssi =
coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
- coex_sta->bt_info_ext =
- coex_sta->bt_info_c2h[rsp_source][4];
+ coex_sta->bt_info_ext = coex_sta->bt_info_c2h[rsp_source][4];
+
+ if (coex_sta->bt_info_c2h[rsp_source][2] & 0x20)
+ coex_sta->c2h_bt_remote_name_req = true;
+ else
+ coex_sta->c2h_bt_remote_name_req = false;
+
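+ /* a bt_info of type 0x49 carries the A2DP bit pool in byte 6 */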
+ if (coex_sta->bt_info_c2h[rsp_source][1] == 0x49)
+ coex_sta->a2dp_bit_pool =
+ coex_sta->bt_info_c2h[rsp_source][6];
+ else
+ coex_sta->a2dp_bit_pool = 0;
/* Here we need to resend some wifi info to BT
- because bt is reset and loss of the info.
+ * because BT was reset and lost the info.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3552,20 +4200,21 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
#endif
}
- /* check BIT2 first ==> check if bt is under inquiry or page scan*/
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
if (bt_info & BT_INFO_8723B_2ANT_B_INQ_PAGE)
coex_sta->c2h_bt_inquiry_page = true;
else
coex_sta->c2h_bt_inquiry_page = false;
- /* set link exist status*/
if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ /* set link exist status */
coex_sta->bt_link_exist = false;
coex_sta->pan_exist = false;
coex_sta->a2dp_exist = false;
coex_sta->hid_exist = false;
coex_sta->sco_exist = false;
- } else { /* connection exists */
+ } else {
+ /* connection exists */
coex_sta->bt_link_exist = true;
if (bt_info & BT_INFO_8723B_2ANT_B_FTP)
coex_sta->pan_exist = true;
@@ -3583,6 +4232,16 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->sco_exist = true;
else
coex_sta->sco_exist = false;
+
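+ /* no HID/SCO reported and not in inquiry: infer a HID link from
+ * heavy high-priority traffic
+ */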
+ if ((!coex_sta->hid_exist) &&
+ (!coex_sta->c2h_bt_inquiry_page) &&
+ (!coex_sta->sco_exist)) {
+ if (coex_sta->high_priority_tx +
+ coex_sta->high_priority_rx >= 160) {
+ coex_sta->hid_exist = true;
+ bt_info = bt_info | 0x28;
+ }
+ }
}
btc8723b2ant_update_bt_link_info(btcoexist);
@@ -3640,46 +4299,67 @@ void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
ex_btc8723b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
}
+void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+
+ if (pnp_state == BTC_WIFI_PNP_SLEEP) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
+
+ /* The driver does not leave IPS/LPS when it is going to sleep, so
+ * BTCoexistence thinks WiFi is still under IPS/LPS.
+ *
+ * BT should clear the UnderIPS/UnderLPS state to avoid a state
+ * mismatch after wakeup.
+ */
+ coex_sta->under_ips = false;
+ coex_sta->under_lps = false;
+ } else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
+ ex_btc8723b2ant_init_hwconfig(btcoexist);
+ btc8723b2ant_init_coex_dm(btcoexist);
+ btc8723b2ant_query_bt_info(btcoexist);
+ }
+}
+
void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_board_info *board_info = &btcoexist->board_info;
- struct btc_stack_info *stack_info = &btcoexist->stack_info;
- static u8 dis_ver_info_cnt;
- u32 fw_ver = 0, bt_patch_ver = 0;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], ==========================Periodical===========================\n");
- if (dis_ver_info_cnt <= 5) {
- dis_ver_info_cnt += 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- stack_info->profile_notified ? "Yes" : "No",
- stack_info->hci_version);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
- &bt_patch_ver);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
+ if (coex_sta->dis_ver_info_cnt <= 5) {
+ coex_sta->dis_ver_info_cnt += 1;
+ if (coex_sta->dis_ver_info_cnt == 3) {
+ /* Antenna config to set 0x765 = 0x0 (GNT_BT controlled by
+ * PTA) after initialization
+ */
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set GNT_BT control by PTA\n");
+ btc8723b2ant_set_ant_path(
+ btcoexist, BTC_ANT_WIFI_AT_MAIN, false, false);
+ }
}
#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
btc8723b2ant_query_bt_info(btcoexist);
- btc8723b2ant_monitor_bt_ctr(btcoexist);
- btc8723b2ant_monitor_bt_enable_disable(btcoexist);
#else
+ btc8723b2ant_monitor_bt_ctr(btcoexist);
+ btc8723b2ant_monitor_wifi_ctr(btcoexist);
+
+ /* some BT speakers emit high-priority packets before playing; this
+ * would make a HID link appear to exist
+ */
+ if ((coex_sta->high_priority_tx + coex_sta->high_priority_rx < 50) &&
+ (bt_link_info->hid_exist))
+ bt_link_info->hid_exist = false;
+
if (btc8723b2ant_is_wifi_status_changed(btcoexist) ||
coex_dm->auto_tdma_adjust)
btc8723b2ant_run_coexist_mechanism(btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
index 567f354caf95..18a35c7faba9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -41,6 +41,11 @@
#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2
+/* WiFi RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES 42
+/* BT RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES 46
+
enum BT_INFO_SRC_8723B_2ANT {
BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0,
BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1,
@@ -75,8 +80,8 @@ enum BT_8723B_2ANT_COEX_ALGO {
struct coex_dm_8723b_2ant {
/* fw mechanism */
- bool pre_dec_bt_pwr;
- bool cur_dec_bt_pwr;
+ u8 pre_dec_bt_pwr_lvl;
+ u8 cur_dec_bt_pwr_lvl;
u8 pre_fw_dac_swing_lvl;
u8 cur_fw_dac_swing_lvl;
bool cur_ignore_wlan_act;
@@ -84,7 +89,7 @@ struct coex_dm_8723b_2ant {
u8 pre_ps_tdma;
u8 cur_ps_tdma;
u8 ps_tdma_para[5];
- u8 tdma_adj_type;
+ u8 ps_tdma_du_adj_type;
bool reset_tdma_adjust;
bool auto_tdma_adjust;
bool pre_ps_tdma_on;
@@ -122,8 +127,13 @@ struct coex_dm_8723b_2ant {
u8 bt_status;
u8 wifi_chnl_info[3];
- bool need_recover_0x948;
- u16 backup_0x948;
+ u8 pre_lps;
+ u8 cur_lps;
+ u8 pre_rpwm;
+ u8 cur_rpwm;
+
+ bool is_switch_to_1dot5_ant;
+ u8 switch_thres_offset;
};
struct coex_sta_8723b_2ant {
@@ -132,6 +142,7 @@ struct coex_sta_8723b_2ant {
bool a2dp_exist;
bool hid_exist;
bool pan_exist;
+ bool bt_abnormal_scan;
bool under_lps;
bool under_ips;
@@ -140,14 +151,33 @@ struct coex_sta_8723b_2ant {
u32 low_priority_tx;
u32 low_priority_rx;
u8 bt_rssi;
+ bool bt_tx_rx_mask;
u8 pre_bt_rssi_state;
u8 pre_wifi_rssi_state[4];
bool c2h_bt_info_req_sent;
u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
bool c2h_bt_inquiry_page;
+ bool c2h_bt_remote_name_req;
u8 bt_retry_cnt;
u8 bt_info_ext;
+ u32 pop_event_cnt;
+ u8 scan_ap_num;
+
+ u32 crc_ok_cck;
+ u32 crc_ok_11g;
+ u32 crc_ok_11n;
+ u32 crc_ok_11n_agg;
+
+ u32 crc_err_cck;
+ u32 crc_err_11g;
+ u32 crc_err_11n;
+ u32 crc_err_11n_agg;
+ bool force_lps_on;
+
+ u8 dis_ver_info_cnt;
+
+ u8 a2dp_bit_pool;
};
/*********************************************************************
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 8b689ed9a629..5e9f3b0f7a25 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -23,7 +23,7 @@
*
*****************************************************************************/
-/*============================================================
+/**************************************************************
* Description:
*
* This file is for RTL8821A Co-exist mechanism
@@ -31,21 +31,21 @@
* History
* 2012/11/15 Cosa first check in.
*
- *============================================================
-*/
-/*============================================================
+ **************************************************************/
+
+/**************************************************************
* include files
- *============================================================
- */
+ **************************************************************/
#include "halbt_precomp.h"
-/*============================================================
+/**************************************************************
* Global variables, these are static variables
- *============================================================
- */
+ **************************************************************/
static struct coex_dm_8821a_1ant glcoex_dm_8821a_1ant;
static struct coex_dm_8821a_1ant *coex_dm = &glcoex_dm_8821a_1ant;
static struct coex_sta_8821a_1ant glcoex_sta_8821a_1ant;
static struct coex_sta_8821a_1ant *coex_sta = &glcoex_sta_8821a_1ant;
+static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
+ u8 wifi_status);
static const char *const glbt_info_src_8821a_1ant[] = {
"BT Info[wifi fw]",
@@ -53,22 +53,21 @@ static const char *const glbt_info_src_8821a_1ant[] = {
"BT Info[bt auto report]",
};
-static u32 glcoex_ver_date_8821a_1ant = 20130816;
-static u32 glcoex_ver_8821a_1ant = 0x41;
+static u32 glcoex_ver_date_8821a_1ant = 20130816;
+static u32 glcoex_ver_8821a_1ant = 0x41;
-/*============================================================
+/**************************************************************
* local function proto type if needed
*
- * local function start with halbtc8821a1ant_
- *============================================================
- */
-static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
- u8 level_num, u8 rssi_thresh,
- u8 rssi_thresh1)
+ * local function start with btc8821a1ant_
+ **************************************************************/
+static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
+ u8 level_num, u8 rssi_thresh,
+ u8 rssi_thresh1)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- long bt_rssi = 0;
- u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+ long bt_rssi = 0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
bt_rssi = coex_sta->bt_rssi;
@@ -150,9 +149,9 @@ static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
return bt_rssi_state;
}
-static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
- u8 index, u8 level_num, u8 rssi_thresh,
- u8 rssi_thresh1)
+static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num, u8 rssi_thresh,
+ u8 rssi_thresh1)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
long wifi_rssi = 0;
@@ -165,8 +164,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_LOW)) {
- if (wifi_rssi >=
- (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
+ if (wifi_rssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], wifi RSSI state switch to High\n");
@@ -197,8 +196,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_LOW)) {
- if (wifi_rssi >=
- (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
+ if (wifi_rssi >= (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], wifi RSSI state switch to Medium\n");
@@ -211,9 +210,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
BTC_RSSI_STATE_MEDIUM) ||
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_MEDIUM)) {
- if (wifi_rssi >=
- (rssi_thresh1 +
- BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
+ if (wifi_rssi >= (rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], wifi RSSI state switch to High\n");
@@ -243,14 +241,14 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
return wifi_rssi_state;
}
-static void halbtc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist,
- bool force_exec, u32 dis_rate_mask)
+static void btc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist,
+ bool force_exec, u32 dis_rate_mask)
{
coex_dm->cur_ra_mask = dis_rate_mask;
if (force_exec ||
(coex_dm->pre_ra_mask != coex_dm->cur_ra_mask)) {
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask,
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK,
&coex_dm->cur_ra_mask);
}
coex_dm->pre_ra_mask = coex_dm->cur_ra_mask;
@@ -259,14 +257,14 @@ static void halbtc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist,
static void btc8821a1ant_auto_rate_fb_retry(struct btc_coexist *btcoexist,
bool force_exec, u8 type)
{
- bool wifi_under_b_mode = false;
+ bool wifi_under_b_mode = false;
coex_dm->cur_arfr_type = type;
if (force_exec ||
(coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) {
switch (coex_dm->cur_arfr_type) {
- case 0: /* normal mode*/
+ case 0: /* normal mode */
btcoexist->btc_write_4byte(btcoexist, 0x430,
coex_dm->backup_arfr_cnt1);
btcoexist->btc_write_4byte(btcoexist, 0x434,
@@ -296,19 +294,19 @@ static void btc8821a1ant_auto_rate_fb_retry(struct btc_coexist *btcoexist,
coex_dm->pre_arfr_type = coex_dm->cur_arfr_type;
}
-static void halbtc8821a1ant_retry_limit(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
+static void btc8821a1ant_retry_limit(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
{
coex_dm->cur_retry_limit_type = type;
if (force_exec ||
(coex_dm->pre_retry_limit_type != coex_dm->cur_retry_limit_type)) {
switch (coex_dm->cur_retry_limit_type) {
- case 0: /* normal mode*/
+ case 0: /* normal mode */
btcoexist->btc_write_2byte(btcoexist, 0x42a,
coex_dm->backup_retry_limit);
break;
- case 1: /* retry limit = 8*/
+ case 1: /* retry limit = 8 */
btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808);
break;
default:
@@ -318,19 +316,19 @@ static void halbtc8821a1ant_retry_limit(struct btc_coexist *btcoexist,
coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type;
}
-static void halbtc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
+static void btc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
{
coex_dm->cur_ampdu_time_type = type;
if (force_exec ||
(coex_dm->pre_ampdu_time_type != coex_dm->cur_ampdu_time_type)) {
switch (coex_dm->cur_ampdu_time_type) {
- case 0: /* normal mode*/
+ case 0: /* normal mode */
btcoexist->btc_write_1byte(btcoexist, 0x456,
coex_dm->backup_ampdu_max_time);
break;
- case 1: /* AMPDU timw = 0x38 * 32us*/
+ case 1: /* AMPDU time = 0x38 * 32us */
btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38);
break;
default:
@@ -341,88 +339,85 @@ static void halbtc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist,
coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type;
}
-static void halbtc8821a1ant_limited_tx(struct btc_coexist *btcoexist,
- bool force_exec, u8 ra_mask_type,
- u8 arfr_type, u8 retry_limit_type,
- u8 ampdu_time_type)
+static void btc8821a1ant_limited_tx(struct btc_coexist *btcoexist,
+ bool force_exec, u8 ra_mask_type,
+ u8 arfr_type, u8 retry_limit_type,
+ u8 ampdu_time_type)
{
switch (ra_mask_type) {
- case 0: /* normal mode*/
- halbtc8821a1ant_update_ra_mask(btcoexist, force_exec, 0x0);
+ case 0: /* normal mode */
+ btc8821a1ant_update_ra_mask(btcoexist, force_exec, 0x0);
break;
- case 1: /* disable cck 1/2*/
- halbtc8821a1ant_update_ra_mask(btcoexist, force_exec,
- 0x00000003);
+ case 1: /* disable cck 1/2 */
+ btc8821a1ant_update_ra_mask(btcoexist, force_exec,
+ 0x00000003);
break;
- case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/
- halbtc8821a1ant_update_ra_mask(btcoexist, force_exec,
- 0x0001f1f7);
+ case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
+ btc8821a1ant_update_ra_mask(btcoexist, force_exec,
+ 0x0001f1f7);
break;
default:
break;
}
btc8821a1ant_auto_rate_fb_retry(btcoexist, force_exec, arfr_type);
- halbtc8821a1ant_retry_limit(btcoexist, force_exec, retry_limit_type);
- halbtc8821a1ant_ampdu_max_time(btcoexist, force_exec, ampdu_time_type);
+ btc8821a1ant_retry_limit(btcoexist, force_exec, retry_limit_type);
+ btc8821a1ant_ampdu_max_time(btcoexist, force_exec, ampdu_time_type);
}
-static void halbtc8821a1ant_limited_rx(struct btc_coexist *btcoexist,
- bool force_exec, bool rej_ap_agg_pkt,
- bool bt_ctrl_agg_buf_size,
- u8 agg_buf_size)
+static void btc8821a1ant_limited_rx(struct btc_coexist *btcoexist,
+ bool force_exec, bool rej_ap_agg_pkt,
+ bool bt_ctrl_agg_buf_size, u8 agg_buf_size)
{
bool reject_rx_agg = rej_ap_agg_pkt;
bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
u8 rx_agg_size = agg_buf_size;
- /*============================================*/
- /* Rx Aggregation related setting*/
- /*============================================*/
+ /* Rx Aggregation related setting */
btcoexist->btc_set(btcoexist,
BTC_SET_BL_TO_REJ_AP_AGG_PKT, &reject_rx_agg);
- /* decide BT control aggregation buf size or not*/
+ /* decide BT control aggregation buf size or not */
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
&bt_ctrl_rx_agg_size);
- /* aggregation buf size, only work when BT control Rx agg size.*/
+ /* aggregation buf size, only work when BT control Rx agg size */
btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
- /* real update aggregation setting*/
+ /* real update aggregation setting */
btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
}
-static void halbtc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+static void btc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
- u32 reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp;
- u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
+ u32 reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
reg_hp_tx_rx = 0x770;
reg_lp_tx_rx = 0x774;
u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_tx_rx);
reg_hp_tx = u4_tmp & MASKLWORD;
- reg_hp_rx = (u4_tmp & MASKHWORD)>>16;
+ reg_hp_rx = (u4_tmp & MASKHWORD) >> 16;
u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_tx_rx);
reg_lp_tx = u4_tmp & MASKLWORD;
- reg_lp_rx = (u4_tmp & MASKHWORD)>>16;
+ reg_lp_rx = (u4_tmp & MASKHWORD) >> 16;
coex_sta->high_priority_tx = reg_hp_tx;
coex_sta->high_priority_rx = reg_hp_rx;
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- /* reset counter*/
+ /* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
}
-static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
+static void btc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
coex_sta->c2h_bt_info_req_sent = true;
- h2c_parameter[0] |= BIT0; /* trigger*/
+ h2c_parameter[0] |= BIT0; /* trigger */
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
@@ -431,10 +426,43 @@ static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
-static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
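+/* return true if the wifi busy / 4-way handshake / BT HS state has
+ * changed since the last call while wifi is connected
+ */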
+bool btc8821a1ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
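+	/* note: the pre_* statics start out true, so the first poll
+	 * after association always reports a change (assumed intent)
+	 */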
+ static bool pre_wifi_busy = true;
+ static bool pre_under_4way = true;
+ static bool pre_bt_hs_on = true;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void btc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
+ bool bt_hs_on = false;
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
@@ -444,13 +472,13 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
bt_link_info->pan_exist = coex_sta->pan_exist;
bt_link_info->hid_exist = coex_sta->hid_exist;
- /* work around for HS mode.*/
+	/* workaround for HS mode */
if (bt_hs_on) {
bt_link_info->pan_exist = true;
bt_link_info->bt_link_exist = true;
}
- /* check if Sco only*/
+ /* check if Sco only */
if (bt_link_info->sco_exist &&
!bt_link_info->a2dp_exist &&
!bt_link_info->pan_exist &&
@@ -459,7 +487,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
else
bt_link_info->sco_only = false;
- /* check if A2dp only*/
+ /* check if A2dp only */
if (!bt_link_info->sco_exist &&
bt_link_info->a2dp_exist &&
!bt_link_info->pan_exist &&
@@ -468,7 +496,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
else
bt_link_info->a2dp_only = false;
- /* check if Pan only*/
+ /* check if Pan only */
if (!bt_link_info->sco_exist &&
!bt_link_info->a2dp_exist &&
bt_link_info->pan_exist &&
@@ -477,7 +505,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
else
bt_link_info->pan_only = false;
- /* check if Hid only*/
+ /* check if Hid only */
if (!bt_link_info->sco_exist &&
!bt_link_info->a2dp_exist &&
!bt_link_info->pan_exist &&
@@ -487,13 +515,13 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
bt_link_info->hid_only = false;
}
-static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
+static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool bt_hs_on = false;
- u8 algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED;
- u8 num_of_diff_profile = 0;
+ bool bt_hs_on = false;
+ u8 algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED;
+ u8 num_of_diff_profile = 0;
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
@@ -605,7 +633,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
"[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
- bt_link_info->pan_exist) {
+ bt_link_info->pan_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
@@ -618,7 +646,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
- bt_link_info->a2dp_exist) {
+ bt_link_info->a2dp_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
@@ -670,53 +698,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
return algorithm;
}
-static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
- bool enable_auto_report)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[1] = {0};
-
- h2c_parameter[0] = 0;
-
- if (enable_auto_report)
- h2c_parameter[0] |= BIT0;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
-
- btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
-}
-
-static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
- bool force_exec,
- bool enable_auto_report)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""), ((enable_auto_report) ?
- "Enabled" : "Disabled"));
- coex_dm->cur_bt_auto_report = enable_auto_report;
-
- if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
-
- if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
- return;
- }
- halbtc8821a1ant_set_bt_auto_report(btcoexist, coex_dm->cur_bt_auto_report);
-
- coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
-}
-
-static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
- bool low_penalty_ra)
+static void btc8821a1ant_set_sw_penalty_tx_rate(struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[6] = {0};
@@ -725,11 +708,11 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
if (low_penalty_ra) {
h2c_parameter[1] |= BIT0;
- /*normal rate except MCS7/6/5, OFDM54/48/36*/
+ /* normal rate except MCS7/6/5, OFDM54/48/36 */
h2c_parameter[2] = 0x00;
- h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/
- h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/
- h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
+ h2c_parameter[3] = 0xf7; /* MCS7 or OFDM54 */
+ h2c_parameter[4] = 0xf8; /* MCS6 or OFDM48 */
+ h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
}
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -739,8 +722,8 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
-static void halbtc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist,
- bool force_exec, bool low_penalty_ra)
+static void btc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
{
coex_dm->cur_low_penalty_ra = low_penalty_ra;
@@ -748,14 +731,15 @@ static void halbtc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist,
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
}
- btc8821a1ant_set_sw_pen_tx_rate(btcoexist, coex_dm->cur_low_penalty_ra);
+ btc8821a1ant_set_sw_penalty_tx_rate(btcoexist,
+ coex_dm->cur_low_penalty_ra);
coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
}
-static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
- u32 val0x6c0, u32 val0x6c4,
- u32 val0x6c8, u8 val0x6cc)
+static void btc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -776,9 +760,9 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
-static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
- bool force_exec, u32 val0x6c0,
- u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
+static void btc8821a1ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -798,8 +782,8 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
(coex_dm->pre_val_0x6cc == coex_dm->cur_val_0x6cc))
return;
}
- halbtc8821a1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
- val0x6c8, val0x6cc);
+ btc8821a1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
coex_dm->pre_val_0x6c0 = coex_dm->cur_val_0x6c0;
coex_dm->pre_val_0x6c4 = coex_dm->cur_val_0x6c4;
@@ -807,42 +791,41 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
coex_dm->pre_val_0x6cc = coex_dm->cur_val_0x6cc;
}
-static void halbtc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist,
- bool force_exec, u8 type)
+static void btc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
{
switch (type) {
case 0:
- halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0x55555555, 0xffffff, 0x3);
+ btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x55555555, 0xffffff, 0x3);
break;
case 1:
- halbtc8821a1ant_coex_table(btcoexist, force_exec,
- 0x55555555, 0x5a5a5a5a,
- 0xffffff, 0x3);
- break;
+ btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
case 2:
- halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
- 0x5a5a5a5a, 0xffffff, 0x3);
+ btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffffff, 0x3);
break;
case 3:
- halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
- 0xaaaaaaaa, 0xffffff, 0x3);
+ btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0xaaaaaaaa, 0xffffff, 0x3);
break;
case 4:
- halbtc8821a1ant_coex_table(btcoexist, force_exec, 0xffffffff,
- 0xffffffff, 0xffffff, 0x3);
+ btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5a5a5a5a, 0xffffff, 0x3);
break;
case 5:
- halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
- 0x5fff5fff, 0xffffff, 0x3);
+ btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0xaaaa5a5a, 0xffffff, 0x3);
break;
case 6:
- halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
- 0x5a5a5a5a, 0xffffff, 0x3);
+ btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0xaaaa5a5a, 0xffffff, 0x3);
break;
case 7:
- halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5afa5afa,
- 0x5afa5afa, 0xffffff, 0x3);
+ btc8821a1ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xffffff, 0x3);
break;
default:
break;
@@ -853,10 +836,10 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
bool enable)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[1] = {0};
+ u8 h2c_parameter[1] = {0};
if (enable)
- h2c_parameter[0] |= BIT0; /* function enable*/
+ h2c_parameter[0] |= BIT0; /* function enable */
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
@@ -865,8 +848,8 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
-static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
- bool force_exec, bool enable)
+static void btc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -890,24 +873,40 @@ static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
}
-static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
- u8 byte1, u8 byte2, u8 byte3,
- u8 byte4, u8 byte5)
+static void btc8821a1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
+ u8 byte2, u8 byte3, u8 byte4, u8 byte5)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[5] = {0};
+ u8 real_byte1 = byte1, real_byte5 = byte5;
+ bool ap_enable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
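+		/* assumed: AP mode cannot rely on null-data power save,
+		 * so rewrite byte1/byte5 into the AP-mode variant of
+		 * the PS-TDMA command before handing it to firmware
+		 */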
+		if ((byte1 & BIT4) && !(byte1 & BIT5)) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW for 1Ant AP mode\n");
+ real_byte1 &= ~BIT4;
+ real_byte1 |= BIT5;
+
+ real_byte5 |= BIT5;
+ real_byte5 &= ~BIT6;
+ }
+ }
- h2c_parameter[0] = byte1;
+ h2c_parameter[0] = real_byte1;
h2c_parameter[1] = byte2;
h2c_parameter[2] = byte3;
h2c_parameter[3] = byte4;
- h2c_parameter[4] = byte5;
+ h2c_parameter[4] = real_byte5;
- coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[0] = real_byte1;
coex_dm->ps_tdma_para[1] = byte2;
coex_dm->ps_tdma_para[2] = byte3;
coex_dm->ps_tdma_para[3] = byte4;
- coex_dm->ps_tdma_para[4] = byte5;
+ coex_dm->ps_tdma_para[4] = real_byte5;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
@@ -919,18 +918,18 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
-static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
- u8 lps_val, u8 rpwm_val)
+static void btc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
+ u8 lps_val, u8 rpwm_val)
{
- u8 lps = lps_val;
- u8 rpwm = rpwm_val;
+ u8 lps = lps_val;
+ u8 rpwm = rpwm_val;
btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps);
btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
}
-static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
- bool force_exec, u8 lps_val, u8 rpwm_val)
+static void btc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
+ bool force_exec, u8 lps_val, u8 rpwm_val)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -954,33 +953,33 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
return;
}
}
- halbtc8821a1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
+ btc8821a1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
coex_dm->pre_lps = coex_dm->cur_lps;
coex_dm->pre_rpwm = coex_dm->cur_rpwm;
}
-static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
- bool low_penalty_ra)
+static void btc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
- halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+ btc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
-static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
- u8 ant_pos_type, bool init_hw_cfg,
- bool wifi_off)
+static void btc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
+ u8 ant_pos_type, bool init_hw_cfg,
+ bool wifi_off)
{
struct btc_board_info *board_info = &btcoexist->board_info;
u32 u4_tmp = 0;
u8 h2c_parameter[2] = {0};
if (init_hw_cfg) {
- /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT*/
+ /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT */
u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u4_tmp &= ~BIT23;
u4_tmp |= BIT24;
@@ -990,41 +989,42 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77);
if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
- /*tell firmware "antenna inverse" ==>
- * WRONG firmware antenna control code.==>need fw to fix
+ /* tell firmware "antenna inverse"
+ * WRONG firmware antenna control code, need fw to fix
*/
h2c_parameter[0] = 1;
h2c_parameter[1] = 1;
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
- /*Main Ant to BT for IPS case 0x4c[23] = 1*/
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64,
- 0x1, 0x1);
} else {
- /*tell firmware "no antenna inverse" ==>
- * WRONG firmware antenna control code.==>need fw to fix
+ /* tell firmware "no antenna inverse"
+ * WRONG firmware antenna control code, need fw to fix
*/
h2c_parameter[0] = 0;
h2c_parameter[1] = 1;
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
- /*Aux Ant to BT for IPS case 0x4c[23] = 1*/
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64,
- 0x1, 0x0);
}
} else if (wifi_off) {
/* 0x4c[24:23] = 00, Set Antenna control
- * by BT_RFE_CTRL BT Vendor 0xac = 0xf002
+ * by BT_RFE_CTRL BT Vendor 0xac = 0xf002
*/
u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u4_tmp &= ~BIT23;
u4_tmp &= ~BIT24;
btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp);
+
+ /* 0x765 = 0x18 */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3);
+ } else {
+ /* 0x765 = 0x0 */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
}
- /* ext switch setting*/
+ /* ext switch setting */
switch (ant_pos_type) {
case BTC_ANT_PATH_WIFI:
+ btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77);
if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7,
0x30, 0x1);
@@ -1033,6 +1033,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
0x30, 0x2);
break;
case BTC_ANT_PATH_BT:
+ btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77);
if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7,
0x30, 0x2);
@@ -1042,6 +1043,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
break;
default:
case BTC_ANT_PATH_PTA:
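+		/* 0xcb4 = 0x66 here (vs 0x77 in the cases above)
+		 * presumably hands the ext switch over to PTA control
+		 */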
+ btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x66);
if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7,
0x30, 0x1);
@@ -1052,8 +1054,8 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
}
}
-static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
- bool force_exec, bool turn_on, u8 type)
+static void btc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
+ bool force_exec, bool turn_on, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 rssi_adjust_val = 0;
@@ -1078,185 +1080,189 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
if (turn_on) {
switch (type) {
default:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x1a,
- 0x1a, 0x0, 0x50);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a,
+ 0x1a, 0x0, 0x50);
break;
case 1:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x3a,
- 0x03, 0x10, 0x50);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x3a,
+ 0x03, 0x10, 0x50);
rssi_adjust_val = 11;
break;
case 2:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x2b,
- 0x03, 0x10, 0x50);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x2b,
+ 0x03, 0x10, 0x50);
rssi_adjust_val = 14;
break;
case 3:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x1d,
- 0x1d, 0x0, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d,
+ 0x1d, 0x0, 0x10);
break;
case 4:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x15,
- 0x3, 0x14, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+ 0x3, 0x14, 0x0);
rssi_adjust_val = 17;
break;
case 5:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x15,
- 0x3, 0x11, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+ 0x3, 0x11, 0x10);
break;
case 6:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa,
- 0x3, 0x0, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+ 0x3, 0x0, 0x0);
break;
case 7:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xc,
- 0x5, 0x0, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc,
+ 0x5, 0x0, 0x0);
break;
case 8:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x25,
- 0x3, 0x10, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+ 0x3, 0x10, 0x0);
break;
case 9:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x21,
- 0x3, 0x10, 0x50);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21,
+ 0x3, 0x10, 0x50);
rssi_adjust_val = 18;
break;
case 10:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa,
- 0xa, 0x0, 0x40);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+ 0xa, 0x0, 0x40);
break;
case 11:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x14,
- 0x03, 0x10, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
+ 0x03, 0x10, 0x10);
rssi_adjust_val = 20;
break;
case 12:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x0a,
- 0x0a, 0x0, 0x50);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a,
+ 0x0a, 0x0, 0x50);
break;
case 13:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x18,
- 0x18, 0x0, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x18,
+ 0x18, 0x0, 0x10);
break;
case 14:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x21,
- 0x3, 0x10, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1e,
+ 0x3, 0x10, 0x14);
break;
case 15:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa,
- 0x3, 0x8, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa,
+ 0x3, 0x8, 0x0);
break;
case 16:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x15,
- 0x3, 0x10, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15,
+ 0x3, 0x10, 0x0);
rssi_adjust_val = 18;
break;
case 18:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x25,
- 0x3, 0x10, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25,
+ 0x3, 0x10, 0x0);
rssi_adjust_val = 14;
break;
case 20:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x35,
- 0x03, 0x11, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35,
+ 0x03, 0x11, 0x10);
break;
case 21:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x15,
- 0x03, 0x11, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15,
+ 0x03, 0x11, 0x10);
break;
case 22:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x25,
- 0x03, 0x11, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25,
+ 0x03, 0x11, 0x10);
break;
case 23:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x25,
- 0x3, 0x31, 0x18);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x3, 0x31, 0x18);
rssi_adjust_val = 22;
break;
case 24:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x15,
- 0x3, 0x31, 0x18);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x3, 0x31, 0x18);
rssi_adjust_val = 22;
break;
case 25:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0xa,
- 0x3, 0x31, 0x18);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0x3, 0x31, 0x18);
rssi_adjust_val = 22;
break;
case 26:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0xa,
- 0x3, 0x31, 0x18);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0x3, 0x31, 0x18);
rssi_adjust_val = 22;
break;
case 27:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x25,
- 0x3, 0x31, 0x98);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x3, 0x31, 0x98);
rssi_adjust_val = 22;
break;
case 28:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x69, 0x25,
- 0x3, 0x31, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25,
+ 0x3, 0x31, 0x0);
break;
case 29:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xab, 0x1a,
- 0x1a, 0x1, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a,
+ 0x1a, 0x1, 0x10);
break;
case 30:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x14,
- 0x3, 0x10, 0x50);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14,
+ 0x3, 0x10, 0x50);
break;
case 31:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xd3, 0x1a,
- 0x1a, 0, 0x58);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a,
+ 0x1a, 0, 0x58);
break;
case 32:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0xa,
- 0x3, 0x10, 0x0);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa,
+ 0x3, 0x10, 0x0);
break;
case 33:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xa3, 0x25,
- 0x3, 0x30, 0x90);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25,
+ 0x3, 0x30, 0x90);
break;
case 34:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x53, 0x1a,
- 0x1a, 0x0, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a,
+ 0x1a, 0x0, 0x10);
break;
case 35:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x63, 0x1a,
- 0x1a, 0x0, 0x10);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a,
+ 0x1a, 0x0, 0x10);
break;
case 36:
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xd3, 0x12,
- 0x3, 0x14, 0x50);
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12,
+ 0x3, 0x14, 0x50);
break;
}
} else {
- /* disable PS tdma*/
+ /* disable PS tdma */
switch (type) {
- case 8: /*PTA Control*/
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x8, 0x0, 0x0,
- 0x0, 0x0);
- halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
- false, false);
+ case 8:
+ /* PTA Control */
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0, 0x0,
+ 0x0, 0x0);
+ btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ false, false);
break;
case 0:
- default: /*Software control, Antenna at BT side*/
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x0, 0x0);
- halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
- false, false);
+ default:
+ /* Software control, Antenna at BT side */
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x0, 0x0);
+ btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+ false, false);
break;
- case 9: /*Software control, Antenna at WiFi side*/
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x0, 0x0);
- halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI,
- false, false);
+ case 9:
+ /* Software control, Antenna at WiFi side */
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x0, 0x0);
+ btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI,
+ false, false);
break;
- case 10: /* under 5G*/
- halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x8, 0x0);
- halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
- false, false);
+ case 10:
+ /* under 5G */
+ btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x8, 0x0);
+ btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+ false, false);
break;
}
}
@@ -1264,15 +1270,15 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
btcoexist->btc_set(btcoexist,
BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssi_adjust_val);
- /* update pre state*/
+ /* update pre state */
coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
}
-static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
+static bool btc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool common = false, wifi_connected = false, wifi_busy = false;
+ bool common = false, wifi_connected = false, wifi_busy = false;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -1283,7 +1289,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
coex_dm->bt_status) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
@@ -1291,7 +1297,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
coex_dm->bt_status)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi connected + BT non connected-idle!!\n");
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
@@ -1299,15 +1305,15 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
coex_dm->bt_status)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
- coex_dm->bt_status)) {
+ coex_dm->bt_status)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi connected + BT connected-idle!!\n");
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
@@ -1315,7 +1321,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
coex_dm->bt_status)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Wifi non connected-idle + BT Busy!!\n");
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else {
@@ -1333,231 +1339,40 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
return common;
}
-static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
- u8 wifi_status)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static long up, dn, m, n, wait_count;
- /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/
- long result;
- u8 retry_count = 0, bt_info_ext;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
-
- if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
- wifi_status) ||
- (BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN ==
- wifi_status) ||
- (BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT ==
- wifi_status)) {
- if (coex_dm->cur_ps_tdma != 1 &&
- coex_dm->cur_ps_tdma != 2 &&
- coex_dm->cur_ps_tdma != 3 &&
- coex_dm->cur_ps_tdma != 9) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
-
- up = 0;
- dn = 0;
- m = 1;
- n = 3;
- result = 0;
- wait_count = 0;
- }
- return;
- }
-
- if (!coex_dm->auto_tdma_adjust) {
- coex_dm->auto_tdma_adjust = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
-
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
- coex_dm->tdma_adj_type = 2;
- /*============*/
- up = 0;
- dn = 0;
- m = 1;
- n = 3;
- result = 0;
- wait_count = 0;
- } else {
- /*accquire the BT TRx retry count from BT_Info byte2*/
- retry_count = coex_sta->bt_retry_cnt;
- bt_info_ext = coex_sta->bt_info_ext;
- result = 0;
- wait_count++;
-
- if (retry_count == 0) {
- /* no retry in the last 2-second duration*/
- up++;
- dn--;
-
- if (dn <= 0)
- dn = 0;
-
- if (up >= n) {
- /* if (retry count == 0) for 2*n seconds ,
- * make WiFi duration wider
- */
- wait_count = 0;
- n = 3;
- up = 0;
- dn = 0;
- result = 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Increase wifi duration!!\n");
- }
- } else if (retry_count <= 3) {
- /* <=3 retry in the last 2-second duration*/
- up--;
- dn++;
-
- if (up <= 0)
- up = 0;
-
- if (dn == 2) {
- /* if retry count< 3 for 2*2 seconds,
- * shrink wifi duration
- */
- if (wait_count <= 2)
- m++; /* avoid bounce in two levels */
- else
- m = 1;
-
- if (m >= 20) {
- /* m max value is 20, max time is 120 s,
- * recheck if adjust WiFi duration.
- */
- m = 20;
- }
- n = 3*m;
- up = 0;
- dn = 0;
- wait_count = 0;
- result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
- }
- } else {
- /* retry count > 3, if retry count > 3 happens once,
- * shrink WiFi duration
- */
- if (wait_count == 1)
- m++; /* avoid bounce in two levels */
- else
- m = 1;
- /* m max value is 20, max time is 120 second,
- * recheck if adjust WiFi duration.
- */
- if (m >= 20)
- m = 20;
-
- n = 3*m;
- up = 0;
- dn = 0;
- wait_count = 0;
- result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
- }
-
- if (result == -1) {
- if ((BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
- ((coex_dm->cur_ps_tdma == 1) ||
- (coex_dm->cur_ps_tdma == 2))) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 1) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- }
- } else if (result == 1) {
- if ((BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) &&
- ((coex_dm->cur_ps_tdma == 1) ||
- (coex_dm->cur_ps_tdma == 2))) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
- }
- } else {
- /*no change*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
- }
-
- if (coex_dm->cur_ps_tdma != 1 &&
- coex_dm->cur_ps_tdma != 2 &&
- coex_dm->cur_ps_tdma != 9 &&
- coex_dm->cur_ps_tdma != 11) {
- /* recover to previous adjust type*/
- halbtc8821a1ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true,
- coex_dm->tdma_adj_type);
- }
- }
-}
-
static void btc8821a1ant_ps_tdma_check_for_pwr_save(struct btc_coexist *btcoex,
bool new_ps_state)
{
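+	/* assumed: PS-TDMA must be off while the LPS state changes, so
+	 * it is turned off on both the enter-LPS and leave-LPS edges
+	 */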
- u8 lps_mode = 0x0;
+ u8 lps_mode = 0x0;
btcoex->btc_get(btcoex, BTC_GET_U1_LPS_MODE, &lps_mode);
if (lps_mode) {
- /* already under LPS state*/
+ /* already under LPS state */
if (new_ps_state) {
- /* keep state under LPS, do nothing.*/
+ /* keep state under LPS, do nothing */
} else {
- /* will leave LPS state, turn off psTdma first*/
- halbtc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);
+ /* will leave LPS state, turn off psTdma first */
+ btc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);
}
} else {
/* NO PS state*/
if (new_ps_state) {
- /* will enter LPS state, turn off psTdma first*/
- halbtc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);
+ /* will enter LPS state, turn off psTdma first */
+ btc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0);
} else {
- /* keep state under NO PS state, do nothing.*/
+ /* keep state under NO PS state, do nothing */
}
}
}
-static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist,
- u8 ps_type, u8 lps_val,
- u8 rpwm_val)
+static void btc8821a1ant_power_save_state(struct btc_coexist *btcoexist,
+ u8 ps_type, u8 lps_val, u8 rpwm_val)
{
bool low_pwr_disable = false;
switch (ps_type) {
case BTC_PS_WIFI_NATIVE:
- /* recover to original 32k low power setting*/
+ /* recover to original 32k low power setting */
low_pwr_disable = false;
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
@@ -1566,13 +1381,13 @@ static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist,
case BTC_PS_LPS_ON:
btc8821a1ant_ps_tdma_check_for_pwr_save(btcoexist,
true);
- halbtc8821a1ant_lps_rpwm(btcoexist,
- NORMAL_EXEC, lps_val, rpwm_val);
- /* when coex force to enter LPS, do not enter 32k low power.*/
+ btc8821a1ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
+ rpwm_val);
+	/* when coex forces LPS, do not enter 32k low power */
low_pwr_disable = true;
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- /* power save must executed before psTdma.*/
+	/* power save must be executed before psTdma */
btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
break;
case BTC_PS_LPS_OFF:
@@ -1584,295 +1399,332 @@ static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist,
}
}
-static void halbtc8821a1ant_coex_under_5g(struct btc_coexist *btcoexist)
+static void btc8821a1ant_coex_under_5g(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
- halbtc8821a1ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true);
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8821a1ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true);
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 10);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 10);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
- halbtc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 5);
+ btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 5);
}
-static void halbtc8821a1ant_action_wifi_only(struct btc_coexist *btcoexist)
+/***********************************************
+ *
+ * Software Coex Mechanism start
+ *
+ ***********************************************/
+
+/* SCO only or SCO+PAN(HS) */
+static void btc8821a1ant_action_sco(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+ btc8821a1ant_sw_mechanism(btcoexist, true);
}
-static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_hid(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static bool pre_bt_disabled;
- static u32 bt_disable_cnt;
- bool bt_active = true, bt_disabled = false;
-
- /* This function check if bt is disabled*/
-
- if (coex_sta->high_priority_tx == 0 &&
- coex_sta->high_priority_rx == 0 &&
- coex_sta->low_priority_tx == 0 &&
- coex_sta->low_priority_rx == 0) {
- bt_active = false;
- }
- if (coex_sta->high_priority_tx == 0xffff &&
- coex_sta->high_priority_rx == 0xffff &&
- coex_sta->low_priority_tx == 0xffff &&
- coex_sta->low_priority_rx == 0xffff) {
- bt_active = false;
- }
- if (bt_active) {
- bt_disable_cnt = 0;
- bt_disabled = false;
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
- &bt_disabled);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is enabled !!\n");
- } else {
- bt_disable_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
- if (bt_disable_cnt >= 2) {
- bt_disabled = true;
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
- &bt_disabled);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is disabled !!\n");
- halbtc8821a1ant_action_wifi_only(btcoexist);
- }
- }
- if (pre_bt_disabled != bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
- pre_bt_disabled = bt_disabled;
- if (bt_disabled) {
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
- NULL);
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS,
- NULL);
- }
- }
+ btc8821a1ant_sw_mechanism(btcoexist, true);
}
-/*=============================================*/
-/**/
-/* Software Coex Mechanism start*/
-/**/
-/*=============================================*/
-
-/* SCO only or SCO+PAN(HS)*/
-static void halbtc8821a1ant_action_sco(struct btc_coexist *btcoexist)
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+static void btc8821a1ant_action_a2dp(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, true);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
}
-static void halbtc8821a1ant_action_hid(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, true);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
}
-/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/
-static void halbtc8821a1ant_action_a2dp(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_pan_edr(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
}
-static void halbtc8821a1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+/* PAN(HS) only */
+static void btc8821a1ant_action_pan_hs(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
}
-static void halbtc8821a1ant_action_pan_edr(struct btc_coexist *btcoexist)
+/* PAN(EDR)+A2DP */
+static void btc8821a1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, false);
}
-/*PAN(HS) only*/
-static void halbtc8821a1ant_action_pan_hs(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, true);
}
-/*PAN(EDR)+A2DP*/
-static void halbtc8821a1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+/* HID+A2DP+PAN(EDR) */
+static void btc8821a1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ btc8821a1ant_sw_mechanism(btcoexist, true);
}
-static void halbtc8821a1ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, true);
+ btc8821a1ant_sw_mechanism(btcoexist, true);
}
-/* HID+A2DP+PAN(EDR)*/
-static void btc8821a1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+/***********************************************
+ *
+ * Non-Software Coex Mechanism start
+ *
+ ***********************************************/
+static
+void btc8821a1ant_action_wifi_multi_port(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, true);
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ /* tdma and coex table */
+ if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) {
+		/* check the combined A2DP+PAN profile first so it is
+		 * not shadowed by the A2DP-only branch
+		 */
+		if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+			btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+			btc8821a1ant_coex_table_with_type(btcoexist,
+							  NORMAL_EXEC, 4);
+		} else if (bt_link_info->a2dp_exist) {
+			btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+			btc8821a1ant_coex_table_with_type(btcoexist,
+							  NORMAL_EXEC, 1);
+ } else {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 4);
+ }
+	} else if ((coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_SCO_BUSY) ||
+		   (coex_dm->bt_status ==
+		    BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY)) {
+ btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
+ BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+ } else {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ }
}
-static void halbtc8821a1ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+static
+void btc8821a1ant_action_wifi_not_connected_asso_auth(
+ struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_sw_mechanism(btcoexist, true);
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0,
+ 0x0);
+
+ /* tdma and coex table */
+ if ((bt_link_info->sco_exist) || (bt_link_info->hid_exist)) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ } else if ((bt_link_info->a2dp_exist) || (bt_link_info->pan_exist)) {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ } else {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ }
}
-/*=============================================*/
-/**/
-/* Non-Software Coex Mechanism start*/
-/**/
-/*=============================================*/
-static void halbtc8821a1ant_action_hs(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_hs(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2);
}
-static void halbtc8821a1ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_bt_inquiry(struct btc_coexist *btcoexist)
{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool wifi_connected = false;
+ bool ap_enable = false;
+ bool wifi_busy = false, bt_busy = false;
- btcoexist->btc_get(btcoexist,
- BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
-
- if (!wifi_connected) {
- halbtc8821a1ant_power_save_state(btcoexist,
- BTC_PS_WIFI_NATIVE, 0x0, 0x0);
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
- } else if ((bt_link_info->sco_exist) ||
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
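+	/* wifi idle with no high-priority task: stay in native PS and
+	 * let the default coex table arbitrate (assumed reading)
+	 */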
+ if (!wifi_connected && !coex_sta->wifi_is_high_pri_task) {
+ btc8821a1ant_power_save_state(btcoexist,
+ BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+	} else if ((bt_link_info->a2dp_exist) && (bt_link_info->hid_exist)) {
+		/* A2DP+HID busy; checked before the broader
+		 * SCO/A2DP/HID-only branch below so it is not shadowed
+		 */
+		btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+					      0x0, 0x0);
+		btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+
+		btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+	} else if ((bt_link_info->sco_exist) || (bt_link_info->a2dp_exist) ||
		   (bt_link_info->hid_only)) {
-		/* SCO/HID-only busy*/
-		halbtc8821a1ant_power_save_state(btcoexist,
-						 BTC_PS_WIFI_NATIVE, 0x0, 0x0);
-		halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
-		halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+		/* SCO/HID-only busy */
+		btc8821a1ant_power_save_state(btcoexist,
+					      BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+		btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32);
+		btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+ } else if ((bt_link_info->pan_exist) || (wifi_busy)) {
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
} else {
- halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_LPS_ON,
- 0x50, 0x4);
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
}
}
static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
- u8 wifi_status) {
- /* tdma and coex table*/
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ u8 wifi_status)
+{
+ /* tdma and coex table */
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+	/* both wifi states currently use coex table type 1 */
+	btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
}
static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
u8 wifi_status)
{
- u8 bt_rssi_state;
+ u8 bt_rssi_state;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bt_rssi_state = halbtc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0);
+ bt_rssi_state = btc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0);
if (bt_link_info->hid_only) {
- /*HID*/
+ /* HID */
btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
wifi_status);
coex_dm->auto_tdma_adjust = false;
return;
} else if (bt_link_info->a2dp_only) {
- /*A2DP*/
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a1ant_tdma_dur_adj(btcoexist, wifi_status);
- } else {
- /*for low BT RSSI*/
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
+ /* A2DP */
+ if ((bt_rssi_state != BTC_RSSI_STATE_HIGH) &&
+ (bt_rssi_state != BTC_RSSI_STATE_STAY_HIGH)) {
+ /* for low BT RSSI */
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
coex_dm->auto_tdma_adjust = false;
}
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
} else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) {
- /*HID+A2DP*/
+ /* HID+A2DP */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
coex_dm->auto_tdma_adjust = false;
} else {
/*for low BT RSSI*/
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
coex_dm->auto_tdma_adjust = false;
}
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
} else if ((bt_link_info->pan_only) ||
(bt_link_info->hid_exist && bt_link_info->pan_exist)) {
- /*PAN(OPP, FTP), HID+PAN(OPP, FTP)*/
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ /* PAN(OPP, FTP), HID+PAN(OPP, FTP) */
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
coex_dm->auto_tdma_adjust = false;
} else if (((bt_link_info->a2dp_exist) && (bt_link_info->pan_exist)) ||
(bt_link_info->hid_exist && bt_link_info->a2dp_exist &&
bt_link_info->pan_exist)) {
- /*A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP)*/
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ /* A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP) */
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
coex_dm->auto_tdma_adjust = false;
} else {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
coex_dm->auto_tdma_adjust = false;
}
}
-static void halbtc8821a1ant_action_wifi_not_connected(
- struct btc_coexist *btcoexist)
+static
+void btc8821a1ant_action_wifi_not_connected(struct btc_coexist *btcoexist)
{
- /* power save state*/
- halbtc8821a1ant_power_save_state(btcoexist,
- BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ /* power save state */
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
- /* tdma and coex table*/
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ /* tdma and coex table */
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
}
static void btc8821a1ant_act_wifi_not_conn_scan(struct btc_coexist *btcoexist)
{
- halbtc8821a1ant_power_save_state(btcoexist,
- BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) {
+		/* check the combined A2DP+PAN profile first so it is
+		 * not shadowed by the A2DP-only branch
+		 */
+		if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
+			btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+			btc8821a1ant_coex_table_with_type(btcoexist,
+							  NORMAL_EXEC, 4);
+		} else if (bt_link_info->a2dp_exist) {
+			btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+			btc8821a1ant_coex_table_with_type(btcoexist,
+							  NORMAL_EXEC, 1);
+ } else {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 4);
+ }
+	} else if ((coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_SCO_BUSY) ||
+		   (coex_dm->bt_status ==
+		    BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY)) {
+ btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
+ BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN);
+ } else {
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ }
}
-static void halbtc8821a1ant_action_wifi_connected_scan(
- struct btc_coexist *btcoexist) {
+static
+void btc8821a1ant_action_wifi_connected_scan(struct btc_coexist *btcoexist)
+{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- /* power save state*/
- halbtc8821a1ant_power_save_state(btcoexist,
- BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ /* power save state */
+ btc8821a1ant_power_save_state(btcoexist,
+ BTC_PS_WIFI_NATIVE, 0x0, 0x0);
- /* tdma and coex table*/
+ /* tdma and coex table */
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 22);
- halbtc8821a1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
} else {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
}
} else if ((BT_8821A_1ANT_BT_STATUS_SCO_BUSY ==
coex_dm->bt_status) ||
@@ -1881,52 +1733,52 @@ static void halbtc8821a1ant_action_wifi_connected_scan(
btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN);
} else {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
}
}
static void btc8821a1ant_act_wifi_conn_sp_pkt(struct btc_coexist *btcoexist)
{
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool hs_connecting = false;
+ bool hs_connecting = false;
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting);
- halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
- /* tdma and coex table*/
- if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
+ /* tdma and coex table */
+ if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) {
if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 22);
- halbtc8821a1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 22);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
} else {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 20);
- halbtc8821a1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 1);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 1);
}
} else {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
- halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
}
}
-static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
+static void btc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_busy = false;
- bool scan = false, link = false, roam = false;
- bool under_4way = false;
+ bool wifi_busy = false;
+ bool scan = false, link = false, roam = false;
+ bool under_4way = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], CoexForWifiConnect()===>\n");
- btcoexist->btc_get(btcoexist,
- BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
if (under_4way) {
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -1938,7 +1790,7 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
- halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
+ btc8821a1ant_action_wifi_connected_scan(btcoexist);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
@@ -1947,14 +1799,14 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
/* power save state*/
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY ==
coex_dm->bt_status && !btcoexist->bt_link_info.hid_only)
- halbtc8821a1ant_power_save_state(btcoexist,
- BTC_PS_LPS_ON, 0x50, 0x4);
+ btc8821a1ant_power_save_state(btcoexist,
+ BTC_PS_LPS_ON, 0x50, 0x4);
else
- halbtc8821a1ant_power_save_state(btcoexist,
- BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
+ btc8821a1ant_power_save_state(btcoexist,
+ BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
- /* tdma and coex table*/
+ /* tdma and coex table */
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
if (!wifi_busy) {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
@@ -1967,10 +1819,10 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8821A_1ANT_WIFI_STATUS_CONNECTED_IDLE);
} else {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- halbtc8821a1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 2);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 5);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 2);
}
} else {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) {
@@ -1983,10 +1835,9 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist,
BT_8821A_1ANT_WIFI_STATUS_CONNECTED_BUSY);
} else {
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- halbtc8821a1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 2);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 2);
}
}
}
@@ -1994,52 +1845,52 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 algorithm = 0;
+ u8 algorithm = 0;
- algorithm = halbtc8821a1ant_action_algorithm(btcoexist);
+ algorithm = btc8821a1ant_action_algorithm(btcoexist);
coex_dm->cur_algorithm = algorithm;
- if (!halbtc8821a1ant_is_common_action(btcoexist)) {
+ if (!btc8821a1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8821A_1ANT_COEX_ALGO_SCO:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = SCO\n");
- halbtc8821a1ant_action_sco(btcoexist);
+ btc8821a1ant_action_sco(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = HID\n");
- halbtc8821a1ant_action_hid(btcoexist);
+ btc8821a1ant_action_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = A2DP\n");
- halbtc8821a1ant_action_a2dp(btcoexist);
+ btc8821a1ant_action_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
- halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
+ btc8821a1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = PAN(EDR)\n");
- halbtc8821a1ant_action_pan_edr(btcoexist);
+ btc8821a1ant_action_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANHS:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = HS mode\n");
- halbtc8821a1ant_action_pan_hs(btcoexist);
+ btc8821a1ant_action_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = PAN+A2DP\n");
- halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
+ btc8821a1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = PAN(EDR)+HID\n");
- halbtc8821a1ant_action_pan_edr_hid(btcoexist);
+ btc8821a1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -2049,28 +1900,30 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = HID+A2DP\n");
- halbtc8821a1ant_action_hid_a2dp(btcoexist);
+ btc8821a1ant_action_hid_a2dp(btcoexist);
break;
default:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action algorithm = coexist All Off!!\n");
- /*halbtc8821a1ant_coex_all_off(btcoexist);*/
+ /*btc8821a1ant_coex_all_off(btcoexist);*/
break;
}
coex_dm->pre_algorithm = coex_dm->cur_algorithm;
}
}
-static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- bool wifi_connected = false, bt_hs_on = false;
- bool increase_scan_dev_num = false;
- bool bt_ctrl_agg_buf_size = false;
- u8 agg_buf_size = 5;
- u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- bool wifi_under_5g = false;
+ bool wifi_connected = false, bt_hs_on = false;
+ bool increase_scan_dev_num = false;
+ bool bt_ctrl_agg_buf_size = false;
+ u8 agg_buf_size = 5;
+ u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ u32 wifi_link_status = 0;
+ u32 num_of_wifi_link = 0;
+ bool wifi_under_5g = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], RunCoexistMechanism()===>\n");
@@ -2097,7 +1950,7 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (wifi_under_5g) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
- halbtc8821a1ant_coex_under_5g(btcoexist);
+ btc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2109,21 +1962,41 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM,
&increase_scan_dev_num);
- btcoexist->btc_get(btcoexist,
- BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+ &wifi_link_status);
+ num_of_wifi_link = wifi_link_status >> 16;
+ if ((num_of_wifi_link >= 2) ||
+ (wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
+ btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+ bt_ctrl_agg_buf_size, agg_buf_size);
+ btc8821a1ant_action_wifi_multi_port(btcoexist);
+ return;
+ }
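/*
 * Illustrative sketch, not part of the patch: the BTC_GET_U4_WIFI_LINK_STATUS
 * value checked above packs the number of active wifi links into its upper
 * 16 bits, with role flags such as WIFI_P2P_GO_CONNECTED in the low bits.
 * A minimal standalone decoder under those assumptions (the flag's bit
 * position below is hypothetical):
 */
#include <stdbool.h>
#include <stdint.h>

#define P2P_GO_CONNECTED_FLAG 0x00000008u /* assumed bit position */

static inline uint16_t wifi_link_count(uint32_t link_status)
{
	/* mirrors "num_of_wifi_link = wifi_link_status >> 16" above */
	return (uint16_t)(link_status >> 16);
}

static inline bool needs_multi_port_action(uint32_t link_status)
{
	/* the condition guarding the limited tx/rx + multi-port action */
	return wifi_link_count(link_status) >= 2 ||
	       (link_status & P2P_GO_CONNECTED_FLAG);
}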
if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) {
- halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
} else {
if (wifi_connected) {
wifi_rssi_state =
- halbtc8821a1ant_WifiRssiState(btcoexist, 1, 2,
- 30, 0);
- halbtc8821a1ant_limited_tx(btcoexist,
- NORMAL_EXEC, 1, 1, 1, 1);
+ btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2,
+ 30, 0);
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8821a1ant_limited_tx(btcoexist,
+ NORMAL_EXEC, 1, 1,
+ 1, 1);
+ } else {
+ btc8821a1ant_limited_tx(btcoexist,
+ NORMAL_EXEC, 1, 1,
+ 1, 1);
+ }
} else {
- halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC,
- 0, 0, 0, 0);
+ btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC,
+ 0, 0, 0, 0);
}
}
@@ -2137,22 +2010,22 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
bt_ctrl_agg_buf_size = true;
agg_buf_size = 0x8;
}
- halbtc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
- bt_ctrl_agg_buf_size, agg_buf_size);
+ btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+ bt_ctrl_agg_buf_size, agg_buf_size);
btc8821a1ant_run_sw_coex_mech(btcoexist);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (coex_sta->c2h_bt_inquiry_page) {
- halbtc8821a1ant_action_bt_inquiry(btcoexist);
+ btc8821a1ant_action_bt_inquiry(btcoexist);
return;
} else if (bt_hs_on) {
- halbtc8821a1ant_action_hs(btcoexist);
+ btc8821a1ant_action_hs(btcoexist);
return;
}
if (!wifi_connected) {
- bool scan = false, link = false, roam = false;
+ bool scan = false, link = false, roam = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], wifi is non connected-idle !!!\n");
@@ -2161,48 +2034,57 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- if (scan || link || roam)
- btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
- else
- halbtc8821a1ant_action_wifi_not_connected(btcoexist);
+ if (scan || link || roam) {
+ if (scan)
+ btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
+ else
+ btc8821a1ant_action_wifi_not_connected_asso_auth(
+ btcoexist);
+ } else {
+ btc8821a1ant_action_wifi_not_connected(btcoexist);
+ }
} else {
- /* wifi LPS/Busy*/
- halbtc8821a1ant_action_wifi_connected(btcoexist);
+ /* wifi LPS/Busy */
+ btc8821a1ant_action_wifi_connected(btcoexist);
}
}
-static void halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
+static void btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- /* force to reset coex mechanism*/
- /* sw all off*/
- halbtc8821a1ant_sw_mechanism(btcoexist, false);
+ /* force to reset coex mechanism
+ * sw all off
+ */
+ btc8821a1ant_sw_mechanism(btcoexist, false);
- halbtc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
- halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+ btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
}
-static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
- bool back_up)
+static void btc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
+ bool back_up, bool wifi_only)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 u1_tmp = 0;
- bool wifi_under_5g = false;
+ u8 u1_tmp = 0;
+ bool wifi_under_5g = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], 1Ant Init HW Config!!\n");
+ if (wifi_only)
+ return;
+
if (back_up) {
coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
0x430);
coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist,
0x434);
coex_dm->backup_retry_limit =
- btcoexist->btc_read_2byte(btcoexist, 0x42a);
+ btcoexist->btc_read_2byte(btcoexist, 0x42a);
coex_dm->backup_ampdu_max_time =
- btcoexist->btc_read_1byte(btcoexist, 0x456);
+ btcoexist->btc_read_1byte(btcoexist, 0x456);
}
- /* 0x790[5:0] = 0x5*/
+ /* 0x790[5:0] = 0x5 */
u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
u1_tmp &= 0xc0;
u1_tmp |= 0x5;
@@ -2210,35 +2092,33 @@ static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
- /*Antenna config*/
+ /* Antenna config */
if (wifi_under_5g)
- halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
- true, false);
+ btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
+ true, false);
else
- halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
- true, false);
- /* PTA parameter*/
- halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
-
- /* Enable counter statistics*/
- /*0x76e[3] =1, WLAN_Act control by PTA*/
+ btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
+ true, false);
+ /* PTA parameter */
+ btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+
+ /* Enable counter statistics
+ * 0x76e[3] = 1, WLAN_Act control by PTA
+ */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
}
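/*
 * Illustrative sketch, not part of the patch: the 0x790 update above is a
 * read-modify-write of a 6-bit field: preserve bits [7:6], force bits
 * [5:0] to 0x5. The same pattern, generalized over a byte-register
 * accessor pair like btc_read_1byte/btc_write_1byte (the function-pointer
 * types here are hypothetical):
 */
#include <stdint.h>

typedef uint8_t (*reg_read8_t)(void *ctx, uint16_t reg);
typedef void (*reg_write8_t)(void *ctx, uint16_t reg, uint8_t val);

static void reg_rmw8(void *ctx, reg_read8_t rd, reg_write8_t wr,
		     uint16_t reg, uint8_t mask, uint8_t val)
{
	uint8_t tmp = rd(ctx, reg);

	tmp &= (uint8_t)~mask;	/* clear the target field */
	tmp |= val & mask;	/* insert the new value */
	wr(ctx, reg, tmp);
}

/* reg_rmw8(ctx, rd, wr, 0x790, 0x3f, 0x05) reproduces the sequence above */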
-/*============================================================*/
-/* work around function start with wa_halbtc8821a1ant_*/
-/*============================================================*/
-/*============================================================*/
-/* extern function start with EXhalbtc8821a1ant_*/
-/*============================================================*/
-void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist)
+/**************************************************************
+ * extern function start with ex_btc8821a1ant_
+ **************************************************************/
+void ex_btc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist, bool wifionly)
{
- halbtc8821a1ant_init_hw_config(btcoexist, true);
+ btc8821a1ant_init_hw_config(btcoexist, true, wifionly);
}
-void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -2247,12 +2127,12 @@ void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
btcoexist->stop_coex_dm = false;
- halbtc8821a1ant_init_coex_dm(btcoexist);
+ btc8821a1ant_init_coex_dm(btcoexist);
- halbtc8821a1ant_query_bt_info(btcoexist);
+ btc8821a1ant_query_bt_info(btcoexist);
}
-void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -2359,7 +2239,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
"uplink" : "downlink")));
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
- ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+ ((coex_sta->bt_disabled) ? ("disabled") :
((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) ?
@@ -2397,7 +2277,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
"\r\n %-35s = %s/%s, (0x%x/0x%x)",
"PS state, IPS/LPS, (lps/rpwm)",
((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
btcoexist->bt_info.lps_val,
btcoexist->bt_info.rpwm_val);
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
@@ -2422,7 +2302,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
"\r\n %-35s = 0x%x ", "Rate Mask",
btcoexist->bt_info.ra_mask);
- /* Fw mechanism*/
+ /* Fw mechanism */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
"============[Fw mechanism]============");
@@ -2444,7 +2324,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
coex_dm->cur_ignore_wlan_act);
}
- /* Hw setting*/
+ /* Hw setting */
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s", "============[Hw setting]============");
@@ -2527,38 +2407,46 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
"\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
coex_sta->low_priority_rx, coex_sta->low_priority_tx);
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 1)
- halbtc8821a1ant_monitor_bt_ctr(btcoexist);
+ btc8821a1ant_monitor_bt_ctr(btcoexist);
#endif
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
}
-void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ bool wifi_under_5g = false;
if (btcoexist->manual_control || btcoexist->stop_coex_dm)
return;
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ if (wifi_under_5g) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc8821a1ant_coex_under_5g(btcoexist);
+ return;
+ }
if (BTC_IPS_ENTER == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
- halbtc8821a1ant_set_ant_path(btcoexist,
- BTC_ANT_PATH_BT, false, true);
- /*set PTA control*/
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
- halbtc8821a1ant_coex_table_with_type(btcoexist,
- NORMAL_EXEC, 0);
+ btc8821a1ant_set_ant_path(btcoexist,
+ BTC_ANT_PATH_BT, false, true);
+ /* set PTA control */
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8);
+ btc8821a1ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
- halbtc8821a1ant_run_coexist_mechanism(btcoexist);
+ btc8821a1ant_run_coexist_mechanism(btcoexist);
}
}
-void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -2568,22 +2456,35 @@ void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
if (BTC_LPS_ENABLE == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], LPS ENABLE notify\n");
- coex_sta->under_Lps = true;
+ coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], LPS DISABLE notify\n");
- coex_sta->under_Lps = false;
+ coex_sta->under_lps = false;
}
}
-void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
+ bool bt_ctrl_agg_buf_size = false;
+ bool wifi_under_5g = false;
+ u32 wifi_link_status = 0;
+ u32 num_of_wifi_link = 0;
+ u8 agg_buf_size = 5;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm)
+ return;
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ if (wifi_under_5g) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc8821a1ant_coex_under_5g(btcoexist);
+ return;
+ }
- if (btcoexist->manual_control ||
- btcoexist->stop_coex_dm ||
- btcoexist->bt_info.bt_disabled)
+ if (coex_sta->bt_disabled)
return;
btcoexist->btc_get(btcoexist,
@@ -2591,13 +2492,24 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
- halbtc8821a1ant_query_bt_info(btcoexist);
+ btc8821a1ant_query_bt_info(btcoexist);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+ &wifi_link_status);
+ num_of_wifi_link = wifi_link_status >> 16;
+ if (num_of_wifi_link >= 2) {
+ btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+ bt_ctrl_agg_buf_size, agg_buf_size);
+ btc8821a1ant_action_wifi_multi_port(btcoexist);
+ return;
+ }
if (coex_sta->c2h_bt_inquiry_page) {
- halbtc8821a1ant_action_bt_inquiry(btcoexist);
+ btc8821a1ant_action_bt_inquiry(btcoexist);
return;
} else if (bt_hs_on) {
- halbtc8821a1ant_action_hs(btcoexist);
+ btc8821a1ant_action_hs(btcoexist);
return;
}
@@ -2605,40 +2517,62 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCAN START notify\n");
if (!wifi_connected) {
- /* non-connected scan*/
+ /* non-connected scan */
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
} else {
- /* wifi is connected*/
- halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
+ /* wifi is connected */
+ btc8821a1ant_action_wifi_connected_scan(btcoexist);
}
} else if (BTC_SCAN_FINISH == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) {
- /* non-connected scan*/
- halbtc8821a1ant_action_wifi_not_connected(btcoexist);
+ /* non-connected scan */
+ btc8821a1ant_action_wifi_not_connected(btcoexist);
} else {
- halbtc8821a1ant_action_wifi_connected(btcoexist);
+ btc8821a1ant_action_wifi_connected(btcoexist);
}
}
}
-void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_connected = false, bt_hs_on = false;
+ u32 wifi_link_status = 0;
+ u32 num_of_wifi_link = 0;
+ bool bt_ctrl_agg_buf_size = false;
+ bool wifi_under_5g = false;
+ u8 agg_buf_size = 5;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ coex_sta->bt_disabled)
+ return;
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ if (wifi_under_5g) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc8821a1ant_coex_under_5g(btcoexist);
+ return;
+ }
- if (btcoexist->manual_control ||
- btcoexist->stop_coex_dm ||
- btcoexist->bt_info.bt_disabled)
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+ &wifi_link_status);
+ num_of_wifi_link = wifi_link_status >> 16;
+ if (num_of_wifi_link >= 2) {
+ btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+ bt_ctrl_agg_buf_size, agg_buf_size);
+ btc8821a1ant_action_wifi_multi_port(btcoexist);
return;
+ }
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (coex_sta->c2h_bt_inquiry_page) {
- halbtc8821a1ant_action_bt_inquiry(btcoexist);
+ btc8821a1ant_action_bt_inquiry(btcoexist);
return;
} else if (bt_hs_on) {
- halbtc8821a1ant_action_hs(btcoexist);
+ btc8821a1ant_action_hs(btcoexist);
return;
}
@@ -2653,26 +2587,33 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
if (!wifi_connected) {
- /* non-connected scan*/
- halbtc8821a1ant_action_wifi_not_connected(btcoexist);
+ /* non-connected scan */
+ btc8821a1ant_action_wifi_not_connected(btcoexist);
} else {
- halbtc8821a1ant_action_wifi_connected(btcoexist);
+ btc8821a1ant_action_wifi_connected(btcoexist);
}
}
}
-void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
- u8 type)
+void ex_btc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[3] = {0};
u32 wifi_bw;
u8 wifi_central_chnl;
+ bool wifi_under_5g = false;
- if (btcoexist->manual_control ||
- btcoexist->stop_coex_dm ||
- btcoexist->bt_info.bt_disabled)
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ coex_sta->bt_disabled)
+ return;
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ if (wifi_under_5g) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc8821a1ant_coex_under_5g(btcoexist);
return;
+ }
if (BTC_MEDIA_CONNECT == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -2682,17 +2623,16 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
"[BTCoex], MEDIA disconnect notify\n");
}
- /* only 2.4G we need to inform bt the chnl mask*/
+ /* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist,
BTC_GET_U1_WIFI_CENTRAL_CHNL,
&wifi_central_chnl);
- if ((BTC_MEDIA_CONNECT == type) &&
+ if ((type == BTC_MEDIA_CONNECT) &&
(wifi_central_chnl <= 14)) {
- /*h2c_parameter[0] = 0x1;*/
h2c_parameter[0] = 0x0;
h2c_parameter[1] = wifi_central_chnl;
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_HT40 == wifi_bw)
+ if (wifi_bw == BTC_WIFI_BW_HT40)
h2c_parameter[2] = 0x30;
else
h2c_parameter[2] = 0x20;
@@ -2711,25 +2651,48 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
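/*
 * Illustrative sketch, not part of the patch: the H2C 0x66 payload built
 * above is three bytes: a fixed 0x0, the 2.4G center channel, and a
 * bandwidth code (0x30 for HT40, else 0x20), all zeroed when there is no
 * 2.4G connection. A standalone builder under that reading (the helper
 * name and parameters are hypothetical):
 */
#include <stdbool.h>
#include <stdint.h>

static void build_media_status_h2c(uint8_t out[3], bool connected_2g4,
				   uint8_t center_chnl, bool is_ht40)
{
	out[0] = 0x0;
	/* only on 2.4G do we inform BT of the channel mask */
	out[1] = connected_2g4 ? center_chnl : 0x0;
	out[2] = connected_2g4 ? (is_ht40 ? 0x30 : 0x20) : 0x0;
}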
-void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
- u8 type)
+void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool bt_hs_on = false;
+ bool bt_ctrl_agg_buf_size = false;
+ bool wifi_under_5g = false;
+ u32 wifi_link_status = 0;
+ u32 num_of_wifi_link = 0;
+ u8 agg_buf_size = 5;
+
+ if (btcoexist->manual_control || btcoexist->stop_coex_dm ||
+ coex_sta->bt_disabled)
+ return;
- if (btcoexist->manual_control ||
- btcoexist->stop_coex_dm ||
- btcoexist->bt_info.bt_disabled)
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ if (wifi_under_5g) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc8821a1ant_coex_under_5g(btcoexist);
return;
+ }
coex_sta->special_pkt_period_cnt = 0;
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+ &wifi_link_status);
+ num_of_wifi_link = wifi_link_status >> 16;
+ if (num_of_wifi_link >= 2) {
+ btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false,
+ bt_ctrl_agg_buf_size, agg_buf_size);
+ btc8821a1ant_action_wifi_multi_port(btcoexist);
+ return;
+ }
+
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (coex_sta->c2h_bt_inquiry_page) {
- halbtc8821a1ant_action_bt_inquiry(btcoexist);
+ btc8821a1ant_action_bt_inquiry(btcoexist);
return;
} else if (bt_hs_on) {
- halbtc8821a1ant_action_hs(btcoexist);
+ btc8821a1ant_action_hs(btcoexist);
return;
}
@@ -2741,12 +2704,13 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
}
}
-void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
- u8 *tmp_buf, u8 length)
+void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 i;
u8 bt_info = 0;
- u8 i, rsp_source = 0;
+ u8 rsp_source = 0;
bool wifi_connected = false;
bool bt_busy = false;
bool wifi_under_5g = false;
@@ -2756,7 +2720,7 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
- rsp_source = tmp_buf[0]&0xf;
+ rsp_source = tmp_buf[0] & 0xf;
if (rsp_source >= BT_INFO_SRC_8821A_1ANT_MAX)
rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
@@ -2768,7 +2732,7 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
- if (i == length-1) {
+ if (i == length - 1) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"0x%02x]\n", tmp_buf[i]);
} else {
@@ -2787,19 +2751,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->bt_info_ext =
coex_sta->bt_info_c2h[rsp_source][4];
- /* Here we need to resend some wifi info to BT*/
- /* because bt is reset and loss of the info.*/
+ /* Here we need to resend some wifi info to BT
+ * because bt is reset and lost the info
+ */
if (coex_sta->bt_info_ext & BIT1) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
- btcoexist->btc_get(btcoexist,
- BTC_GET_BL_WIFI_CONNECTED,
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected) {
- ex_halbtc8821a1ant_media_status_notify(btcoexist,
+ ex_btc8821a1ant_media_status_notify(btcoexist,
BTC_MEDIA_CONNECT);
} else {
- ex_halbtc8821a1ant_media_status_notify(btcoexist,
+ ex_btc8821a1ant_media_status_notify(btcoexist,
BTC_MEDIA_DISCONNECT);
}
}
@@ -2809,36 +2773,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
!btcoexist->stop_coex_dm) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
- halbtc8821a1ant_ignore_wlan_act(btcoexist,
- FORCE_EXEC,
- false);
+ btc8821a1ant_ignore_wlan_act(btcoexist,
+ FORCE_EXEC,
+ false);
}
}
-#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
- if (!(coex_sta->bt_info_ext & BIT4)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
- halbtc8821a1ant_bt_auto_report(btcoexist,
- FORCE_EXEC, true);
- }
-#endif
}
- /* check BIT2 first ==> check if bt is under inquiry or page scan*/
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
if (bt_info & BT_INFO_8821A_1ANT_B_INQ_PAGE)
coex_sta->c2h_bt_inquiry_page = true;
else
coex_sta->c2h_bt_inquiry_page = false;
- /* set link exist status*/
- if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
+ /* set link exist status */
+ if (!(bt_info & BT_INFO_8821A_1ANT_B_CONNECTION)) {
coex_sta->bt_link_exist = false;
coex_sta->pan_exist = false;
coex_sta->a2dp_exist = false;
coex_sta->hid_exist = false;
coex_sta->sco_exist = false;
} else {
- /* connection exists*/
+ /* connection exists */
coex_sta->bt_link_exist = true;
if (bt_info & BT_INFO_8821A_1ANT_B_FTP)
coex_sta->pan_exist = true;
@@ -2858,14 +2814,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->sco_exist = false;
}
- halbtc8821a1ant_update_bt_link_info(btcoexist);
+ btc8821a1ant_update_bt_link_info(btcoexist);
- if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
+ /* mask profile bit for connect-idle identification
+ * (for CSR case: A2DP idle --> 0x41)
+ */
+ bt_info = bt_info & 0x1f;
+
+ if (!(bt_info & BT_INFO_8821A_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
- /* connection exists but no busy*/
+ /* connection exists but no busy */
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
@@ -2895,33 +2856,48 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
btcoexist->btc_set(btcoexist,
BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
- halbtc8821a1ant_run_coexist_mechanism(btcoexist);
+ btc8821a1ant_run_coexist_mechanism(btcoexist);
}
-void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ bool wifi_under_5g = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Halt notify\n");
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ if (wifi_under_5g) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc8821a1ant_coex_under_5g(btcoexist);
+ return;
+ }
+
btcoexist->stop_coex_dm = true;
- halbtc8821a1ant_set_ant_path(btcoexist,
- BTC_ANT_PATH_BT, false, true);
- halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true);
+ btc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
- halbtc8821a1ant_power_save_state(btcoexist,
- BTC_PS_WIFI_NATIVE, 0x0, 0x0);
- halbtc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
- ex_halbtc8821a1ant_media_status_notify(btcoexist,
- BTC_MEDIA_DISCONNECT);
+ ex_btc8821a1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
}
-void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ bool wifi_under_5g = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ if (wifi_under_5g) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc8821a1ant_coex_under_5g(btcoexist);
+ return;
+ }
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Pnp notify\n");
@@ -2929,26 +2905,33 @@ void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Pnp notify to SLEEP\n");
+ /* BT should clear the UnderIPS/UnderLPS state to avoid a mismatched
+ * state after wakeup.
+ */
+ coex_sta->under_ips = false;
+ coex_sta->under_lps = false;
btcoexist->stop_coex_dm = true;
- halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
- halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
- 0x0, 0x0);
- halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
+ btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+ btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false,
+ true);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
- halbtc8821a1ant_init_hw_config(btcoexist, false);
- halbtc8821a1ant_init_coex_dm(btcoexist);
- halbtc8821a1ant_query_bt_info(btcoexist);
+ btc8821a1ant_init_hw_config(btcoexist, false, false);
+ btc8821a1ant_init_coex_dm(btcoexist);
+ btc8821a1ant_query_bt_info(btcoexist);
}
}
-void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 dis_ver_info_cnt;
- u32 fw_ver = 0, bt_patch_ver = 0;
+ static u8 dis_ver_info_cnt;
+ u32 fw_ver = 0, bt_patch_ver = 0;
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -2982,16 +2965,9 @@ void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist)
}
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
- halbtc8821a1ant_query_bt_info(btcoexist);
- halbtc8821a1ant_monitor_bt_ctr(btcoexist);
- btc8821a1ant_mon_bt_en_dis(btcoexist);
+ btc8821a1ant_query_bt_info(btcoexist);
+ btc8821a1ant_monitor_bt_ctr(btcoexist);
#else
- if (halbtc8821a1ant_Is_wifi_status_changed(btcoexist) ||
- coex_dm->auto_tdma_adjust) {
- if (coex_sta->special_pkt_period_cnt > 2)
- halbtc8821a1ant_run_coexist_mechanism(btcoexist);
- }
-
coex_sta->special_pkt_period_cnt++;
#endif
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
index 20e904890fc2..1bd1ebe3364e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
@@ -140,13 +140,14 @@ struct coex_dm_8821a_1ant {
};
struct coex_sta_8821a_1ant {
+ bool bt_disabled;
bool bt_link_exist;
bool sco_exist;
bool a2dp_exist;
bool hid_exist;
bool pan_exist;
- bool under_Lps;
+ bool under_lps;
bool under_ips;
u32 special_pkt_period_cnt;
u32 high_priority_tx;
@@ -160,6 +161,7 @@ struct coex_sta_8821a_1ant {
u8 bt_info_c2h[BT_INFO_SRC_8821A_1ANT_MAX][10];
u32 bt_info_c2h_cnt[BT_INFO_SRC_8821A_1ANT_MAX];
bool c2h_bt_inquiry_page;
+ bool wifi_is_high_pri_task;
u8 bt_retry_cnt;
u8 bt_info_ext;
};
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 1717e9ce96ca..841b4a83ab70 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -23,7 +23,7 @@
*
*****************************************************************************/
-/*============================================================
+/************************************************************
* Description:
*
* This file is for RTL8821A Co-exist mechanism
@@ -32,22 +32,19 @@
* 2012/08/22 Cosa first check in.
* 2012/11/14 Cosa Revise for 8821A 2Ant out sourcing.
*
- *============================================================
- */
+ ************************************************************/
-/*============================================================
+/************************************************************
* include files
- *============================================================
-*/
+ ************************************************************/
#include "halbt_precomp.h"
-/*============================================================
+/************************************************************
* Global variables, these are static variables
- *============================================================
- */
-static struct coex_dm_8821a_2ant glcoex_dm_8821a_2ant;
-static struct coex_dm_8821a_2ant *coex_dm = &glcoex_dm_8821a_2ant;
-static struct coex_sta_8821a_2ant glcoex_sta_8821a_2ant;
-static struct coex_sta_8821a_2ant *coex_sta = &glcoex_sta_8821a_2ant;
+ ************************************************************/
+static struct coex_dm_8821a_2ant glcoex_dm_8821a_2ant;
+static struct coex_dm_8821a_2ant *coex_dm = &glcoex_dm_8821a_2ant;
+static struct coex_sta_8821a_2ant glcoex_sta_8821a_2ant;
+static struct coex_sta_8821a_2ant *coex_sta = &glcoex_sta_8821a_2ant;
static const char *const glbt_info_src_8821a_2ant[] = {
"BT Info[wifi fw]",
@@ -55,32 +52,29 @@ static const char *const glbt_info_src_8821a_2ant[] = {
"BT Info[bt auto report]",
};
-static u32 glcoex_ver_date_8821a_2ant = 20130618;
-static u32 glcoex_ver_8821a_2ant = 0x5050;
+static u32 glcoex_ver_date_8821a_2ant = 20130618;
+static u32 glcoex_ver_8821a_2ant = 0x5050;
-/*============================================================
+/************************************************************
* local function proto type if needed
- *============================================================
- *============================================================
- * local function start with halbtc8821a2ant_
- *============================================================
- */
-static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
- u8 level_num, u8 rssi_thresh,
- u8 rssi_thresh1)
+ *
+ * local function start with btc8821a2ant_
+ ************************************************************/
+static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
+ u8 level_num, u8 rssi_thresh,
+ u8 rssi_thresh1)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- long bt_rssi = 0;
- u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+ long bt_rssi = 0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
bt_rssi = coex_sta->bt_rssi;
if (level_num == 2) {
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- long tmp = rssi_thresh +
- BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
- if (bt_rssi >= tmp) {
+ if (bt_rssi >=
+ rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BT Rssi state switch to High\n");
@@ -110,7 +104,8 @@ static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
if (bt_rssi >=
- (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
+ (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BT Rssi state switch to Medium\n");
@@ -156,13 +151,13 @@ static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
return bt_rssi_state;
}
-static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
- u8 index, u8 level_num,
- u8 rssi_thresh, u8 rssi_thresh1)
+static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num,
+ u8 rssi_thresh, u8 rssi_thresh1)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- long wifi_rssi = 0;
- u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+ long wifi_rssi = 0;
+ u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
@@ -204,7 +199,8 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
(coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_STAY_LOW)) {
if (wifi_rssi >=
- (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
+ (rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], wifi RSSI state switch to Medium\n");
@@ -248,76 +244,57 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
return wifi_rssi_state;
}
-static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
+static
+void btc8821a2ant_limited_rx(struct btc_coexist *btcoexist, bool force_exec,
+ bool rej_ap_agg_pkt, bool bt_ctrl_agg_buf_size,
+ u8 agg_buf_size)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- static bool pre_bt_disabled;
- static u32 bt_disable_cnt;
- bool bt_active = true, bt_disabled = false;
-
- /* This function check if bt is disabled*/
-
- if (coex_sta->high_priority_tx == 0 &&
- coex_sta->high_priority_rx == 0 &&
- coex_sta->low_priority_tx == 0 &&
- coex_sta->low_priority_rx == 0)
- bt_active = false;
- if (coex_sta->high_priority_tx == 0xffff &&
- coex_sta->high_priority_rx == 0xffff &&
- coex_sta->low_priority_tx == 0xffff &&
- coex_sta->low_priority_rx == 0xffff)
- bt_active = false;
- if (bt_active) {
- bt_disable_cnt = 0;
- bt_disabled = false;
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
- &bt_disabled);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is enabled !!\n");
- } else {
- bt_disable_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
- if (bt_disable_cnt >= 2) {
- bt_disabled = true;
- btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
- &bt_disabled);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is disabled !!\n");
- }
- }
- if (pre_bt_disabled != bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
- pre_bt_disabled = bt_disabled;
- }
+ bool reject_rx_agg = rej_ap_agg_pkt;
+ bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size;
+ u8 rx_agg_size = agg_buf_size;
+
+ /* Rx Aggregation related setting */
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ &reject_rx_agg);
+ /* decide BT control aggregation buf size or not */
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ &bt_ctrl_rx_agg_size);
+ /* aggregation buf size, works when BT control Rx aggregation size */
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size);
+ /* real update aggregation setting */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
}
-static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+static void btc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- u32 reg_hp_txrx, reg_lp_txrx, u4tmp;
- u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ u32 reg_hp_txrx, reg_lp_txrx, u4tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
reg_hp_txrx = 0x770;
reg_lp_txrx = 0x774;
u4tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
reg_hp_tx = u4tmp & MASKLWORD;
- reg_hp_rx = (u4tmp & MASKHWORD)>>16;
+ reg_hp_rx = (u4tmp & MASKHWORD) >> 16;
u4tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
reg_lp_tx = u4tmp & MASKLWORD;
- reg_lp_rx = (u4tmp & MASKHWORD)>>16;
+ reg_lp_rx = (u4tmp & MASKHWORD) >> 16;
coex_sta->high_priority_tx = reg_hp_tx;
coex_sta->high_priority_rx = reg_hp_rx;
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
+ if ((coex_sta->low_priority_rx >= 950) &&
+ (coex_sta->low_priority_rx >= coex_sta->low_priority_tx) &&
+ (!coex_sta->under_ips))
+ bt_link_info->slave_role = true;
+ else
+ bt_link_info->slave_role = false;
+
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
@@ -329,14 +306,51 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
}
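/*
 * Illustrative sketch, not part of the patch: registers 0x770/0x774 each
 * pack two 16-bit counters, tx in the low halfword (MASKLWORD) and rx in
 * the high one (MASKHWORD), and the new slave-role heuristic above flags
 * BT as slave when low-priority rx dominates. A standalone decoder:
 */
#include <stdbool.h>
#include <stdint.h>

struct prio_ctr {
	uint16_t tx;	/* low halfword */
	uint16_t rx;	/* high halfword */
};

static struct prio_ctr unpack_prio_ctr(uint32_t reg_val)
{
	struct prio_ctr c = {
		.tx = (uint16_t)(reg_val & 0xffffu),
		.rx = (uint16_t)(reg_val >> 16),
	};
	return c;
}

static bool bt_looks_like_slave(struct prio_ctr lp, bool under_ips)
{
	/* same thresholds as above: rx >= 950, rx >= tx, and not in IPS */
	return lp.rx >= 950 && lp.rx >= lp.tx && !under_ips;
}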
-static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
+static void btc8821a2ant_monitor_wifi_ctr(struct btc_coexist *btcoexist)
+{
+ if (coex_sta->under_ips) {
+ coex_sta->crc_ok_cck = 0;
+ coex_sta->crc_ok_11g = 0;
+ coex_sta->crc_ok_11n = 0;
+ coex_sta->crc_ok_11n_agg = 0;
+
+ coex_sta->crc_err_cck = 0;
+ coex_sta->crc_err_11g = 0;
+ coex_sta->crc_err_11n = 0;
+ coex_sta->crc_err_11n_agg = 0;
+ } else {
+ coex_sta->crc_ok_cck =
+ btcoexist->btc_read_4byte(btcoexist, 0xf88);
+ coex_sta->crc_ok_11g =
+ btcoexist->btc_read_2byte(btcoexist, 0xf94);
+ coex_sta->crc_ok_11n =
+ btcoexist->btc_read_2byte(btcoexist, 0xf90);
+ coex_sta->crc_ok_11n_agg =
+ btcoexist->btc_read_2byte(btcoexist, 0xfb8);
+
+ coex_sta->crc_err_cck =
+ btcoexist->btc_read_4byte(btcoexist, 0xf84);
+ coex_sta->crc_err_11g =
+ btcoexist->btc_read_2byte(btcoexist, 0xf96);
+ coex_sta->crc_err_11n =
+ btcoexist->btc_read_2byte(btcoexist, 0xf92);
+ coex_sta->crc_err_11n_agg =
+ btcoexist->btc_read_2byte(btcoexist, 0xfba);
+ }
+
+ /* reset counter */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x1);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x0);
+}
+
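/*
 * Illustrative sketch, not part of the patch: the CRC-OK and CRC-error
 * counters sampled above (0xf84..0xfba) describe one polling interval,
 * since 0xf16 bit 0 is toggled to reset them on each pass. One plausible
 * way a coex policy could fold them into a packet-error-rate figure
 * (this helper is hypothetical):
 */
#include <stdint.h>

/* PER in percent for one modulation class; guards the empty interval */
static unsigned int per_percent(uint32_t crc_ok, uint32_t crc_err)
{
	uint32_t total = crc_ok + crc_err;

	return total ? (100u * crc_err) / total : 0u;
}

/* e.g. per_percent(coex_sta->crc_ok_cck, coex_sta->crc_err_cck) */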
+static void btc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
coex_sta->c2h_bt_info_req_sent = true;
- h2c_parameter[0] |= BIT0; /* trigger */
+ h2c_parameter[0] |= BIT0; /* trigger */
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
@@ -345,54 +359,135 @@ static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
-static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
+bool btc8821a2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy = true;
+ static bool pre_under_4way = true;
+ static bool pre_bt_hs_on = true;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+ u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 3, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_LOW))
+ return true;
+ }
+
+ return false;
+}
+
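/*
 * Illustrative sketch, not part of the patch: the function above keeps a
 * one-deep history in function-local statics and reports an edge on any
 * of the three booleans (note the statics start as true, so a first
 * reading of false also counts as a change). The pattern in isolation:
 */
#include <stdbool.h>

static bool latch_changed(bool cur, bool *prev)
{
	if (cur != *prev) {
		*prev = cur;	/* remember the new state */
		return true;	/* edge detected */
	}
	return false;
}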
+static void btc8821a2ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+ /* workaround for HS mode */
+ if (bt_hs_on) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+
+ /* check if SCO only */
+ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2DP only */
+ if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if PAN only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if HID only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
+static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
bool bt_hs_on = false;
u8 algorithm = BT_8821A_2ANT_COEX_ALGO_UNDEFINED;
u8 num_of_diff_profile = 0;
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
- /*for win-8 stack HID report error*/
- /* sync BTInfo with BT firmware and stack */
- if (!stack_info->hid_exist)
- stack_info->hid_exist = coex_sta->hid_exist;
- /* when stack HID report error, here we use the info from bt fw. */
- if (!stack_info->bt_link_exist)
- stack_info->bt_link_exist = coex_sta->bt_link_exist;
-
- if (!coex_sta->bt_link_exist) {
+ if (!bt_link_info->bt_link_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No profile exists!!!\n");
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
- if (coex_sta->sco_exist)
+ if (bt_link_info->sco_exist)
num_of_diff_profile++;
- if (coex_sta->hid_exist)
+ if (bt_link_info->hid_exist)
num_of_diff_profile++;
- if (coex_sta->pan_exist)
+ if (bt_link_info->pan_exist)
num_of_diff_profile++;
- if (coex_sta->a2dp_exist)
+ if (bt_link_info->a2dp_exist)
num_of_diff_profile++;
if (num_of_diff_profile == 1) {
- if (coex_sta->sco_exist) {
+ if (bt_link_info->sco_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCO only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- if (coex_sta->hid_exist) {
+ if (bt_link_info->hid_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], HID only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
- } else if (coex_sta->a2dp_exist) {
+ } else if (bt_link_info->a2dp_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], A2DP only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
- } else if (coex_sta->pan_exist) {
+ } else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
@@ -407,16 +502,16 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
}
}
} else if (num_of_diff_profile == 2) {
- if (coex_sta->sco_exist) {
- if (coex_sta->hid_exist) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCO + HID\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
- } else if (coex_sta->a2dp_exist) {
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
+ } else if (bt_link_info->a2dp_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCO + A2DP ==> SCO\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
- } else if (coex_sta->pan_exist) {
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
+ } else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
@@ -426,99 +521,104 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], SCO + PAN(EDR)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
} else {
- if (coex_sta->hid_exist &&
- coex_sta->a2dp_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], HID + A2DP\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
- } else if (coex_sta->hid_exist &&
- coex_sta->pan_exist) {
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], HID + PAN(HS)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
+ algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], HID + PAN(EDR)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+ algorithm =
+ BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
- } else if (coex_sta->pan_exist &&
- coex_sta->a2dp_exist) {
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], A2DP + PAN(HS)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
+ algorithm =
+ BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], A2DP + PAN(EDR)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
+ algorithm =
+ BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
}
}
}
} else if (num_of_diff_profile == 3) {
- if (coex_sta->sco_exist) {
- if (coex_sta->hid_exist &&
- coex_sta->a2dp_exist) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], SCO + HID + A2DP ==> HID\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
- } else if (coex_sta->hid_exist &&
- coex_sta->pan_exist) {
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], SCO + HID + PAN(HS)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], SCO + HID + PAN(EDR)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
- } else if (coex_sta->pan_exist &&
- coex_sta->a2dp_exist) {
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], SCO + A2DP + PAN(HS)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
} else {
- if (coex_sta->hid_exist &&
- coex_sta->pan_exist &&
- coex_sta->a2dp_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], HID + A2DP + PAN(HS)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
+ algorithm =
+ BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], HID + A2DP + PAN(EDR)\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ algorithm =
+ BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
}
} else if (num_of_diff_profile >= 3) {
- if (coex_sta->sco_exist) {
- if (coex_sta->hid_exist &&
- coex_sta->pan_exist &&
- coex_sta->a2dp_exist) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
if (bt_hs_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
@@ -528,7 +628,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
RT_TRACE(rtlpriv, COMP_BT_COEXIST,
DBG_LOUD,
"[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
- algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
+ algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
}
@@ -536,44 +636,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
return algorithm;
}
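/*
 * Illustrative sketch, not part of the patch: the algorithm selector above
 * first counts how many distinct BT profiles are active before branching;
 * the count is just the sum of four booleans:
 */
#include <stdbool.h>

static int count_profiles(bool sco, bool hid, bool pan, bool a2dp)
{
	return (int)sco + (int)hid + (int)pan + (int)a2dp;
}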
-static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool ret = false;
- bool bt_hs_on = false, wifi_connected = false;
- long bt_hs_rssi = 0;
- u8 bt_rssi_state;
-
- if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
- return false;
- if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
- &wifi_connected))
- return false;
- if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
- return false;
-
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
-
- if (wifi_connected) {
- if (bt_hs_on) {
- if (bt_hs_rssi > 37) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Need to decrease bt power for HS mode!!\n");
- ret = true;
- }
- } else {
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
- ret = true;
- }
- }
- }
- return ret;
-}
-
-static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
+static void btc8821a2ant_set_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
u8 dac_swing_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -592,185 +655,47 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
-static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
- bool dec_bt_pwr)
+static void btc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+ u8 dec_bt_pwr_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[1] = {0};
- h2c_parameter[0] = 0;
-
- if (dec_bt_pwr)
- h2c_parameter[0] |= BIT1;
+ h2c_parameter[0] = dec_bt_pwr_lvl;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ "[BTCoex], decrease Bt Power Level : %u, FW write 0x62 = 0x%x\n",
+ dec_bt_pwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
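
The 0x62 host-to-chip command changes meaning in this hunk: byte 0 used to carry a single on/off flag in BIT1 and now carries the raw decrement level. The two encodings side by side (the semantics of the level values are an assumption; no firmware spec appears in this file):

	u8 old_param = dec_bt_pwr ? BIT1 : 0;	/* bool: decrease or not */
	u8 new_param = dec_bt_pwr_lvl;		/* 0 = no decrement,
						 * larger = stronger cut */
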
-static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
- bool force_exec, bool dec_bt_pwr)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s Dec BT power = %s\n",
- (force_exec ? "force to" : ""),
- ((dec_bt_pwr) ? "ON" : "OFF"));
- coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
-
- if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
-
- if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
- return;
- }
- halbtc8821a2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
-
- coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
-}
-
-static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
- bool bt_lna_cons_on)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[2] = {0};
-
- h2c_parameter[0] = 0x3; /* opCode, 0x3 = BT_SET_LNA_CONSTRAIN */
-
- if (bt_lna_cons_on)
- h2c_parameter[1] |= BIT0;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
- bt_lna_cons_on ? "ON!!" : "OFF!!",
- h2c_parameter[0] << 8 | h2c_parameter[1]);
-
- btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
-}
-
-static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
- bool force_exec, bool bt_lna_cons_on)
+static void btc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool force_exec, u8 dec_bt_pwr_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s BT Constrain = %s\n",
- (force_exec ? "force" : ""),
- ((bt_lna_cons_on) ? "ON" : "OFF"));
- coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
+ "[BTCoex], %s Dec BT power level = %u\n",
+ (force_exec ? "force to" : ""), dec_bt_pwr_lvl);
+ coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl;
if (!force_exec) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
- coex_dm->pre_bt_lna_constrain,
- coex_dm->cur_bt_lna_constrain);
+ "[BTCoex], pre_dec_bt_pwr_lvl = %d, cur_dec_bt_pwr_lvl = %d\n",
+ coex_dm->pre_dec_bt_pwr_lvl,
+ coex_dm->cur_dec_bt_pwr_lvl);
- if (coex_dm->pre_bt_lna_constrain ==
- coex_dm->cur_bt_lna_constrain)
+ if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl)
return;
}
- btc8821a2ant_set_fw_bt_lna_constr(btcoexist,
- coex_dm->cur_bt_lna_constrain);
-
- coex_dm->pre_bt_lna_constrain = coex_dm->cur_bt_lna_constrain;
-}
-
-static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
- u8 bt_psd_mode)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[2] = {0};
+ btc8821a2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr_lvl);
- h2c_parameter[0] = 0x2; /* opCode, 0x2 = BT_SET_PSD_MODE */
-
- h2c_parameter[1] = bt_psd_mode;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
- h2c_parameter[1],
- h2c_parameter[0] << 8 | h2c_parameter[1]);
-
- btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
+ coex_dm->pre_dec_bt_pwr_lvl = coex_dm->cur_dec_bt_pwr_lvl;
}
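
The wrapper above is one instance of the caching idiom this file applies to every firmware/register setting: record the request as cur_*, skip the write when it equals pre_* and force_exec is false, otherwise commit and latch. A generic restatement of the pattern (hypothetical helper; the driver repeats the idiom inline instead):

	static void cached_commit_u8(struct btc_coexist *btcoexist,
				     bool force_exec, u8 *pre, u8 cur,
				     void (*commit)(struct btc_coexist *, u8))
	{
		if (!force_exec && *pre == cur)
			return;		/* hardware already holds this value */
		commit(btcoexist, cur);
		*pre = cur;		/* latch for the next comparison */
	}
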
-static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
- bool force_exec, u8 bt_psd_mode)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s BT PSD mode = 0x%x\n",
- (force_exec ? "force" : ""), bt_psd_mode);
- coex_dm->cur_bt_psd_mode = bt_psd_mode;
-
- if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
- coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
-
- if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
- return;
- }
- halbtc8821a2ant_set_fw_bt_psd_mode(btcoexist,
- coex_dm->cur_bt_psd_mode);
-
- coex_dm->pre_bt_psd_mode = coex_dm->cur_bt_psd_mode;
-}
-
-static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
- bool enable_auto_report)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[1] = {0};
-
- h2c_parameter[0] = 0;
-
- if (enable_auto_report)
- h2c_parameter[0] |= BIT0;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
-
- btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
-}
-
-static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist,
- bool force_exec,
- bool enable_auto_report)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
- ((enable_auto_report) ? "Enabled" : "Disabled"));
- coex_dm->cur_bt_auto_report = enable_auto_report;
-
- if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
-
- if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
- return;
- }
- halbtc8821a2ant_set_bt_auto_report(btcoexist,
- coex_dm->cur_bt_auto_report);
-
- coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
-}
-
-static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
- bool force_exec,
- u8 fw_dac_swing_lvl)
+static void btc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swing_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -790,66 +715,14 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
return;
}
- btc8821a2ant_set_fw_dac_swing_lev(btcoexist,
+ btc8821a2ant_set_fw_dac_swing_lvl(btcoexist,
coex_dm->cur_fw_dac_swing_lvl);
coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
}
-static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
- bool rx_rf_shrink_on)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (rx_rf_shrink_on) {
- /* Shrink RF Rx LPF corner */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
- 0xfffff, 0xffffc);
- } else {
- /* Resume RF Rx LPF corner
- * After initialized, we can use coex_dm->bt_rf0x1e_backup
- */
- if (btcoexist->initilized) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
- btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
- 0x1e, 0xfffff,
- coex_dm->bt_rf0x1e_backup);
- }
- }
-}
-
-static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
- bool force_exec, bool rx_rf_shrink_on)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""),
- ((rx_rf_shrink_on) ? "ON" : "OFF"));
- coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
-
- if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
-
- if (coex_dm->pre_rf_rx_lpf_shrink ==
- coex_dm->cur_rf_rx_lpf_shrink)
- return;
- }
- btc8821a2ant_set_sw_rf_rx_lpf_corner(btcoexist,
- coex_dm->cur_rf_rx_lpf_shrink);
-
- coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
-}
-
-static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
- bool low_penalty_ra)
+static void btc8821a2ant_set_sw_penalty_tx_rate_adaptive(
+ struct btc_coexist *btcoexist, bool low_penalty_ra)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[6] = {0};
@@ -858,14 +731,14 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
if (low_penalty_ra) {
h2c_parameter[1] |= BIT0;
- /*normal rate except MCS7/6/5, OFDM54/48/36 */
+ /* normal rate except MCS7/6/5, OFDM54/48/36 */
h2c_parameter[2] = 0x00;
- /*MCS7 or OFDM54 */
- h2c_parameter[3] = 0xf7;
- /*MCS6 or OFDM48 */
- h2c_parameter[4] = 0xf8;
- /*MCS5 or OFDM36 */
- h2c_parameter[5] = 0xf9;
+ /* MCS7 or OFDM54 */
+ h2c_parameter[3] = 0xf5;
+ /* MCS6 or OFDM48 */
+ h2c_parameter[4] = 0xa0;
+ /* MCS5 or OFDM36 */
+ h2c_parameter[5] = 0xa0;
}
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -875,12 +748,11 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
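
The retuned bytes above sit in the 6-byte H2C 0x69 low-penalty rate-adaptive command. The layout below is inferred from this function alone, not from a firmware spec:

	/* h2c_parameter[6] as filled here (inferred):
	 *   [0] selector (set before this hunk)
	 *   [1] BIT0 = enable low-penalty rate adaptive
	 *   [2] 0x00 = normal rates except MCS7/6/5, OFDM54/48/36
	 *   [3] penalty index for MCS7/OFDM54: 0xf7 -> 0xf5
	 *   [4] penalty index for MCS6/OFDM48: 0xf8 -> 0xa0
	 *   [5] penalty index for MCS5/OFDM36: 0xf9 -> 0xa0
	 */
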
-static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
- bool force_exec, bool low_penalty_ra)
+static void btc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- /*return;*/
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], %s turn LowPenaltyRA = %s\n",
(force_exec ? "force to" : ""),
@@ -891,19 +763,19 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
}
- btc8821a2ant_SetSwPenTxRateAdapt(btcoexist,
+ btc8821a2ant_set_sw_penalty_tx_rate_adaptive(btcoexist,
coex_dm->cur_low_penalty_ra);
coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
}
-static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
- u32 level)
+static void btc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+ u32 level)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8)level;
@@ -918,14 +790,14 @@ static void btc8821a2ant_set_sw_full_dac_swing(struct btc_coexist *btcoexist,
u32 sw_dac_swing_lvl)
{
if (sw_dac_swing_on)
- halbtc8821a2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
+ btc8821a2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl);
else
- halbtc8821a2ant_set_dac_swing_reg(btcoexist, 0x18);
+ btc8821a2ant_set_dac_swing_reg(btcoexist, 0x18);
}
-static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
- bool force_exec, bool dac_swing_on,
- u32 dac_swing_lvl)
+static void btc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swing_on,
+ u32 dac_swing_lvl)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -958,50 +830,9 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
}
-static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
- bool adc_back_off)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- if (adc_back_off) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BB BackOff Level On!\n");
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BB BackOff Level Off!\n");
- btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
- }
-}
-
-static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
- bool force_exec, bool adc_back_off)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn AdcBackOff = %s\n",
- (force_exec ? "force to" : ""),
- ((adc_back_off) ? "ON" : "OFF"));
- coex_dm->cur_adc_back_off = adc_back_off;
-
- if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
- coex_dm->pre_adc_back_off,
- coex_dm->cur_adc_back_off);
-
- if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
- return;
- }
- halbtc8821a2ant_set_adc_back_off(btcoexist, coex_dm->cur_adc_back_off);
-
- coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off;
-}
-
-static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
- u32 val0x6c0, u32 val0x6c4,
- u32 val0x6c8, u8 val0x6cc)
+static void btc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1022,9 +853,9 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
-static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
- bool force_exec, u32 val0x6c0,
- u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
+static void btc8821a2ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0,
+ u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1057,8 +888,8 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
(coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
return;
}
- halbtc8821a2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8,
- val0x6cc);
+ btc8821a2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8,
+ val0x6cc);
coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
@@ -1066,14 +897,97 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
}
-static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
- bool enable)
+static void btc8821a2ant_coex_table_with_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ coex_sta->coex_table_type = type;
+
+ switch (type) {
+ case 0:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x55555555, 0xffffff, 0x3);
+ break;
+ case 1:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5afa5afa, 0xffffff, 0x3);
+ break;
+ case 2:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x5ada5ada,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 3:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 4:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0xffffffff,
+ 0xffffffff, 0xffffff, 0x3);
+ break;
+ case 5:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0x5fff5fff, 0xffffff, 0x3);
+ break;
+ case 6:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5a5a5a, 0xffffff, 0x3);
+ break;
+ case 7:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 8:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 9:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 10:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 11:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 12:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 13:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 14:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0x5ada5ada, 0xffffff, 0x3);
+ break;
+ case 15:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd,
+ 0xaaaaaaaa, 0xffffff, 0x3);
+ break;
+ case 16:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fdf5fdf,
+ 0x5fdb5fdb, 0xffffff, 0x3);
+ break;
+ case 17:
+ btc8821a2ant_coex_table(btcoexist, force_exec, 0xfafafafa,
+ 0xfafafafa, 0xffffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
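
btc8821a2ant_coex_table_with_type replaces scattered literal register quadruples at the call sites with a small type number. Note that val0x6c8 is always 0xffffff, val0x6cc is always 0x3, and types 7 through 12 currently share one setting, which a lookup table would make more obvious than the switch. A sketch of an equivalent table-driven form (hypothetical alternative; the in-tree code keeps the switch):

	static const u32 coex_tbl[][2] = {
		[0]  = { 0x55555555, 0x55555555 },
		[1]  = { 0x55555555, 0x5afa5afa },
		[2]  = { 0x5ada5ada, 0x5ada5ada },
		/* ... types 3-16 as in the switch above ... */
		[17] = { 0xfafafafa, 0xfafafafa },
	};

	static void coex_table_by_type(struct btc_coexist *btcoexist,
				       bool force_exec, u8 type)
	{
		if (type >= ARRAY_SIZE(coex_tbl))
			return;
		btc8821a2ant_coex_table(btcoexist, force_exec,
					coex_tbl[type][0], coex_tbl[type][1],
					0xffffff, 0x3);
	}
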
+
+static void btc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
+ bool enable)
{
struct rtl_priv *rtlpriv = btcoex->adapter;
u8 h2c_parameter[1] = {0};
if (enable)
- h2c_parameter[0] |= BIT0;/* function enable */
+ h2c_parameter[0] |= BIT0; /* function enable */
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
@@ -1082,8 +996,35 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
}
-static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
- bool force_exec, bool enable)
+static void btc8821a2ant_set_lps_rpwm(struct btc_coexist *btcoexist, u8 lps_val,
+ u8 rpwm_val)
+{
+ u8 lps = lps_val;
+ u8 rpwm = rpwm_val;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps);
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
+}
+
+static void btc8821a2ant_lps_rpwm(struct btc_coexist *btcoexist,
+ bool force_exec, u8 lps_val, u8 rpwm_val)
+{
+ coex_dm->cur_lps = lps_val;
+ coex_dm->cur_rpwm = rpwm_val;
+
+ if (!force_exec) {
+ if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
+ (coex_dm->pre_rpwm == coex_dm->cur_rpwm))
+ return;
+ }
+ btc8821a2ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val);
+
+ coex_dm->pre_lps = coex_dm->cur_lps;
+ coex_dm->pre_rpwm = coex_dm->cur_rpwm;
+}
+
+static void btc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -1102,14 +1043,14 @@ static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
coex_dm->cur_ignore_wlan_act)
return;
}
- halbtc8821a2ant_set_fw_ignore_wlan_act(btcoexist, enable);
+ btc8821a2ant_set_fw_ignore_wlan_act(btcoexist, enable);
coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
}
-static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
- u8 byte1, u8 byte2, u8 byte3,
- u8 byte4, u8 byte5)
+static void btc8821a2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
+ u8 byte1, u8 byte2, u8 byte3,
+ u8 byte4, u8 byte5)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 h2c_parameter[5];
@@ -1137,45 +1078,24 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
-static void btc8821a2ant_sw_mech1(struct btc_coexist *btcoexist,
- bool shrink_rx_lpf,
- bool low_penalty_ra, bool limited_dig,
- bool bt_lna_constrain)
+static void btc8821a2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool bt_lna_constrain)
{
- u32 wifi_bw;
-
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-
- if (BTC_WIFI_BW_HT40 != wifi_bw) {
- /*only shrink RF Rx LPF for HT40*/
- if (shrink_rx_lpf)
- shrink_rx_lpf = false;
- }
-
- halbtc8821a2ant_RfShrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
- halbtc8821a2ant_low_penalty_ra(btcoexist,
- NORMAL_EXEC, low_penalty_ra);
-
- /* no limited DIG
- * btc8821a2_set_bt_lna_const(btcoexist,
- NORMAL_EXEC, bBTLNAConstrain);
- */
+ btc8821a2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
-static void btc8821a2ant_sw_mech2(struct btc_coexist *btcoexist,
- bool agc_table_shift,
- bool adc_back_off, bool sw_dac_swing,
- u32 dac_swing_lvl)
+static void btc8821a2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_back_off,
+ bool sw_dac_swing, u32 dac_swing_lvl)
{
- /* halbtc8821a2ant_AgcTable(btcoexist, NORMAL_EXEC, bAGCTableShift); */
- halbtc8821a2ant_adc_back_off(btcoexist, NORMAL_EXEC, adc_back_off);
- halbtc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
- sw_dac_swing);
+ btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+ dac_swing_lvl);
}
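
Besides the renames, this hunk fixes a real argument bug: the old sw_mech2 passed the boolean sw_dac_swing as both the on/off flag and the swing level, so a caller-supplied level such as 0x18 collapsed to 0 or 1; the new code forwards dac_swing_lvl (sw_mechanism1 is likewise reduced to just the low-penalty-RA knob, its other parameters now being ignored). In miniature:

	/* old (buggy): the level argument received the bool */
	btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
			       sw_dac_swing);
	/* new: the level argument receives the caller's value, e.g. 0x18 */
	btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
			       dac_swing_lvl);
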
-static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
- u8 ant_pos_type, bool init_hw_cfg,
- bool wifi_off)
+static void btc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
+ u8 ant_pos_type, bool init_hw_cfg,
+ bool wifi_off)
{
struct btc_board_info *board_info = &btcoexist->board_info;
u32 u4tmp = 0;
@@ -1189,21 +1109,18 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0x4c, u4tmp);
btcoexist->btc_write_4byte(btcoexist, 0x974, 0x3ff);
- btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77);
if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
- /* tell firmware "antenna inverse" ==>
- * WRONG firmware antenna control code.
- * ==>need fw to fix
+ /* tell firmware "antenna inverse" ==> WRONG firmware
+ * antenna control code ==> need fw to fix
*/
h2c_parameter[0] = 1;
h2c_parameter[1] = 1;
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
h2c_parameter);
} else {
- /* tell firmware "no antenna inverse"
- * ==> WRONG firmware antenna control code.
- * ==>need fw to fix
+ /* tell firmware "no antenna inverse" ==> WRONG firmware
+ * antenna control code ==> need fw to fix
*/
h2c_parameter[0] = 0;
h2c_parameter[1] = 1;
@@ -1223,11 +1140,25 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
}
}
-static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
- bool force_exec, bool turn_on, u8 type)
+static void btc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
+ bool force_exec, bool turn_on, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 wifi_rssi_state, bt_rssi_state;
+
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2,
+ BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
+
+ if (!(BTC_RSSI_HIGH(wifi_rssi_state) &&
+ BTC_RSSI_HIGH(bt_rssi_state)) &&
+ turn_on) {
+ /* for WiFi RSSI low or BT RSSI low */
+ type = type + 100;
+ }
+
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], %s turn %s PS TDMA, type = %d\n",
(force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
@@ -1251,108 +1182,181 @@ static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
switch (type) {
case 1:
default:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+ 0x03, 0xf1, 0x90);
break;
case 2:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12,
- 0x12, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d,
+ 0x03, 0xf1, 0x90);
break;
case 3:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1c,
- 0x3, 0xf1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x90);
break;
case 4:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x10,
- 0x03, 0xf1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x03, 0xf1, 0x90);
break;
case 5:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0x60, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+ 0x3, 0x70, 0x90);
break;
case 6:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12,
- 0x12, 0x60, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d,
+ 0x3, 0x70, 0x90);
break;
case 7:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1c,
- 0x3, 0x70, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70, 0x90);
break;
case 8:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xa3, 0x10,
- 0x3, 0x70, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
+ 0x3, 0x70, 0x90);
break;
case 9:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+ 0x03, 0xf1, 0x90);
break;
case 10:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12,
- 0x12, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d,
+ 0x03, 0xf1, 0x90);
break;
case 11:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0xa,
- 0xa, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x90);
break;
case 12:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5,
- 0x5, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x3, 0xf1, 0x90);
break;
case 13:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0x60, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+ 0x3, 0x70, 0x90);
break;
case 14:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3,
- 0x12, 0x12, 0x60, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d,
+ 0x3, 0x70, 0x90);
break;
case 15:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0xa,
- 0xa, 0x60, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70, 0x90);
break;
case 16:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5,
- 0x5, 0x60, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x3, 0x70, 0x90);
break;
case 17:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xa3, 0x2f,
- 0x2f, 0x60, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
+ 0x2f, 0x60, 0x90);
break;
case 18:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5,
- 0x5, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, 0x5,
+ 0xe1, 0x90);
break;
case 19:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x25,
- 0x25, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0xe1, 0x90);
break;
case 20:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x25,
- 0x25, 0x60, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0x60, 0x90);
break;
case 21:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x15,
- 0x03, 0x70, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70, 0x90);
+ break;
+ case 23:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1e,
+ 0x03, 0xf0, 0x14);
+ break;
+ case 24:
+ case 124:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x3c,
+ 0x03, 0x70, 0x50);
+ break;
+ case 25:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x14,
+ 0x03, 0xf1, 0x90);
+ break;
+ case 26:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x30,
+ 0x03, 0xf1, 0x90);
break;
case 71:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a,
- 0x1a, 0xe1, 0x90);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+ 0x03, 0xf1, 0x90);
+ break;
+ case 101:
+ case 105:
+ case 171:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x3a,
+ 0x03, 0x70, 0x50);
+ break;
+ case 102:
+ case 106:
+ case 110:
+ case 114:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x2d,
+ 0x03, 0x70, 0x50);
+ break;
+ case 103:
+ case 107:
+ case 111:
+ case 115:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c,
+ 0x03, 0x70, 0x50);
+ break;
+ case 104:
+ case 108:
+ case 112:
+ case 116:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x10,
+ 0x03, 0x70, 0x50);
+ break;
+ case 109:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+ 0x03, 0xf1, 0x90);
+ break;
+ case 113:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c,
+ 0x03, 0x70, 0x90);
+ break;
+ case 121:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70, 0x90);
+ break;
+ case 22:
+ case 122:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35,
+ 0x03, 0x71, 0x11);
+ break;
+ case 123:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c,
+ 0x03, 0x70, 0x54);
+ break;
+ case 125:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x14,
+ 0x03, 0x70, 0x50);
+ break;
+ case 126:
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x30,
+ 0x03, 0x70, 0x50);
break;
}
} else {
/* disable PS tdma */
switch (type) {
case 0:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x40, 0x0);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
break;
case 1:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x48, 0x0);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x48, 0x0);
break;
default:
- halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0,
- 0x40, 0x0);
+ btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
break;
}
}
@@ -1362,867 +1366,450 @@ static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
}
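
The RSSI gate added at the top of this function is why the switch now carries the 1xx cases: when TDMA is being turned on and either link is weak, the base type is bumped by 100 and dispatches to a more conservative variant (for example base case 1 uses 0xe3/0x3c timing while its low-RSSI sibling 101 drops to 0xd3/0x3a, and 24/124 share one setting). Callers keep passing base types; under that assumption a typical invocation is:

	/* requests base type 23; the gate rewrites it to 123 when
	 * WiFi or BT RSSI is low, before the switch dispatch */
	btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
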
-static void halbtc8821a2ant_coex_all_off(struct btc_coexist *btcoexist)
+static void
+btc8821a2ant_ps_tdma_check_for_power_save_state(struct btc_coexist *btcoexist,
+ bool new_ps_state)
+{
+ u8 lps_mode = 0x0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode);
+
+ if (lps_mode) {
+ /* already under LPS state */
+ if (new_ps_state) {
+ /* keep state under LPS, do nothing */
+ } else {
+ /* will leave LPS state, turn off psTdma first */
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ }
+ } else {
+ /* NO PS state */
+ if (new_ps_state) {
+ /* will enter LPS state, turn off psTdma first */
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ } else {
+ /* keep state under NO PS state, do nothing */
+ }
+ }
+}
+
+static void btc8821a2ant_power_save_state(struct btc_coexist *btcoexist,
+ u8 ps_type, u8 lps_val, u8 rpwm_val)
+{
+ bool low_pwr_disable = false;
+
+ switch (ps_type) {
+ case BTC_PS_WIFI_NATIVE:
+ /* recover to original 32k low power setting */
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ coex_sta->force_lps_on = false;
+ break;
+ case BTC_PS_LPS_ON:
+ btc8821a2ant_ps_tdma_check_for_power_save_state(btcoexist,
+ true);
+ btc8821a2ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val,
+ rpwm_val);
+ /* when coex force to enter LPS, do not enter 32k low power */
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ /* power save must be executed before psTdma */
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ coex_sta->force_lps_on = true;
+ break;
+ case BTC_PS_LPS_OFF:
+ btc8821a2ant_ps_tdma_check_for_power_save_state(btcoexist,
+ false);
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ coex_sta->force_lps_on = false;
+ break;
+ default:
+ break;
+ }
+}
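
btc8821a2ant_power_save_state centralizes the three power-save transitions: BTC_PS_LPS_ON must tear down any running PS-TDMA first (via the check helper above) and pins force_lps_on so later code knows LPS was coexistence-driven, while BTC_PS_WIFI_NATIVE hands 32k low power back to the WiFi stack. A typical call pair in the style of the action routines later in this diff (the 0x50/0x4 values are illustrative, taken from sibling btcoex drivers rather than this hunk):

	btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, 0x4);
	btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
	/* ... later, return power policy to the WiFi stack ... */
	btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
				      0x0, 0x0);
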
+
+static void btc8821a2ant_coex_all_off(struct btc_coexist *btcoexist)
{
/* fw all off */
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
/* sw all off */
- btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
/* hw all off */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC,
- 0x55555555, 0x55555555, 0xffff, 0x3);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
}
-static void halbtc8821a2ant_coex_under_5g(struct btc_coexist *btcoexist)
+static void btc8821a2ant_coex_under_5g(struct btc_coexist *btcoexist)
{
- halbtc8821a2ant_coex_all_off(btcoexist);
+ btc8821a2ant_coex_all_off(btcoexist);
+ btc8821a2ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true);
}
-static void halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
+static void btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
/* force to reset coex mechanism */
- halbtc8821a2ant_coex_table(btcoexist, FORCE_EXEC, 0x55555555,
- 0x55555555, 0xffff, 0x3);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- halbtc8821a2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
- halbtc8821a2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ btc8821a2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0);
- btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
}
-static void halbtc8821a2ant_bt_inquiry_page(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ bool wifi_connected = false;
bool low_pwr_disable = true;
+ bool scan = false, link = false, roam = false;
+
+ wifi_rssi_state =
+ btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
-
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5afa5afa, 0xffff, 0x3);
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
-}
-
-static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool common = false, wifi_connected = false, wifi_busy = false;
- bool low_pwr_disable = false;
-
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
- btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5afa5afa, 0xffff, 0x3);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- if (!wifi_connected &&
- BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status) {
- low_pwr_disable = false;
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
- &low_pwr_disable);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ if (scan || link || roam) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi link process + BT Inq/Page!!\n");
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+ } else if (wifi_connected) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT Inq/Page!!\n");
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
+ } else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi IPS + BT IPS!!\n");
+ "[BTCoex], Wifi no-link + BT Inq/Page!!\n");
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ }
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
- common = true;
- } else if (wifi_connected &&
- (BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status)) {
- low_pwr_disable = false;
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
- &low_pwr_disable);
+void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 u8tmpa, u8tmpb;
- if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Busy + BT IPS!!\n");
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 1);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi LPS + BT IPS!!\n");
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 1);
- }
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
- btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+ u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
- common = true;
- } else if (!wifi_connected &&
- (BT_8821A_2ANT_BT_STATUS_CON_IDLE == coex_dm->bt_status)) {
- low_pwr_disable = true;
- btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
- &low_pwr_disable);
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x765=0x%x, 0x76e=0x%x\n", u8tmpa, u8tmpb);
+}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi IPS + BT LPS!!\n");
+static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u8 ap_num = 0;
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ wifi_rssi_state =
+ btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES - 20, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
- btc8821a2ant_sw_mech1(btcoexist, false, false, false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
- common = true;
- } else if (wifi_connected &&
- (BT_8821A_2ANT_BT_STATUS_CON_IDLE == coex_dm->bt_status)) {
- low_pwr_disable = true;
- btcoexist->btc_set(btcoexist,
- BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
- if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Busy + BT LPS!!\n");
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 1);
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi LPS + BT LPS!!\n");
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 1);
- }
+ /* define the office environment: high WiFi RSSI with BT HID + A2DP */
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) &&
+ (coex_sta->a2dp_exist)) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8821a2ant_sw_mech1(btcoexist, true, true, true, true);
- btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18);
+ /* sw all off */
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
- common = true;
- } else if (!wifi_connected &&
- (BT_8821A_2ANT_BT_STATUS_NON_IDLE ==
- coex_dm->bt_status)) {
- low_pwr_disable = false;
- btcoexist->btc_set(btcoexist,
- BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ return true;
+ } else if (coex_sta->pan_exist) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi IPS + BT Busy!!\n");
+ "[BTCoex], Wifi idle process for BT PAN exist!!\n");
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ /* sw all off */
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
- common = true;
- } else {
- low_pwr_disable = true;
- btcoexist->btc_set(btcoexist,
- BTC_SET_ACT_DISABLE_LOW_POWER,
- &low_pwr_disable);
-
- if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Busy + BT Busy!!\n");
- common = false;
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi LPS + BT Busy!!\n");
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 21);
-
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist,
- NORMAL_EXEC, true);
- else
- halbtc8821a2ant_dec_bt_pwr(btcoexist,
- NORMAL_EXEC, false);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
- common = true;
- }
- btc8821a2ant_sw_mech1(btcoexist, true, true, true, true);
+ return true;
}
- return common;
+ btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x18);
+ return false;
}
-static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
- int result)
+static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
+ bool common = false, wifi_connected = false, wifi_busy = false;
+ bool low_pwr_disable = false;
+ bool bt_hs_on = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!wifi_connected) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+ 0x8);
- if (tx_pause) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
-
- if (coex_dm->cur_ps_tdma == 71) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- }
- if (coex_dm->cur_ps_tdma == 9) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 13);
- coex_dm->tdma_adj_type = 13;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
+ "[BTCoex], Wifi non-connected idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 13);
- coex_dm->tdma_adj_type = 13;
- }
- }
+ common = true;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 71);
- coex_dm->tdma_adj_type = 71;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- }
- if (coex_dm->cur_ps_tdma == 13) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
+ if (BT_8821A_2ANT_BT_STATUS_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC,
+ false, false, 0x8);
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 71) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
- } else if (coex_dm->cur_ps_tdma == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
- } else if (coex_dm->cur_ps_tdma == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 71);
- coex_dm->tdma_adj_type = 71;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
- }
- }
- }
-}
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
-static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
- int result)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ btc8821a2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
- if (tx_pause) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
- if (coex_dm->cur_ps_tdma == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- }
- if (coex_dm->cur_ps_tdma == 9) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
- }
- }
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- }
- if (coex_dm->cur_ps_tdma == 13) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
- }
- }
- }
-}
+ btc8821a2ant_power_save_state(
+ btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
-static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
- int result)
-{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
- if (tx_pause) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 1\n");
- if (coex_dm->cur_ps_tdma == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 4) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- }
- if (coex_dm->cur_ps_tdma == 9) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 8);
- coex_dm->tdma_adj_type = 8;
- } else if (coex_dm->cur_ps_tdma == 13) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 16);
- coex_dm->tdma_adj_type = 16;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 8) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
- }
- }
- } else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TxPause = 0\n");
- if (coex_dm->cur_ps_tdma == 5) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 6) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 7) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 8) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- }
- if (coex_dm->cur_ps_tdma == 13) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 14) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 15) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 16) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- if (result == -1) {
- if (coex_dm->cur_ps_tdma == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 4);
- coex_dm->tdma_adj_type = 4;
- } else if (coex_dm->cur_ps_tdma == 9) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 12);
- coex_dm->tdma_adj_type = 12;
- }
- } else if (result == 1) {
- if (coex_dm->cur_ps_tdma == 4) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 3) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
- } else if (coex_dm->cur_ps_tdma == 12) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 11) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
- } else if (coex_dm->cur_ps_tdma == 10) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
+ common = true;
+ } else if (BT_8821A_2ANT_BT_STATUS_CON_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (bt_hs_on)
+ return false;
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC,
+ false, false, 0x8);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ btc8821a2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 0);
+
+ btc8821a2ant_power_save_state(
+ btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ common = true;
+ } else {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (wifi_busy) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ common = false;
+ } else {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ common =
+ btc8821a2ant_action_wifi_idle_process(
+ btcoexist);
}
}
}
+ return common;
}
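The added lines above belong to btc8821a2ant_is_common_action(), which decides whether a single generic policy (native power save, TDMA disabled, default coex table) covers the current wifi/BT combination, or whether a profile-specific action routine has to take over. A minimal sketch of that decision shape, with simplified stand-ins for the coex_dm/btcoexist queries; every name below is illustrative, not the driver's:

#include <stdbool.h>

enum bt_status { BT_NON_CONNECTED_IDLE, BT_CONNECTED_IDLE, BT_BUSY };

struct coex_snapshot {
	enum bt_status bt_status;
	bool wifi_connected;
	bool wifi_busy;
	bool bt_hs_on;			/* BT acting as a hotspot */
};

/* true: the generic "common" setup covers this case;
 * false: fall through to the per-profile action routines.
 * Mirrors the ordering visible in the hunk, not the exact conditions.
 */
static bool is_common_action(const struct coex_snapshot *s)
{
	if (s->bt_status == BT_CONNECTED_IDLE) {
		if (s->bt_hs_on)
			return false;	/* hotspot case is handled elsewhere */
		return true;		/* wifi connected + BT connected-idle */
	}
	if (s->bt_status == BT_BUSY && s->wifi_connected && !s->wifi_busy)
		return true;		/* wifi idle can absorb a busy BT link */
	return false;			/* wifi busy + BT busy: not common */
}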
-static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
- bool sco_hid, bool tx_pause,
- u8 max_interval)
+static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+ bool sco_hid, bool tx_pause,
+ u8 max_interval)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- static long up, dn, m, n, wait_count;
- /* 0: no change, +1: increase WiFi duration,
+ static long up, dn, m, n, wait_count;
+ /* 0 : no change
+ * +1: increase WiFi duration
* -1: decrease WiFi duration
*/
- int result;
- u8 retry_count = 0;
+ int result;
+ u8 retry_count = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], TdmaDurationAdjust()\n");
- if (coex_dm->reset_tdma_adjust) {
- coex_dm->reset_tdma_adjust = false;
+ if (coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 13);
- coex_dm->tdma_adj_type = 13;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
} else if (max_interval == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 14);
- coex_dm->tdma_adj_type = 14;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (max_interval == 3) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
} else {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 15);
- coex_dm->tdma_adj_type = 15;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
}
} else {
if (max_interval == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 9);
- coex_dm->tdma_adj_type = 9;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
} else if (max_interval == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 10);
- coex_dm->tdma_adj_type = 10;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (max_interval == 3) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
} else {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 11);
- coex_dm->tdma_adj_type = 11;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
}
}
} else {
if (tx_pause) {
if (max_interval == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 5);
- coex_dm->tdma_adj_type = 5;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
} else if (max_interval == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 6);
- coex_dm->tdma_adj_type = 6;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (max_interval == 3) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
} else {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 7);
- coex_dm->tdma_adj_type = 7;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
}
} else {
if (max_interval == 1) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 1);
- coex_dm->tdma_adj_type = 1;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 1);
+ coex_dm->ps_tdma_du_adj_type = 1;
} else if (max_interval == 2) {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 2);
- coex_dm->tdma_adj_type = 2;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (max_interval == 3) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
} else {
- halbtc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC,
- true, 3);
- coex_dm->tdma_adj_type = 3;
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
}
}
}
@@ -2273,7 +1860,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
up = 0;
if (dn == 2) {
- /* if retry count< 3 for 2*2 seconds,
+ /* if retry count < 3 for 2*2 seconds,
* shrink wifi duration
*/
if (wait_count <= 2)
@@ -2286,7 +1873,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
if (m >= 20)
m = 20;
- n = 3*m;
+ n = 3 * m;
up = 0;
dn = 0;
wait_count = 0;
@@ -2308,7 +1895,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
if (m >= 20)
m = 20;
- n = 3*m;
+ n = 3 * m;
up = 0;
dn = 0;
wait_count = 0;
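The small hunks above touch the core of TdmaDurationAdjust(): an adaptive loop driven by the BT retry counter, where m is a back-off multiplier (capped at 20) and n = 3 * m is how many clean polls must pass before the wifi slot is widened again. Only fragments of the loop appear in these hunks, so the sketch below reconstructs the surrounding branches from the visible lines and comments; the retry_count thresholds and initial values are assumptions:

/* returns +1 to widen the wifi slot, -1 to shrink it, 0 to hold */
static int tdma_adjust_step(int retry_count)
{
	static int up, dn, m = 1, n = 3, wait_count;
	int result = 0;

	wait_count++;
	if (retry_count == 0) {			/* clean BT poll */
		up++;
		if (--dn < 0)
			dn = 0;
		if (up >= n) {			/* n clean polls: widen */
			n = 3;
			up = dn = wait_count = 0;
			result = 1;
		}
	} else if (retry_count <= 3) {		/* light retries */
		dn++;
		if (--up < 0)
			up = 0;
		if (dn == 2) {			/* retry count < 3 for 2*2s */
			m = (wait_count <= 2) ? m + 1 : 1;
			if (m >= 20)
				m = 20;		/* cap from the hunk above */
			n = 3 * m;		/* wait longer before widening */
			up = dn = wait_count = 0;
			result = -1;
		}
	} else {				/* heavy retries: shrink now */
		m = (wait_count == 1) ? m + 1 : 1;
		if (m >= 20)
			m = 20;
		n = 3 * m;
		up = dn = wait_count = 0;
		result = -1;
	}
	return result;
}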
@@ -2319,624 +1906,1313 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], max Interval = %d\n", max_interval);
- if (max_interval == 1)
- btc8821a2_int1(btcoexist, tx_pause, result);
- else if (max_interval == 2)
- btc8821a2_int2(btcoexist, tx_pause, result);
- else if (max_interval == 3)
- btc8821a2_int3(btcoexist, tx_pause, result);
+
+ if (max_interval == 1) {
+ if (tx_pause) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 5);
+ coex_dm->ps_tdma_du_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 13);
+ coex_dm->ps_tdma_du_adj_type = 13;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 5);
+ coex_dm->ps_tdma_du_adj_type =
+ 5;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 13);
+ coex_dm->ps_tdma_du_adj_type =
+ 13;
+ }
+ }
+ } else {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 71);
+ coex_dm->ps_tdma_du_adj_type = 71;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 9);
+ coex_dm->ps_tdma_du_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->ps_tdma_du_adj_type =
+ 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 71);
+ coex_dm->ps_tdma_du_adj_type =
+ 71;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 9);
+ coex_dm->ps_tdma_du_adj_type =
+ 9;
+ }
+ }
+ }
+ } else if (max_interval == 2) {
+ if (tx_pause) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 6);
+ coex_dm->ps_tdma_du_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 14);
+ coex_dm->ps_tdma_du_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->ps_tdma_du_adj_type =
+ 6;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->ps_tdma_du_adj_type =
+ 14;
+ }
+ }
+ } else {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 2);
+ coex_dm->ps_tdma_du_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 10);
+ coex_dm->ps_tdma_du_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->ps_tdma_du_adj_type =
+ 2;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->ps_tdma_du_adj_type =
+ 10;
+ }
+ }
+ }
+ } else if (max_interval == 3) {
+ if (tx_pause) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 7);
+ coex_dm->ps_tdma_du_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 8);
+ coex_dm->ps_tdma_du_adj_type = 8;
+ }
+ if (coex_dm->cur_ps_tdma == 9) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 15);
+ coex_dm->ps_tdma_du_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 16);
+ coex_dm->ps_tdma_du_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->ps_tdma_du_adj_type =
+ 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->ps_tdma_du_adj_type =
+ 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->ps_tdma_du_adj_type =
+ 7;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->ps_tdma_du_adj_type =
+ 15;
+ }
+ }
+ } else {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 3);
+ coex_dm->ps_tdma_du_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 4);
+ coex_dm->ps_tdma_du_adj_type = 4;
+ }
+ if (coex_dm->cur_ps_tdma == 13) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 11);
+ coex_dm->ps_tdma_du_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8821a2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC, true, 12);
+ coex_dm->ps_tdma_du_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->ps_tdma_du_adj_type =
+ 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->ps_tdma_du_adj_type =
+ 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->ps_tdma_du_adj_type =
+ 3;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8821a2ant_ps_tdma(
+ btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->ps_tdma_du_adj_type =
+ 11;
+ }
+ }
+ }
+ }
}
/* if current PsTdma not match with the recorded one
* (when scan, dhcp...), then we have to adjust it back to
* the previous recorded one.
*/
- if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
- bool scan = false, link = false, roam = false;
+ if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+ bool scan = false, link = false, roam = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (!scan && !link && !roam) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
- coex_dm->tdma_adj_type);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ coex_dm->ps_tdma_du_adj_type);
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
-
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0x6);
}
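Most of the rewritten body above is one ladder written out again and again: given coex_dm->cur_ps_tdma and the direction in result, step to a neighbouring TDMA type and record it in ps_tdma_du_adj_type. As an editorial restatement (not the driver's code), the max_interval == 1, tx_pause == 0 ladder transcribed from the branches above fits in two lookup tables; type 71 is the widest wifi slot on each rung, 4 and 12 the narrowest:

/* next[cur] transition tables; 0 means no transition defined,
 * i.e. a ladder end, where the code keeps the current type.
 */
static const unsigned char shrink_wifi_next[72] = {
	[71] = 1, [1] = 2, [2] = 3, [3] = 4,		/* result == -1 */
	[9] = 10, [10] = 11, [11] = 12,
};
static const unsigned char widen_wifi_next[72] = {
	[4] = 3, [3] = 2, [2] = 1, [1] = 71,		/* result == +1 */
	[12] = 11, [11] = 10, [10] = 9,
};

static unsigned char ladder_step(unsigned char cur, int result)
{
	unsigned char next = 0;

	if (result == -1)
		next = shrink_wifi_next[cur];
	else if (result == 1)
		next = widen_wifi_next[cur];
	return next ? next : cur;
}

The tx_pause ladders have the same shape shifted onto types 5-8 and 13-16, and the first if/else chain in each branch only normalizes a type from the other ladder back onto this one, which is why the function grew so much when the three btc8821a2_int* helpers were folded back in.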
/* SCO only or SCO+PAN(HS)*/
-static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_sco(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ u8 wifi_rssi_state, bt_rssi_state;
u32 wifi_bw;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
- 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_LEGACY == wifi_bw) {
+ if (wifi_bw == BTC_WIFI_BW_LEGACY) {
/* for SCO quality at 11b/g mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC,
- 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
} else {
/* for SCO quality & wifi performance balance at 11n mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC,
- 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
+ btc8821a2ant_coex_table_with_type(btcoexist,
+ NORMAL_EXEC, 8);
+ } else {
+ if (bt_link_info->sco_only)
+ btc8821a2ant_coex_table_with_type(
+ btcoexist, NORMAL_EXEC, 17);
+ else
+ btc8821a2ant_coex_table_with_type(
+ btcoexist, NORMAL_EXEC, 12);
+ }
}
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism
- * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- */
-
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 0); /*for voice quality*/
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ /* for voice quality */
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
- /* sw mechanism */
+ /* sw mechanism */
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x18);
}
} else {
- /* fw mechanism
- * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 0); /*for voice quality*/
- } else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- false, 0); /*for voice quality*/
- }
-
- /* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x18);
}
}
}
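A change repeated across the action routines: raw coex-table register quadruples such as 0x5a5a5a5a/0x5a5a5a5a/0xffff/0x3 are replaced by btc8821a2ant_coex_table_with_type(..., n), a preset index. A sketch of that indirection, assuming the presets reproduce the removed raw values (the replacement need not keep them identical), with register roles as commonly laid out in these drivers:

typedef unsigned int u32;
typedef unsigned char u8;

struct coex_table_preset {
	u32 val0x6c0;		/* coex break table 0 */
	u32 val0x6c4;		/* coex break table 1 */
	u32 val0x6c8;
	u8  val0x6cc;		/* break table control */
};

/* type -> register values; the two rows shown are copied from the
 * removed raw calls, all other rows are omitted in this sketch.
 */
static const struct coex_table_preset coex_presets[18] = {
	[2] = { 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3 },	/* SCO at 11b/g */
	[7] = { 0x55ff55ff, 0x5a5a5a5a, 0xffff, 0x3 },	/* HID at 11b/g */
};

static const struct coex_table_preset *coex_table_with_type(u8 type)
{
	/* the driver then writes the four values out through its I/O ops */
	return &coex_presets[type];
}

Centralizing the values in one table lets later tuning commits touch a single row instead of every call site.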
-static void halbtc8821a2ant_action_hid(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_hid(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
- u32 wifi_bw;
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_LEGACY == wifi_bw) {
+ if (wifi_bw == BTC_WIFI_BW_LEGACY) {
/* for HID at 11b/g mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5a5a5a, 0xffff, 0x3);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
} else {
/* for HID quality & wifi performance balance at 11n mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5aea5aea, 0xffff, 0x3);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
}
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- } else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 13);
- }
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 24);
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 9);
- } else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 13);
- }
-
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
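Every action routine opens by sampling wifi_rssi_state and bt_rssi_state. These are not raw RSSI reads but small state machines with hysteresis: leaving the LOW state requires clearing the threshold plus a tolerance margin, while falling back only requires dropping below the threshold, which is why call sites test for both ..._HIGH and ..._STAY_HIGH. A minimal two-level sketch; the tolerance value is a placeholder, not the driver's constant:

#include <stdbool.h>

enum rssi_state { RSSI_LOW, RSSI_STAY_LOW, RSSI_HIGH, RSSI_STAY_HIGH };

#define RSSI_COEX_TOL 2		/* hysteresis margin in dB (placeholder) */

static enum rssi_state rssi_update(enum rssi_state prev, int rssi, int thresh)
{
	bool was_low = (prev == RSSI_LOW || prev == RSSI_STAY_LOW);

	if (was_low)			/* climbing out needs thresh + margin */
		return (rssi >= thresh + RSSI_COEX_TOL) ?
			RSSI_HIGH : RSSI_STAY_LOW;
	return (rssi < thresh) ?	/* falling back needs only thresh */
		RSSI_LOW : RSSI_STAY_HIGH;
}

The (0, 2, 15, 0) and (2, 35, 0) argument lists above appear to pass a sample index, a level count, and the thresholds; the new BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES constant replaces the bare 35 at the rewritten call sites.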
/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
-static void halbtc8821a2ant_action_a2dp(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_a2dp(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
- u32 wifi_bw;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u8 ap_num = 0;
+ u32 wifi_bw;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
- 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
- /* fw dac swing is called in btc8821a2ant_tdma_dur_adj()
- * halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- */
+ if ((ap_num >= 10) && BTC_RSSI_HIGH(wifi_rssi_state1) &&
+ BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
- else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
+ 0x8);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 1);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x6);
} else {
- btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x6);
}
+ return;
+ }
- /* sw mechanism */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ else
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+ } else {
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
+ }
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 1);
- } else {
- btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1);
- }
-
- /* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
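One oddity in the A2DP path above: ap_num is initialized to 0 and never assigned in this hunk, so the (ap_num >= 10) dense-environment branch is unreachable as written, and presumably a follow-up change fetches the AP count first. If the framework's usual getter pattern applies, the missing read would look roughly like this (treating its placement here as an assumption):

	u8 ap_num = 0;

	/* hypothetical: populate ap_num before the density test */
	btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);

	if ((ap_num >= 10) && BTC_RSSI_HIGH(wifi_rssi_state1) &&
	    BTC_RSSI_HIGH(bt_rssi_state)) {
		/* many APs and strong links on both sides: stay in native
		 * power save and rely on TDMA type 23 to protect A2DP
		 */
	}

Note also that the bt_rssi_state if/else just before the sw mechanism block selects TDMA type 23 in both arms, so that conditional is currently vestigial.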
-static void halbtc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
- u32 wifi_bw;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u32 wifi_bw;
- bt_info_ext = coex_sta->bt_info_ext;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
- 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
- /*fw dac swing is called in btc8821a2ant_tdma_dur_adj()
- *halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism */
- if (bt_info_ext&BIT0) {
- /*a2dp basic rate*/
- btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 2);
- } else {
- /*a2dp edr rate*/
- btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1);
- }
+ btc8821a2ant_tdma_duration_adjust(btcoexist, false, true, 2);
- /* sw mechanism */
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- /* fw mechanism */
- if (bt_info_ext&BIT0) {
- /* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 2);
- } else {
- /* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1);
- }
-
- /* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
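Another fixture the rewrite adds to each routine is btc8821a2ant_power_save_state(), called with BTC_PS_WIFI_NATIVE and (0x0, 0x0) when both RSSI states are high, or BTC_PS_LPS_ON and (0x50, 0x4) otherwise. A sketch of what such a dispatcher plausibly does; the meaning of the two extra arguments in the forced-LPS case is an educated guess, and the setter names are stubs:

typedef unsigned char u8;

enum ps_type { PS_WIFI_NATIVE, PS_LPS_ON, PS_LPS_OFF };

static void set_normal_lps(void) { /* return PS control to the stack */ }
static void set_force_lps(u8 lps_val, u8 rpwm_val)
{ (void)lps_val; (void)rpwm_val; /* pin LPS with these parameters */ }
static void set_lps_off(void) { /* keep wifi fully awake */ }

static void power_save_state(enum ps_type type, u8 lps_val, u8 rpwm_val)
{
	switch (type) {
	case PS_WIFI_NATIVE:
		set_normal_lps();	/* the (0x0, 0x0) calls above */
		break;
	case PS_LPS_ON:
		/* force LPS so the TDMA slots stay aligned with BT;
		 * the (0x50, 0x4) calls above
		 */
		set_force_lps(lps_val, rpwm_val);
		break;
	case PS_LPS_OFF:
		set_lps_off();
		break;
	}
}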
-static void halbtc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
- u32 wifi_bw;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u32 wifi_bw;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
- 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
- else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (BTC_WIFI_BW_LEGACY == wifi_bw) {
- /* for HID at 11b/g mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5aff5aff, 0xffff, 0x3);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
} else {
- /* for HID quality & wifi performance balance at 11n mode */
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5aff5aff, 0xffff, 0x3);
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
}
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- } else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- }
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26);
+ else
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26);
- /* sw mechanism */
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 1);
- } else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 5);
- }
-
- /* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
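The PAN(EDR) path above also shows the series mid-migration on btc8821a2ant_dec_bt_pwr(): this call site still passes a bool (true/false), while the A2DP and PAN(HS) paths already pass a numeric decrement (0 or 2), i.e. the helper is moving from "decrease: yes/no" to "decrease by N steps". A sketch of the level-based form, with the BT-side command reduced to a comment and an illustrative cached-field name; the identical type-26 arms of the bt_rssi_state test above suggest that conditional, too, awaits a follow-up:

typedef unsigned char u8;

struct coex_dm_sketch {
	u8 cur_bt_dec_pwr_lvl;		/* last level sent to BT */
};

/* ask the BT side to back its TX power off by dec_lvl steps */
static void dec_bt_pwr(struct coex_dm_sketch *dm, u8 dec_lvl)
{
	if (dm->cur_bt_dec_pwr_lvl == dec_lvl)
		return;			/* unchanged: skip the command */
	dm->cur_bt_dec_pwr_lvl = dec_lvl;
	/* pack dec_lvl into the coex payload and send it here */
}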
/* PAN(HS) only */
-static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
- u32 wifi_bw;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u32 wifi_bw;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
- btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism */
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
- true);
- } else {
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
- false);
- }
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ else
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- /* sw mechanism */
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- /* fw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_dec_bt_pwr(btcoexist,
- NORMAL_EXEC, true);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- halbtc8821a2ant_dec_bt_pwr(btcoexist,
- NORMAL_EXEC, false);
- }
-
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
-
- /* sw mechanism */
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
- } else {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
/* PAN(EDR)+A2DP */
-static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
- u32 wifi_bw;
+ u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state;
+ u32 wifi_bw;
- bt_info_ext = coex_sta->bt_info_ext;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
- 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2,
+ BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
+ else
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5afa5afa, 0xffff, 0x3);
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 12);
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- false, 3);
+ if (wifi_bw == BTC_WIFI_BW_HT40)
+ btc8821a2ant_tdma_duration_adjust(btcoexist, false,
+ true, 3);
else
- btc8821a2ant_tdma_dur_adj(btcoexist, false,
- true, 3);
+ btc8821a2ant_tdma_duration_adjust(btcoexist, false,
+ false, 3);
+ } else {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13);
+ btc8821a2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ }
- /* sw mechanism */
+ /* sw mechanism */
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- /* fw mechanism */
- if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
- btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 3);
- else
- btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 3);
-
- /* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, false,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
-static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state;
- u32 wifi_bw;
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
- 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist,
+ 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5f5a5f, 0xffff, 0x3);
+ if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+ /* for HID at 11b/g mode */
+ btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+ 0x5a5f5a5f, 0xffff, 0x3);
+ } else {
+ /* for HID quality & wifi performance balance at 11n mode */
+ btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+ 0x5a5f5a5f, 0xffff, 0x3);
+ }
- if (BTC_WIFI_BW_HT40 == wifi_bw) {
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3);
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3);
/* fw mechanism */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
} else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
}
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
/* fw mechanism */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 10);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
} else {
- halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
- true, 14);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
}
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
@@ -2944,42 +3220,70 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
/* HID+A2DP+PAN(EDR) */
static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
- u32 wifi_bw;
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
bt_info_ext = coex_sta->bt_info_ext;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
- 0, 2, 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
- halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5a5a5a5a, 0xffff, 0x3);
+ if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+ /* for HID at 11b/g mode */
+ btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ } else {
+ /* for HID quality & wifi performance balance at 11n mode */
+ btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ }
if (BTC_WIFI_BW_HT40 == wifi_bw) {
/* fw mechanism */
- btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 3);
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (bt_info_ext & BIT0) {
+ /* a2dp basic rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true, 3);
+ } else {
+ /* a2dp edr rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true, 3);
+ }
+ } else {
+ if (bt_info_ext & BIT0) {
+ /* a2dp basic rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true, 3);
+ } else {
+ /* a2dp edr rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true, 3);
+ }
+ }
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
/* fw mechanism */
@@ -2987,103 +3291,183 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
if (bt_info_ext&BIT0) {
/* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, true,
- false, 3);
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, false, 3);
} else {
/* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, true,
- false, 3);
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, false, 3);
}
} else {
if (bt_info_ext&BIT0) {
/* a2dp basic rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, true,
- true, 3);
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 3);
} else {
/* a2dp edr rate */
- btc8821a2ant_tdma_dur_adj(btcoexist, true,
- true, 3);
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 3);
}
}
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
-static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
{
- u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
- u32 wifi_bw;
+ u8 wifi_rssi_state, bt_rssi_state, bt_info_ext;
+ u32 wifi_bw;
bt_info_ext = coex_sta->bt_info_ext;
- wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
- 15, 0);
- bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
+ wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+ bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
- if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ if (BTC_RSSI_HIGH(bt_rssi_state))
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
else
- halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
- 0x5f5b5f5b, 0xffffff, 0x3);
+ if (wifi_bw == BTC_WIFI_BW_LEGACY) {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
+ 0x0, 0x0);
+ } else {
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14);
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50,
+ 0x4);
+ }
if (BTC_WIFI_BW_HT40 == wifi_bw) {
/* fw mechanism */
- btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2);
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (bt_info_ext & BIT0) {
+ /* a2dp basic rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 2);
+ } else {
+ /* a2dp edr rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 2);
+ }
+ } else {
+ if (bt_info_ext & BIT0) {
+ /* a2dp basic rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 2);
+ } else {
+ /* a2dp edr rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 2);
+ }
+ }
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, true, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
} else {
/* fw mechanism */
- btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2);
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (bt_info_ext & BIT0) {
+ /* a2dp basic rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 2);
+ } else {
+ /* a2dp edr rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 2);
+ }
+ } else {
+ if (bt_info_ext & BIT0) {
+ /* a2dp basic rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 2);
+ } else {
+ /* a2dp edr rate */
+ btc8821a2ant_tdma_duration_adjust(btcoexist,
+ true, true,
+ 2);
+ }
+ }
/* sw mechanism */
if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, true, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
} else {
- btc8821a2ant_sw_mech1(btcoexist, false, true,
- false, false);
- btc8821a2ant_sw_mech2(btcoexist, false, false,
- false, 0x18);
+ btc8821a2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
}
}
}
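Both rate branches above currently collapse to the same TDMA case; the split only matters once the basic-rate and EDR-rate tunings diverge. For reference, a sketch of the bt_info_ext bits this file actually consumes. The meanings are inferred from the surrounding comments and code, not from a BT specification, and the helper name is illustrative, not a driver symbol:

/* bt_info_ext bit usage as seen in this file (inferred):
 *   BIT0 - A2DP is running at the basic rate (else EDR rate)
 *   BIT1 - BT stack was reset; WiFi must resend its media status
 *   BIT3 - WiFi must (re)send ignore_wlan_act(false) to BT
 */
static inline bool bt_a2dp_basic_rate(u8 bt_info_ext)
{
	return bt_info_ext & 0x01;	/* BIT0 */
}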
-static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+static void btc8821a2ant_action_wifi_multi_port(struct btc_coexist *btcoexist)
+{
+ btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
+
+ /* sw all off */
+ btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+
+ btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+}
+
+static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- bool wifi_under_5g = false;
- u8 algorithm = 0;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool wifi_under_5g = false;
+ u8 algorithm = 0;
+ u32 num_of_wifi_link = 0;
+ u32 wifi_link_status = 0;
+ bool miracast_plus_bt = false;
+ bool scan = false, link = false, roam = false;
if (btcoexist->manual_control) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3091,30 +3475,73 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
return;
}
- btcoexist->btc_get(btcoexist,
- BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
- halbtc8821a2ant_coex_under_5g(btcoexist);
+ btc8821a2ant_coex_under_5g(btcoexist);
return;
}
- algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
+ if (coex_sta->under_ips) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
+ algorithm = btc8821a2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], BT is under inquiry/page scan !!\n");
- halbtc8821a2ant_bt_inquiry_page(btcoexist);
+ btc8821a2ant_action_bt_inquiry(btcoexist);
return;
}
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (scan || link || roam) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], WiFi is under Link Process !!\n");
+ btc8821a2ant_action_wifi_link_process(btcoexist);
+ return;
+ }
+
+ /* for P2P */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
+ &wifi_link_status);
+ num_of_wifi_link = wifi_link_status >> 16;
+
+ if ((num_of_wifi_link >= 2) ||
+ (wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
+ num_of_wifi_link, wifi_link_status);
+
+ if (bt_link_info->bt_link_exist)
+ miracast_plus_bt = true;
+ else
+ miracast_plus_bt = false;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+ &miracast_plus_bt);
+ btc8821a2ant_action_wifi_multi_port(btcoexist);
+
+ return;
+ }
+
+ miracast_plus_bt = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+ &miracast_plus_bt);
+
coex_dm->cur_algorithm = algorithm;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
- if (halbtc8821a2ant_is_common_action(btcoexist)) {
+ if (btc8821a2ant_is_common_action(btcoexist)) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant common\n");
coex_dm->reset_tdma_adjust = true;
@@ -3130,42 +3557,42 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
case BT_8821A_2ANT_COEX_ALGO_SCO:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = SCO\n");
- halbtc8821a2ant_action_sco(btcoexist);
+ btc8821a2ant_action_sco(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = HID\n");
- halbtc8821a2ant_action_hid(btcoexist);
+ btc8821a2ant_action_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = A2DP\n");
- halbtc8821a2ant_action_a2dp(btcoexist);
+ btc8821a2ant_action_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
- halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
+ btc8821a2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
- halbtc8821a2ant_action_pan_edr(btcoexist);
+ btc8821a2ant_action_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANHS:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = HS mode\n");
- halbtc8821a2ant_action_pan_hs(btcoexist);
+ btc8821a2ant_action_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
- halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
+ btc8821a2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
- halbtc8821a2ant_action_pan_edr_hid(btcoexist);
+ btc8821a2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3175,26 +3602,22 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
- halbtc8821a2ant_action_hid_a2dp(btcoexist);
+ btc8821a2ant_action_hid_a2dp(btcoexist);
break;
default:
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
- halbtc8821a2ant_coex_all_off(btcoexist);
+ btc8821a2ant_coex_all_off(btcoexist);
break;
}
coex_dm->pre_algorithm = coex_dm->cur_algorithm;
}
}
-/*============================================================
- *work around function start with wa_halbtc8821a2ant_
- *============================================================
- *============================================================
- * extern function start with EXhalbtc8821a2ant_
- *============================================================
- */
-void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
+/**************************************************************
+ * extern function start with ex_btc8821a2ant_
+ **************************************************************/
+void ex_btc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1tmp = 0;
@@ -3212,36 +3635,30 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
u1tmp |= 0x5;
btcoexist->btc_write_1byte(btcoexist, 0x790, u1tmp);
- /*Antenna config */
- halbtc8821a2ant_set_ant_path(btcoexist,
- BTC_ANT_WIFI_AT_MAIN, true, false);
+ /* Antenna config */
+ btc8821a2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN, true, false);
/* PTA parameter */
- halbtc8821a2ant_coex_table(btcoexist,
- FORCE_EXEC, 0x55555555, 0x55555555,
- 0xffff, 0x3);
+ btc8821a2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
/* Enable counter statistics */
- /*0x76e[3] = 1, WLAN_Act control by PTA*/
- btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ /* 0x76e[3] = 1, WLAN_Act control by PTA */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
}
-void ex_halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Coex Mechanism Init!!\n");
- halbtc8821a2ant_init_coex_dm(btcoexist);
+ btc8821a2ant_init_coex_dm(btcoexist);
}
-void
-ex_halbtc8821a2ant_display_coex_info(
- struct btc_coexist *btcoexist
- )
+void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -3397,7 +3814,7 @@ ex_halbtc8821a2ant_display_coex_info(
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
- coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_dec_bt_pwr_lvl,
coex_dm->cur_ignore_wlan_act);
}
@@ -3475,7 +3892,7 @@ ex_halbtc8821a2ant_display_coex_info(
btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
}
-void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3483,16 +3900,15 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
- halbtc8821a2ant_coex_all_off(btcoexist);
+ btc8821a2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
- /*halbtc8821a2ant_init_coex_dm(btcoexist);*/
}
}
-void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3507,7 +3923,7 @@ void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3520,7 +3936,7 @@ void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -3533,13 +3949,14 @@ void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
- u8 type)
+void ex_btc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
+ u8 type)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 h2c_parameter[3] = {0};
- u32 wifi_bw;
- u8 wifi_central_chnl;
+ u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifi_central_chnl;
+ u8 ap_num = 0;
if (BTC_MEDIA_CONNECT == type) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3549,7 +3966,7 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
"[BTCoex], MEDIA disconnect notify\n");
}
- /* only 2.4G we need to inform bt the chnl mask*/
+ /* only for 2.4G do we need to inform bt of the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
&wifi_central_chnl);
if ((BTC_MEDIA_CONNECT == type) &&
@@ -3557,10 +3974,15 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
h2c_parameter[0] = 0x1;
h2c_parameter[1] = wifi_central_chnl;
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
- if (BTC_WIFI_BW_HT40 == wifi_bw)
+ if (wifi_bw == BTC_WIFI_BW_HT40) {
h2c_parameter[2] = 0x30;
- else
+ } else {
h2c_parameter[2] = 0x20;
+ if (ap_num < 10)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
}
coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
@@ -3576,8 +3998,9 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
-void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
- u8 type) {
+void ex_btc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (type == BTC_PACKET_DHCP) {
@@ -3586,19 +4009,18 @@ void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
}
}
-void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
- u8 *tmp_buf, u8 length)
+void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- u8 bt_info = 0;
- u8 i, rsp_source = 0;
- static u32 set_bt_lna_cnt, set_bt_psd_mode;
- bool bt_busy = false, limited_dig = false;
- bool wifi_connected = false, bt_hs_on = false;
+ u8 bt_info = 0;
+ u8 i, rsp_source = 0;
+ bool bt_busy = false, limited_dig = false;
+ bool wifi_connected = false, bt_hs_on = false;
coex_sta->c2h_bt_info_req_sent = false;
- rsp_source = tmp_buf[0]&0xf;
+ rsp_source = tmp_buf[0] & 0xf;
if (rsp_source >= BT_INFO_SRC_8821A_2ANT_MAX)
rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
@@ -3610,7 +4032,7 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
- if (i == length-1) {
+ if (i == length - 1) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"0x%02x]\n", tmp_buf[i]);
} else {
@@ -3620,7 +4042,8 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if (BT_INFO_SRC_8821A_2ANT_WIFI_FW != rsp_source) {
- coex_sta->bt_retry_cnt = /* [3:0]*/
+ /* [3:0] */
+ coex_sta->bt_retry_cnt =
coex_sta->bt_info_c2h[rsp_source][2]&0xf;
coex_sta->bt_rssi =
@@ -3629,53 +4052,28 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_sta->bt_info_ext =
coex_sta->bt_info_c2h[rsp_source][4];
- /* Here we need to resend some wifi info to BT*/
- /* because bt is reset and loss of the info.*/
+ /* Here we need to resend some wifi info to BT
+ * because bt was reset and lost the info
+ */
if ((coex_sta->bt_info_ext & BIT1)) {
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
if (wifi_connected) {
- ex_halbtc8821a2ant_media_status_notify(btcoexist,
+ ex_btc8821a2ant_media_status_notify(btcoexist,
BTC_MEDIA_CONNECT);
} else {
- ex_halbtc8821a2ant_media_status_notify(btcoexist,
+ ex_btc8821a2ant_media_status_notify(btcoexist,
BTC_MEDIA_DISCONNECT);
}
- set_bt_psd_mode = 0;
- }
- if (set_bt_psd_mode <= 3) {
- halbtc8821a2ant_set_bt_psd_mode(btcoexist, FORCE_EXEC,
- 0x0); /*fix CH-BW mode*/
- set_bt_psd_mode++;
- }
-
- if (coex_dm->cur_bt_lna_constrain) {
- if (!(coex_sta->bt_info_ext & BIT2)) {
- if (set_bt_lna_cnt <= 3) {
- btc8821a2_set_bt_lna_const(btcoexist,
- FORCE_EXEC,
- true);
- set_bt_lna_cnt++;
- }
- }
- } else {
- set_bt_lna_cnt = 0;
}
if ((coex_sta->bt_info_ext & BIT3)) {
- halbtc8821a2ant_ignore_wlan_act(btcoexist,
- FORCE_EXEC, false);
+ btc8821a2ant_ignore_wlan_act(btcoexist,
+ FORCE_EXEC, false);
} else {
/* BT already NOT ignore Wlan active, do nothing here.*/
}
-
- if ((coex_sta->bt_info_ext & BIT4)) {
- /* BT auto report already enabled, do nothing*/
- } else {
- halbtc8821a2ant_bt_auto_report(btcoexist,
- FORCE_EXEC, true);
- }
}
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
@@ -3718,8 +4116,7 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_IDLE;
}
- if (bt_hs_on)
- coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_NON_IDLE;
+ btc8821a2ant_update_bt_link_info(btcoexist);
}
if (BT_8821A_2ANT_BT_STATUS_NON_IDLE == coex_dm->bt_status)
@@ -3736,27 +4133,27 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
btcoexist->btc_set(btcoexist,
BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
- halbtc8821a2ant_run_coexist_mechanism(btcoexist);
+ btc8821a2ant_run_coexist_mechanism(btcoexist);
}
-void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Halt notify\n");
- halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
- ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+ btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ ex_btc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
}
-void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- static u8 dis_ver_info_cnt;
- u32 fw_ver = 0, bt_patch_ver = 0;
+ static u8 dis_ver_info_cnt;
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ u32 fw_ver = 0, bt_patch_ver = 0;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], ==========================Periodical===========================\n");
@@ -3785,7 +4182,7 @@ void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
"[BTCoex], ****************************************************************\n");
}
- halbtc8821a2ant_query_bt_info(btcoexist);
- halbtc8821a2ant_monitor_bt_ctr(btcoexist);
- btc8821a2ant_mon_bt_en_dis(btcoexist);
+ btc8821a2ant_query_bt_info(btcoexist);
+ btc8821a2ant_monitor_bt_ctr(btcoexist);
+ btc8821a2ant_monitor_wifi_ctr(btcoexist);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
index b4cf1f53d510..535ca10e910b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
@@ -38,6 +38,11 @@
#define BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT 2
+/* WiFi RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES 42
+/* BT RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES 46
+
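These thresholds pair with BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT above to give the RSSI state helpers hysteresis: a reading must clear the threshold plus the tolerance to be reported high, but only falls back once it drops below the bare threshold. A minimal sketch of that pattern, assuming the same up/down rule as the driver's btc8821a2ant_*_rssi_state() helpers (the enum and function names here are illustrative):

enum rssi_state { RSSI_LOW, RSSI_STAY_LOW, RSSI_HIGH, RSSI_STAY_HIGH };

static enum rssi_state rssi_hyst_state(int rssi, int thresh, int tol,
				       enum rssi_state prev)
{
	if (prev == RSSI_LOW || prev == RSSI_STAY_LOW) {
		if (rssi >= thresh + tol)	/* rising: needs margin */
			return RSSI_HIGH;
		return RSSI_STAY_LOW;
	}
	if (rssi < thresh)			/* falling: bare threshold */
		return RSSI_LOW;
	return RSSI_STAY_HIGH;
}

With a threshold of 42 (WiFi) or 46 (BT) and a tolerance of 2, the RSSI must reach 44 or 48 before the TDMA policy switches, but only relaxes once it drops below 42 or 46, so readings hovering at the switch point do not flap the policy.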
enum _BT_INFO_SRC_8821A_2ANT {
BT_INFO_SRC_8821A_2ANT_WIFI_FW = 0x0,
BT_INFO_SRC_8821A_2ANT_BT_RSP = 0x1,
@@ -69,8 +74,8 @@ enum _BT_8821A_2ANT_COEX_ALGO {
struct coex_dm_8821a_2ant {
/* fw mechanism */
- bool pre_dec_bt_pwr;
- bool cur_dec_bt_pwr;
+ bool pre_dec_bt_pwr_lvl;
+ bool cur_dec_bt_pwr_lvl;
bool pre_bt_lna_constrain;
bool cur_bt_lna_constrain;
u8 pre_bt_psd_mode;
@@ -82,8 +87,9 @@ struct coex_dm_8821a_2ant {
u8 pre_ps_tdma;
u8 cur_ps_tdma;
u8 ps_tdma_para[5];
- u8 tdma_adj_type;
+ u8 ps_tdma_du_adj_type;
bool reset_tdma_adjust;
+ bool auto_tdma_adjust;
bool pre_ps_tdma_on;
bool cur_ps_tdma_on;
bool pre_bt_auto_report;
@@ -118,6 +124,10 @@ struct coex_dm_8821a_2ant {
u8 cur_algorithm;
u8 bt_status;
u8 wifi_chnl_info[3];
+ u8 pre_lps;
+ u8 cur_lps;
+ u8 pre_rpwm;
+ u8 cur_rpwm;
};
struct coex_sta_8821a_2ant {
@@ -141,6 +151,19 @@ struct coex_sta_8821a_2ant {
bool c2h_bt_inquiry_page;
u8 bt_retry_cnt;
u8 bt_info_ext;
+
+ u32 crc_ok_cck;
+ u32 crc_ok_11g;
+ u32 crc_ok_11n;
+ u32 crc_ok_11n_agg;
+
+ u32 crc_err_cck;
+ u32 crc_err_11g;
+ u32 crc_err_11n;
+ u32 crc_err_11n_agg;
+
+ u8 coex_table_type;
+ bool force_lps_on;
};
/*===========================================
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 150aeb8e79d1..f13000612913 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -466,7 +466,7 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
case BTC_SET_ACT_DISABLE_LOW_POWER:
halbtc_disable_low_power();
break;
- case BTC_SET_ACT_UPDATE_ra_mask:
+ case BTC_SET_ACT_UPDATE_RAMASK:
btcoexist->bt_info.ra_mask = *u32_tmp;
break;
case BTC_SET_ACT_SEND_MIMO_PS:
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 601bbe1d22b3..c8271135aaaa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -66,6 +66,15 @@
#define BTC_ANT_WIFI_AT_CPL_MAIN 0
#define BTC_ANT_WIFI_AT_CPL_AUX 1
+enum btc_bt_reg_type {
+ BTC_BT_REG_RF = 0,
+ BTC_BT_REG_MODEM = 1,
+ BTC_BT_REG_BLUEWIZE = 2,
+ BTC_BT_REG_VENDOR = 3,
+ BTC_BT_REG_LE = 4,
+ BTC_BT_REG_MAX
+};
+
enum btc_chip_interface {
BTC_INTF_UNKNOWN = 0,
BTC_INTF_PCI = 1,
@@ -139,6 +148,7 @@ struct btc_board_info {
u8 pg_ant_num; /* pg ant number */
u8 btdm_ant_num; /* ant number for btdm */
u8 btdm_ant_pos;
+ u8 single_ant_path; /* currently used for 8723b only, 1=>s0, 0=>s1 */
bool bt_exist;
};
@@ -205,6 +215,7 @@ enum btc_get_type {
BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
BTC_GET_BL_WIFI_UNDER_B_MODE,
BTC_GET_BL_EXT_SWITCH,
+ BTC_GET_BL_WIFI_IS_IN_MP_MODE,
/* type s4Byte */
BTC_GET_S4_WIFI_RSSI,
@@ -249,6 +260,8 @@ enum btc_set_type {
BTC_SET_BL_TO_REJ_AP_AGG_PKT,
BTC_SET_BL_BT_CTRL_AGG_SIZE,
BTC_SET_BL_INC_SCAN_DEV_NUM,
+ BTC_SET_BL_BT_TX_RX_MASK,
+ BTC_SET_BL_MIRACAST_PLUS_BT,
/* type u1Byte */
BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
@@ -275,7 +288,7 @@ enum btc_set_type {
BTC_SET_ACT_NORMAL_LPS,
BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
BTC_SET_ACT_DISABLE_LOW_POWER,
- BTC_SET_ACT_UPDATE_ra_mask,
+ BTC_SET_ACT_UPDATE_RAMASK,
BTC_SET_ACT_SEND_MIMO_PS,
/* BT Coex related */
BTC_SET_ACT_CTRL_BT_INFO,
@@ -366,6 +379,7 @@ typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
+typedef void (*bfp_btc_local_reg_w1)(void *btc_context, u32 reg_addr, u8 data);
typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
u8 bit_mask, u8 data);
@@ -388,6 +402,9 @@ typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
+ u32 value);
+
typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
struct btc_bt_info {
@@ -459,6 +476,7 @@ struct btc_bt_link_info {
bool hid_only;
bool pan_exist;
bool pan_only;
+ bool slave_role;
};
enum btc_antenna_pos {
@@ -492,6 +510,7 @@ struct btc_coexist {
bfp_btc_w2 btc_write_2byte;
bfp_btc_r4 btc_read_4byte;
bfp_btc_w4 btc_write_4byte;
+ bfp_btc_local_reg_w1 btc_write_local_reg_1byte;
bfp_btc_set_bb_reg btc_set_bb_reg;
bfp_btc_get_bb_reg btc_get_bb_reg;
@@ -505,6 +524,8 @@ struct btc_coexist {
bfp_btc_get btc_get;
bfp_btc_set btc_set;
+
+ bfp_btc_set_bt_reg btc_set_bt_reg;
};
bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 558c31bf5c80..1bf3eb25c1da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -435,7 +435,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM regdomain: 0x%0x conuntry code: %d\n",
+ "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 09c908d4cf91..dd3e12b74447 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -444,10 +444,10 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (status->rx_is40Mhzpacket)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (status->is_ht)
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
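The same mechanical conversion repeats in every trx.c below: mac80211 moved bandwidth and modulation out of rx_status->flag into dedicated fields of struct ieee80211_rx_status. A before/after sketch of the mapping applied throughout this diff (both new fields default to 20 MHz legacy when left at zero):

/* old: packed into rx_status->flag */
if (status->rx_is40Mhzpacket)
	rx_status->flag |= RX_FLAG_40MHZ;
if (status->is_ht)
	rx_status->flag |= RX_FLAG_HT;

/* new: dedicated fields */
if (status->rx_is40Mhzpacket)
	rx_status->bw = RATE_INFO_BW_40;	/* RATE_INFO_BW_20 == 0 */
if (status->is_ht)
	rx_status->encoding = RX_ENC_HT;	/* RX_ENC_LEGACY == 0 */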
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 3616ba21959d..94a4b39437cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -369,10 +369,10 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (stats->rx_is40Mhzpacket)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (stats->is_ht)
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 1611e42479d9..41422e4da8b7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -329,9 +329,9 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
if (!GET_RX_DESC_SWDEC(pdesc))
rx_status->flag |= RX_FLAG_DECRYPTED;
if (GET_RX_DESC_BW(pdesc))
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (GET_RX_DESC_RX_HT(pdesc))
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
@@ -398,9 +398,9 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!GET_RX_DESC_SWDEC(rxdesc))
rx_status->flag |= RX_FLAG_DECRYPTED;
if (GET_RX_DESC_BW(rxdesc))
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (GET_RX_DESC_RX_HT(rxdesc))
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
/* Data rate */
rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats.is_ht,
false, stats.rate);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 5c9c8741134f..86019f654428 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -503,9 +503,9 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
if (!GET_RX_DESC_SWDEC(pdesc))
rx_status->flag |= RX_FLAG_DECRYPTED;
if (GET_RX_DESC_BW(pdesc))
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (GET_RX_DESC_RXHT(pdesc))
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (stats->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 9fec345a42a0..1f42ce5f8f27 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -468,8 +468,10 @@ void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus)
#define PSPOLL_PG 2
#define NULL_PG 3
#define PROBERSP_PG 4 /* ->5 */
+#define QOS_NULL_PG 6
+#define BT_QOS_NULL_PG 7
-#define TOTAL_RESERVED_PKT_LEN 768
+#define TOTAL_RESERVED_PKT_LEN 1024
static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
/* page 0 beacon */
@@ -570,6 +572,42 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 6 qos null data */
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+ 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 7 BT-qos null data */
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+ 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -595,6 +633,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u8 *p_pspoll;
u8 *nullfunc;
u8 *p_probersp;
+ u8 *qosnull;
+ u8 *btqosnull;
/*---------------------------------------------------------
* (1) beacon
*---------------------------------------------------------
@@ -636,6 +676,28 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
+ /*---------------------------------------------------------
+ * (5) QoS null data
+ *----------------------------------------------------------
+ */
+ qosnull = &reserved_page_packet[QOS_NULL_PG * 128];
+ SET_80211_HDR_ADDRESS1(qosnull, mac->bssid);
+ SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(qosnull, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOS_NULL_PG);
+
+ /*---------------------------------------------------------
+ * (6) BT QoS null data
+ *----------------------------------------------------------
+ */
+ btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128];
+ SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
+ SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOS_NULL_PG);
+
totalpacketlen = TOTAL_RESERVED_PKT_LEN;
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD ,
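The bump from 768 to 1024 bytes is exactly the two new 128-byte pages: this buffer is indexed in 128-byte pages, which is why the new packets are located at PAGE * 128. A sketch of the layout math; the page size is inferred from that indexing rather than stated in the source, and the beacon appears to span pages 0-1 since PSPOLL_PG is 2:

/* pages 0-5 were already used (probe resp spans 4-5):
 *   6 * 128 = 768 bytes (old TOTAL_RESERVED_PKT_LEN)
 * pages 6 (qos null) and 7 (BT qos null) are new:
 *   8 * 128 = 1024 bytes (new TOTAL_RESERVED_PKT_LEN)
 */
u8 *qosnull   = &reserved_page_packet[QOS_NULL_PG * 128];
u8 *btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128];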
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
index 72da3f92f02c..af8271967a88 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
@@ -165,6 +165,10 @@ enum rtl8192e_c2h_evt {
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __val) \
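The two new macros grow the rsvd-page H2C payload from 3 to 5 bytes, one byte per packet location; SET_BITS_TO_LE_1BYTE(p, 0, 8, v) simply stores v into the byte at p. A sketch of the resulting payload, with the byte-0 through byte-2 slots read off the pre-existing macros that are only partially visible in this hunk:

/* H2C_RSVDPAGE payload, one u8 page index per slot:
 *   byte 0: probe response      byte 1: ps-poll
 *   byte 2: null data
 *   byte 3: qos null data       (new)
 *   byte 4: BT qos null data    (new)
 */
u8 u1rsvdpageloc[5] = {0};

SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOS_NULL_PG);
SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOS_NULL_PG);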
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 56ca7f5351ea..6f5098a18655 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -699,9 +699,9 @@ static bool _rtl92ee_llt_table_init(struct ieee80211_hw *hw)
u8 txpktbuf_bndy;
u8 u8tmp, testcnt = 0;
- txpktbuf_bndy = 0xFA;
+ txpktbuf_bndy = 0xF7;
- rtl_write_dword(rtlpriv, REG_RQPN, 0x80E90808);
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x80E60808);
rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x3d00 - 1);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 07440e9a8ca2..b1864bb07c2c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -394,10 +394,10 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (status->rx_is40Mhzpacket)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (status->is_ht)
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index 12cef01e593b..a01dbd31d1b4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -289,10 +289,10 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (stats->rx_is40Mhzpacket)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (stats->is_ht)
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index c9838f52a7ea..f713c7249fed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -317,10 +317,10 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (status->rx_is40Mhzpacket)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (status->is_ht)
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index c7ee9ba5e26e..4fc839b1d601 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -284,8 +284,10 @@ void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus)
#define PSPOLL_PG 2
#define NULL_PG 3
#define PROBERSP_PG 4 /* ->5 */
+#define QOS_NULL_PG 6
+#define BT_QOS_NULL_PG 7
-#define TOTAL_RESERVED_PKT_LEN 768
+#define TOTAL_RESERVED_PKT_LEN 1024 /* can be up to 1280 (tx_bndy=245) */
static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
/* page 0 beacon */
@@ -390,11 +392,48 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 6 qos null data */
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+ 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 7 BT-qos null data */
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+ 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
};
void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
@@ -413,6 +452,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
u8 *p_pspoll;
u8 *nullfunc;
u8 *p_probersp;
+ u8 *qosnull;
+ u8 *btqosnull;
/*---------------------------------------------------------
* (1) beacon
*---------------------------------------------------------
@@ -454,6 +495,28 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
+ /*---------------------------------------------------------
+ * (5) QoS Null
+ *---------------------------------------------------------
+ */
+ qosnull = &reserved_page_packet[QOS_NULL_PG * 128];
+ SET_80211_HDR_ADDRESS1(qosnull, mac->bssid);
+ SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(qosnull, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOS_NULL_PG);
+
+ /*---------------------------------------------------------
+ * (6) BT QoS Null
+ *---------------------------------------------------------
+ */
+ btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128];
+ SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
+ SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOS_NULL_PG);
+
totalpacketlen = TOTAL_RESERVED_PKT_LEN;
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
@@ -461,7 +524,7 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
&reserved_page_packet[0], totalpacketlen);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"rtl8723be_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
- u1rsvdpageloc, 3);
+ u1rsvdpageloc, sizeof(u1rsvdpageloc));
skb = dev_alloc_skb(totalpacketlen);
memcpy((u8 *)skb_put(skb, totalpacketlen),
@@ -476,7 +539,7 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n",
- u1rsvdpageloc, 3);
+ u1rsvdpageloc, sizeof(u1rsvdpageloc));
rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
index c652fa1339a7..2482b3bc2bfa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
@@ -139,6 +139,10 @@ enum rtl8723b_c2h_evt {
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 92dbfa8f297f..8c0ac96b5430 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -91,7 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- char *fw_name = "rtlwifi/rtl8723befw.bin";
+ char *fw_name = "rtlwifi/rtl8723befw_36.bin";
rtl8723be_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -187,8 +187,16 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- pr_err("Failed to request firmware!\n");
- return 1;
+ /* Failed to get firmware. Check if an old version is available */
+ fw_name = "rtlwifi/rtl8723befw.bin";
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
+ if (err) {
+ pr_err("Failed to request firmware!\n");
+ return 1;
+ }
}
return 0;
}
@@ -384,6 +392,7 @@ MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723befw_36.bin");
module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444);
module_param_named(debug_level, rtl8723be_mod_params.debug_level, int, 0644);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 6f65003a895a..3c6ce994c6aa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -373,10 +373,10 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (status->rx_is40Mhzpacket)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
if (status->is_ht)
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index a504dfae4ed3..73350103b736 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -678,12 +678,13 @@ void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw)
#define PSPOLL_PG 1
#define NULL_PG 2
#define QOSNULL_PG 3
-#define ARPRESP_PG 4
-#define REMOTE_PG 5
-#define GTKEXT_PG 6
+#define BT_QOSNULL_PG 4
+#define ARPRESP_PG 5
+#define REMOTE_PG 6
+#define GTKEXT_PG 7
-#define TOTAL_RESERVED_PKT_LEN_8812 3584
-#define TOTAL_RESERVED_PKT_LEN_8821 1792
+#define TOTAL_RESERVED_PKT_LEN_8812 4096
+#define TOTAL_RESERVED_PKT_LEN_8821 2048
static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
/* page 0: beacon */
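Splicing BT_QOSNULL_PG in at index 4 pushes the three wowlan pages up by one and grows each buffer by exactly one page: the 8812 uses 512-byte pages (8 × 512 = 4096) and the 8821 256-byte pages (8 × 256 = 2048), which is where the new TOTAL_RESERVED_PKT_LEN values come from. A compile-time restatement of that arithmetic (illustrative only; the trailing underscores avoid clashing with the real macros):

```c
enum {
	BEACON_PG_ = 0, PSPOLL_PG_, NULL_PG_, QOSNULL_PG_,
	BT_QOSNULL_PG_, ARPRESP_PG_, REMOTE_PG_, GTKEXT_PG_,
};

_Static_assert((GTKEXT_PG_ + 1) * 512 == 4096, "8812: 8 pages of 512 B");
_Static_assert((GTKEXT_PG_ + 1) * 256 == 2048, "8821: 8 pages of 256 B");
```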
@@ -813,13 +814,46 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* page 4: BT Qos null data */
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+ 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x3C, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* page 4~6 is for wowlan */
- /* page 4: ARP resp */
+ /* pages 5~7 are for wowlan */
+ /* page 5: ARP resp */
0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
@@ -852,7 +886,7 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* page 5: H2C_REMOTE_WAKE_CTRL_INFO */
+ /* page 6: H2C_REMOTE_WAKE_CTRL_INFO */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -885,7 +919,7 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* page 6: Rsvd GTK extend memory (zero memory) */
+ /* page 7: Rsvd GTK extend memory (zero memory) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1176,13 +1210,78 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* page 4: BT Qos null data */
+ 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
+ 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
+ 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x3C, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* page 4~6 is for wowlan */
- /* page 4: ARP resp */
+ /* pages 5~7 are for wowlan */
+ /* page 5: ARP resp */
0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7,
0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02,
0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00,
@@ -1247,7 +1346,7 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* page 5: H2C_REMOTE_WAKE_CTRL_INFO */
+ /* page 6: H2C_REMOTE_WAKE_CTRL_INFO */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1312,7 +1411,7 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* page 6: Rsvd GTK extend memory (zero memory) */
+ /* page 7: Rsvd GTK extend memory (zero memory) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1394,6 +1493,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
u8 *p_pspoll;
u8 *nullfunc;
u8 *qosnull;
+ u8 *btqosnull;
u8 *arpresp;
/*---------------------------------------------------------
@@ -1441,12 +1541,23 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG);
+ /*---------------------------------------------------------
+ * (5) BT Qos null data
+ *----------------------------------------------------------
+ */
+ btqosnull = &reserved_page_packet_8812[BT_QOSNULL_PG * 512];
+ SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
+ SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG);
+
if (!dl_whole_packets) {
- totalpacketlen = 512 * (QOSNULL_PG + 1) - 40;
+ totalpacketlen = 512 * (BT_QOSNULL_PG + 1) - 40;
goto out;
}
/*---------------------------------------------------------
- * (5) ARP Resp
+ * (6) ARP Resp
*----------------------------------------------------------
*/
arpresp = &reserved_page_packet_8812[ARPRESP_PG * 512];
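With the larger page set, the partial download (dl_whole_packets false) now stops after the BT QoS-null page rather than the QoS-null page. In numbers: the 8812 uploads 512 × (4 + 1) − 40 = 2520 bytes, and the 8821 variant further below 256 × (4 + 1) − 40 = 1240; the 40-byte trim on the last page is carried over from the original formula, whose derivation the patch does not spell out.

```c
/* Worked check of the new partial-download bound (BT_QOSNULL_PG == 4). */
_Static_assert(512 * (4 + 1) - 40 == 2520, "8812 partial download, bytes");
_Static_assert(256 * (4 + 1) - 40 == 1240, "8821 partial download, bytes");
```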
@@ -1457,14 +1568,14 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG);
/*---------------------------------------------------------
- * (6) Remote Wake Ctrl
+ * (7) Remote Wake Ctrl
*----------------------------------------------------------
*/
SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2,
REMOTE_PG);
/*---------------------------------------------------------
- * (7) GTK Ext Memory
+ * (8) GTK Ext Memory
*----------------------------------------------------------
*/
SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG);
@@ -1518,6 +1629,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
u8 *p_pspoll;
u8 *nullfunc;
u8 *qosnull;
+ u8 *btqosnull;
u8 *arpresp;
/*---------------------------------------------------------
@@ -1565,12 +1677,23 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG);
+ /*---------------------------------------------------------
+ * (5) BT Qos null data
+ *----------------------------------------------------------
+ */
+ btqosnull = &reserved_page_packet_8821[BT_QOSNULL_PG * 256];
+ SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid);
+ SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG);
+
if (!dl_whole_packets) {
- totalpacketlen = 256 * (QOSNULL_PG + 1) - 40;
+ totalpacketlen = 256 * (BT_QOSNULL_PG + 1) - 40;
goto out;
}
/*---------------------------------------------------------
- * (5) ARP Resp
+ * (6) ARP Resp
*----------------------------------------------------------
*/
arpresp = &reserved_page_packet_8821[ARPRESP_PG * 256];
@@ -1581,14 +1704,14 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG);
/*---------------------------------------------------------
- * (6) Remote Wake Ctrl
+ * (7) Remote Wake Ctrl
*----------------------------------------------------------
*/
SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2,
REMOTE_PG);
/*---------------------------------------------------------
- * (7) GTK Ext Memory
+ * (8) GTK Ext Memory
*----------------------------------------------------------
*/
SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
index 90a98ed879f7..98d871afd92a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
@@ -229,6 +229,8 @@ enum rtl8821a_h2c_cmd {
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val)
/* _MEDIA_STATUS_RPT_PARM_CMD1 */
#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 363d2f28da1f..3571ce4bd276 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -842,12 +842,8 @@ static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw)
bool status;
maxpage = 255;
- txpktbuf_bndy = 0xF8;
- rqpn = 0x80e70808;
- if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) {
- txpktbuf_bndy = 0xFA;
- rqpn = 0x80e90808;
- }
+ txpktbuf_bndy = 0xF7;
+ rqpn = 0x80e60808;
rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, MAX_RX_DMA_BUFFER_SIZE - 1);
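Both chips now share a boundary one page below the 8821's old value, consistent with the extra reserved page added in fw.c. Assuming the conventional Realtek RQPN packing — load flag in bit 31, public-queue page count in bits 23:16, low- and high-queue counts in the two low bytes; an assumption, since this diff never decodes the register — the public-queue count steps down with it (0xe7 to 0xe6):

```c
/* Hedged decode of the new RQPN value; the field packing is assumed. */
#define RQPN_LD		(1u << 31)
#define RQPN(pubq, lpq, hpq)					\
	(RQPN_LD | ((unsigned)(pubq) << 16) |			\
	 ((unsigned)(lpq) << 8) | (unsigned)(hpq))

_Static_assert(RQPN(0xe6, 0x08, 0x08) == 0x80e60808, "value written above");
```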
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 8da874cbec1a..aa3ccc740521 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -358,6 +358,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw)
return rtl8821ae_phy_rf6052_config(hw);
}
+static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp;
+
+ switch (rtlhal->rfe_type) {
+ case 3:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+ break;
+ case 4:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
+ break;
+ case 5:
+ rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+ rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ break;
+ case 1:
+ if (rtlpriv->btcoexist.bt_coexistence) {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+ 0x77777777);
+ rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ break;
+ }
+ case 0:
+ case 2:
+ default:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ break;
+ }
+}
+
+static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp;
+
+ switch (rtlhal->rfe_type) {
+ case 0:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ break;
+ case 1:
+ if (rtlpriv->btcoexist.bt_coexistence) {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+ 0x77337717);
+ rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ } else {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
+ 0x77337717);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+ 0x77337717);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ }
+ break;
+ case 3:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+ break;
+ case 5:
+ rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+ rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ break;
+ case 2:
+ case 4:
+ default:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ break;
+ }
+}
+
u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
u8 rf_path)
{
@@ -552,14 +653,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
/* 0x82C[1:0] = 2b'00 */
rtl_set_bbreg(hw, 0x82c, 0x3, 0);
}
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
- 0x77777777);
- rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
- 0x77777777);
- rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
- rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
- }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ _rtl8812ae_phy_set_rfe_reg_24g(hw);
rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
@@ -614,14 +710,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
/* 0x82C[1:0] = 2'b00 */
rtl_set_bbreg(hw, 0x82c, 0x3, 1);
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
- 0x77337777);
- rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
- 0x77337777);
- rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
- rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
- }
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ _rtl8812ae_phy_set_rfe_reg_5g(hw);
rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
@@ -660,6 +750,88 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
return;
}
+static bool _rtl8821ae_check_positive(struct ieee80211_hw *hw,
+ const u32 condition1,
+ const u32 condition2)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u32 cut_ver = ((rtlhal->version & CHIP_VER_RTL_MASK)
+ >> CHIP_VER_RTL_SHIFT);
+ u32 intf = (rtlhal->interface == INTF_USB ? BIT(1) : BIT(0));
+
+ u8 board_type = ((rtlhal->board_type & BIT(4)) >> 4) << 0 | /* _GLNA */
+ ((rtlhal->board_type & BIT(3)) >> 3) << 1 | /* _GPA */
+ ((rtlhal->board_type & BIT(7)) >> 7) << 2 | /* _ALNA */
+ ((rtlhal->board_type & BIT(6)) >> 6) << 3 | /* _APA */
+ ((rtlhal->board_type & BIT(2)) >> 2) << 4; /* _BT */
+
+ u32 cond1 = condition1, cond2 = condition2;
+ u32 driver1 = cut_ver << 24 | /* CUT ver */
+ 0 << 20 | /* interface 2/2 */
+ 0x04 << 16 | /* platform */
+ rtlhal->package_type << 12 |
+ intf << 8 | /* interface 1/2 */
+ board_type;
+
+ u32 driver2 = rtlhal->type_glna << 0 |
+ rtlhal->type_gpa << 8 |
+ rtlhal->type_alna << 16 |
+ rtlhal->type_apa << 24;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+ cond1, cond2);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+ driver1, driver2);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ " (Board, Package) = (0x%X, 0x%X)\n",
+ rtlhal->board_type, rtlhal->package_type);
+
+ /*============== Value Defined Check ===============*/
+ /*QFN Type [15:12] and Cut Version [27:24] need to do value check*/
+
+ if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) !=
+ (driver1 & 0x0000F000)))
+ return false;
+ if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) !=
+ (driver1 & 0x0F000000)))
+ return false;
+
+ /*=============== Bit Defined Check ================*/
+ /* We don't care [31:28] */
+
+ cond1 &= 0x00FF0FFF;
+ driver1 &= 0x00FF0FFF;
+
+ if ((cond1 & driver1) == cond1) {
+ u32 mask = 0;
+
+ if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE*/
+ return true;
+
+ if ((cond1 & BIT(0)) != 0) /*GLNA*/
+ mask |= 0x000000FF;
+ if ((cond1 & BIT(1)) != 0) /*GPA*/
+ mask |= 0x0000FF00;
+ if ((cond1 & BIT(2)) != 0) /*ALNA*/
+ mask |= 0x00FF0000;
+ if ((cond1 & BIT(3)) != 0) /*APA*/
+ mask |= 0xFF000000;
+
+ /* BoardType of each RF path is matched*/
+ if ((cond2 & mask) == (driver2 & mask))
+ return true;
+ else
+ return false;
+ } else
+ return false;
+}
+
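The condition words this helper consumes are the 0x8…/0x9… rows that replace the old 0xFF0F… markers in table.c later in this patch; each such row is followed by a (0x40000000, 0x00000000) pair, a negative-condition placeholder the parser skips. A worked decode of the most common pair, restating the masks above as illustrative compile-time checks:

```c
/* 0x80000001, 0x00000000: positive "if" row gating on board bit 0 (_GLNA);
 * mask 0x000000FF is then applied to cond2, so it matches type_glna == 0.
 */
_Static_assert((0x80000001u >> 31) == 1, "positive condition");
_Static_assert(((0x80000001u >> 28) & 0x3) == 0, "plain if, not else-if");
_Static_assert((0x80000001u & 0x0F) == 0x01, "board-type bit 0: _GLNA");

/* 0x90000001, 0x00000005: "else if" on the same bit, type_glna == 5 */
_Static_assert(((0x90000001u >> 28) & 0x3) == 1, "else-if variant");
```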
static bool _rtl8821ae_check_condition(struct ieee80211_hw *hw,
const u32 condition)
{
@@ -1695,55 +1867,78 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
return true;
}
+static bool
+__rtl8821ae_phy_config_with_headerfile(struct ieee80211_hw *hw,
+ u32 *array_table, u16 arraylen,
+ void (*set_reg)(struct ieee80211_hw *hw,
+ u32 regaddr, u32 data))
+{
+ #define COND_ELSE 2
+ #define COND_ENDIF 3
+
+ int i = 0;
+ u8 cond;
+ bool matched = true, skipped = false;
+
+ while ((i + 1) < arraylen) {
+ u32 v1 = array_table[i];
+ u32 v2 = array_table[i + 1];
+
+ if (v1 & (BIT(31) | BIT(30))) {/*positive & negative condition*/
+ if (v1 & BIT(31)) {/* positive condition*/
+ cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28);
+ if (cond == COND_ENDIF) {/*end*/
+ matched = true;
+ skipped = false;
+ } else if (cond == COND_ELSE) /*else*/
+ matched = skipped ? false : true;
+ else {/*if , else if*/
+ if (skipped) {
+ matched = false;
+ } else {
+ if (_rtl8821ae_check_positive(
+ hw, v1, v2)) {
+ matched = true;
+ skipped = true;
+ } else {
+ matched = false;
+ skipped = false;
+ }
+ }
+ }
+ } else if (v1 & BIT(30)) { /*negative condition*/
+ /*do nothing*/
+ }
+ } else {
+ if (matched)
+ set_reg(hw, v1, v2);
+ }
+ i = i + 2;
+ }
+
+ return true;
+}
+
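All three table walkers (MAC, BB/AGC, RF) now funnel through this single parser, so the grammar lives in one place: BIT(31) rows are headers whose bits [29:28] select if / else-if (0 or 1), else (COND_ELSE) or endif (COND_ENDIF); rows with only BIT(30) set are negative-condition placeholders and are skipped; everything else is a plain (address, value) pair applied while the current branch matches. A standalone restatement of that state machine, with check() and write_reg() standing in for _rtl8821ae_check_positive and the set_reg callback:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static void walk_table(const uint32_t *t, size_t len,
		       bool (*check)(uint32_t c1, uint32_t c2),
		       void (*write_reg)(uint32_t addr, uint32_t val))
{
	bool matched = true, skipped = false;

	for (size_t i = 0; i + 1 < len; i += 2) {
		uint32_t v1 = t[i], v2 = t[i + 1];

		if (v1 & (1u << 31)) {		/* condition header */
			uint32_t cond = (v1 >> 28) & 0x3;

			if (cond == 3) {		/* endif */
				matched = true;
				skipped = false;
			} else if (cond == 2) {		/* else */
				matched = !skipped;
			} else if (!skipped) {		/* if / else if */
				matched = check(v1, v2);
				skipped = matched;
			} else {
				matched = false;
			}
		} else if (!(v1 & (1u << 30)) && matched) {
			write_reg(v1, v2);	/* plain (addr, value) row */
		}
	}
}
```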
static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u32 i, v1, v2;
u32 arraylength;
u32 *ptrarray;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read MAC_REG_Array\n");
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
- arraylength = RTL8821AEMAC_1T_ARRAYLEN;
+ arraylength = RTL8821AE_MAC_1T_ARRAYLEN;
ptrarray = RTL8821AE_MAC_REG_ARRAY;
} else {
- arraylength = RTL8812AEMAC_1T_ARRAYLEN;
+ arraylength = RTL8812AE_MAC_1T_ARRAYLEN;
ptrarray = RTL8812AE_MAC_REG_ARRAY;
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Img: MAC_REG_ARRAY LEN %d\n", arraylength);
- for (i = 0; i < arraylength; i += 2) {
- v1 = ptrarray[i];
- v2 = (u8)ptrarray[i + 1];
- if (v1 < 0xCDCDCDCD) {
- rtl_write_byte(rtlpriv, v1, (u8)v2);
- continue;
- } else {
- if (!_rtl8821ae_check_condition(hw, v1)) {
- /*Discard the following (offset, data) pairs*/
- READ_NEXT_PAIR(ptrarray, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < arraylength - 2) {
- READ_NEXT_PAIR(ptrarray, v1, v2, i);
- }
- i -= 2; /* prevent from for-loop += 2*/
- } else {/*Configure matched pairs and skip to end of if-else.*/
- READ_NEXT_PAIR(ptrarray, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < arraylength - 2) {
- rtl_write_byte(rtlpriv, v1, v2);
- READ_NEXT_PAIR(ptrarray, v1, v2, i);
- }
- while (v2 != 0xDEAD && i < arraylength - 2)
- READ_NEXT_PAIR(ptrarray, v1, v2, i);
- }
- }
- }
- return true;
+ return __rtl8821ae_phy_config_with_headerfile(hw,
+ ptrarray, arraylength, rtl_write_byte_with_val32);
}
static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -1751,111 +1946,33 @@ static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- int i;
u32 *array_table;
u16 arraylen;
- u32 v1 = 0, v2 = 0;
if (configtype == BASEBAND_CONFIG_PHY_REG) {
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- arraylen = RTL8812AEPHY_REG_1TARRAYLEN;
+ arraylen = RTL8812AE_PHY_REG_1TARRAYLEN;
array_table = RTL8812AE_PHY_REG_ARRAY;
} else {
- arraylen = RTL8821AEPHY_REG_1TARRAYLEN;
+ arraylen = RTL8821AE_PHY_REG_1TARRAYLEN;
array_table = RTL8821AE_PHY_REG_ARRAY;
}
- for (i = 0; i < arraylen; i += 2) {
- v1 = array_table[i];
- v2 = array_table[i + 1];
- if (v1 < 0xCDCDCDCD) {
- _rtl8821ae_config_bb_reg(hw, v1, v2);
- continue;
- } else {/*This line is the start line of branch.*/
- if (!_rtl8821ae_check_condition(hw, v1)) {
- /*Discard the following (offset, data) pairs*/
- READ_NEXT_PAIR(array_table, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD &&
- i < arraylen - 2) {
- READ_NEXT_PAIR(array_table, v1,
- v2, i);
- }
-
- i -= 2; /* prevent from for-loop += 2*/
- } else {/*Configure matched pairs and skip to end of if-else.*/
- READ_NEXT_PAIR(array_table, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD &&
- i < arraylen - 2) {
- _rtl8821ae_config_bb_reg(hw, v1,
- v2);
- READ_NEXT_PAIR(array_table, v1,
- v2, i);
- }
-
- while (v2 != 0xDEAD &&
- i < arraylen - 2) {
- READ_NEXT_PAIR(array_table, v1,
- v2, i);
- }
- }
- }
- }
+ return __rtl8821ae_phy_config_with_headerfile(hw,
+ array_table, arraylen,
+ _rtl8821ae_config_bb_reg);
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- arraylen = RTL8812AEAGCTAB_1TARRAYLEN;
+ arraylen = RTL8812AE_AGC_TAB_1TARRAYLEN;
array_table = RTL8812AE_AGC_TAB_ARRAY;
} else {
- arraylen = RTL8821AEAGCTAB_1TARRAYLEN;
+ arraylen = RTL8821AE_AGC_TAB_1TARRAYLEN;
array_table = RTL8821AE_AGC_TAB_ARRAY;
}
- for (i = 0; i < arraylen; i = i + 2) {
- v1 = array_table[i];
- v2 = array_table[i+1];
- if (v1 < 0xCDCDCDCD) {
- rtl_set_bbreg(hw, v1, MASKDWORD, v2);
- udelay(1);
- continue;
- } else {/*This line is the start line of branch.*/
- if (!_rtl8821ae_check_condition(hw, v1)) {
- /*Discard the following (offset, data) pairs*/
- READ_NEXT_PAIR(array_table, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD &&
- i < arraylen - 2) {
- READ_NEXT_PAIR(array_table, v1,
- v2, i);
- }
- i -= 2; /* prevent from for-loop += 2*/
- } else {/*Configure matched pairs and skip to end of if-else.*/
- READ_NEXT_PAIR(array_table, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD &&
- i < arraylen - 2) {
- rtl_set_bbreg(hw, v1, MASKDWORD,
- v2);
- udelay(1);
- READ_NEXT_PAIR(array_table, v1,
- v2, i);
- }
-
- while (v2 != 0xDEAD &&
- i < arraylen - 2) {
- READ_NEXT_PAIR(array_table, v1,
- v2, i);
- }
- }
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
- array_table[i], array_table[i + 1]);
- }
- }
+ return __rtl8821ae_phy_config_with_headerfile(hw,
+ array_table, arraylen,
+ rtl_set_bbreg_with_dwmask);
}
return true;
}
@@ -1913,10 +2030,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
u32 v1, v2, v3, v4, v5, v6;
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- arraylen = RTL8812AEPHY_REG_ARRAY_PGLEN;
+ arraylen = RTL8812AE_PHY_REG_ARRAY_PGLEN;
array = RTL8812AE_PHY_REG_ARRAY_PG;
} else {
- arraylen = RTL8821AEPHY_REG_ARRAY_PGLEN;
+ arraylen = RTL8821AE_PHY_REG_ARRAY_PGLEN;
array = RTL8821AE_PHY_REG_ARRAY_PG;
}
@@ -1980,12 +2097,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
- int i;
bool rtstatus = true;
u32 *radioa_array_table_a, *radioa_array_table_b;
u16 radioa_arraylen_a, radioa_arraylen_b;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 v1 = 0, v2 = 0;
radioa_arraylen_a = RTL8812AE_RADIOA_1TARRAYLEN;
radioa_array_table_a = RTL8812AE_RADIOA_ARRAY;
@@ -1997,69 +2112,14 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
- for (i = 0; i < radioa_arraylen_a; i = i + 2) {
- v1 = radioa_array_table_a[i];
- v2 = radioa_array_table_a[i+1];
- if (v1 < 0xcdcdcdcd) {
- _rtl8821ae_config_rf_radio_a(hw, v1, v2);
- continue;
- } else{/*This line is the start line of branch.*/
- if (!_rtl8821ae_check_condition(hw, v1)) {
- /*Discard the following (offset, data) pairs*/
- READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < radioa_arraylen_a-2)
- READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
-
- i -= 2; /* prevent from for-loop += 2*/
- } else {/*Configure matched pairs and skip to end of if-else.*/
- READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < radioa_arraylen_a - 2) {
- _rtl8821ae_config_rf_radio_a(hw, v1, v2);
- READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
- }
-
- while (v2 != 0xDEAD && i < radioa_arraylen_a-2)
- READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i);
-
- }
- }
- }
+ return __rtl8821ae_phy_config_with_headerfile(hw,
+ radioa_array_table_a, radioa_arraylen_a,
+ _rtl8821ae_config_rf_radio_a);
break;
case RF90_PATH_B:
- for (i = 0; i < radioa_arraylen_b; i = i + 2) {
- v1 = radioa_array_table_b[i];
- v2 = radioa_array_table_b[i+1];
- if (v1 < 0xcdcdcdcd) {
- _rtl8821ae_config_rf_radio_b(hw, v1, v2);
- continue;
- } else{/*This line is the start line of branch.*/
- if (!_rtl8821ae_check_condition(hw, v1)) {
- /*Discard the following (offset, data) pairs*/
- READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < radioa_arraylen_b-2)
- READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
-
- i -= 2; /* prevent from for-loop += 2*/
- } else {/*Configure matched pairs and skip to end of if-else.*/
- READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < radioa_arraylen_b-2) {
- _rtl8821ae_config_rf_radio_b(hw, v1, v2);
- READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
- }
-
- while (v2 != 0xDEAD && i < radioa_arraylen_b-2)
- READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i);
- }
- }
- }
+ return __rtl8821ae_phy_config_with_headerfile(hw,
+ radioa_array_table_b, radioa_arraylen_b,
+ _rtl8821ae_config_rf_radio_b);
break;
case RF90_PATH_C:
case RF90_PATH_D:
@@ -2072,21 +2132,10 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum radio_path rfpath)
{
- #define READ_NEXT_RF_PAIR(v1, v2, i) \
- do { \
- i += 2; \
- v1 = radioa_array_table[i]; \
- v2 = radioa_array_table[i+1]; \
- } \
- while (0)
-
- int i;
bool rtstatus = true;
u32 *radioa_array_table;
u16 radioa_arraylen;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- /* struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); */
- u32 v1 = 0, v2 = 0;
radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN;
radioa_array_table = RTL8821AE_RADIOA_ARRAY;
@@ -2096,35 +2145,9 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
- for (i = 0; i < radioa_arraylen; i = i + 2) {
- v1 = radioa_array_table[i];
- v2 = radioa_array_table[i+1];
- if (v1 < 0xcdcdcdcd)
- _rtl8821ae_config_rf_radio_a(hw, v1, v2);
- else{/*This line is the start line of branch.*/
- if (!_rtl8821ae_check_condition(hw, v1)) {
- /*Discard the following (offset, data) pairs*/
- READ_NEXT_RF_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < radioa_arraylen - 2)
- READ_NEXT_RF_PAIR(v1, v2, i);
-
- i -= 2; /* prevent from for-loop += 2*/
- } else {/*Configure matched pairs and skip to end of if-else.*/
- READ_NEXT_RF_PAIR(v1, v2, i);
- while (v2 != 0xDEAD &&
- v2 != 0xCDEF &&
- v2 != 0xCDCD && i < radioa_arraylen - 2) {
- _rtl8821ae_config_rf_radio_a(hw, v1, v2);
- READ_NEXT_RF_PAIR(v1, v2, i);
- }
-
- while (v2 != 0xDEAD && i < radioa_arraylen - 2)
- READ_NEXT_RF_PAIR(v1, v2, i);
- }
- }
- }
+ return __rtl8821ae_phy_config_with_headerfile(hw,
+ radioa_array_table, radioa_arraylen,
+ _rtl8821ae_config_rf_radio_a);
break;
case RF90_PATH_B:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
index 1d6110f9c1fb..ed69dbe178ff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
@@ -2424,6 +2424,7 @@
#define BMASKH4BITS 0xf0000000
#define BMASKOFDM_D 0xffc00000
#define BMASKCCK 0x3f3f3f3f
+#define BMASKRFEINV 0x3ff00000
#define BRFREGOFFSETMASK 0xfffff
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 77cf3b2cd3f1..abaf34cb1433 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -203,7 +203,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
fw_name = "rtlwifi/rtl8812aefw.bin";
wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
} else {
- fw_name = "rtlwifi/rtl8821aefw.bin";
+ fw_name = "rtlwifi/rtl8821aefw_29.bin";
wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
}
@@ -214,8 +214,16 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
- pr_err("Failed to request normal firmware!\n");
- return 1;
+ /* Failed to get firmware; check whether the old version is available */
+ fw_name = "rtlwifi/rtl8821aefw.bin";
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
+ if (err) {
+ pr_err("Failed to request normal firmware!\n");
+ return 1;
+ }
}
/*load wowlan firmware*/
pr_info("Using firmware %s\n", wowlan_fw_name);
@@ -428,6 +436,7 @@ MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8821aefw_29.bin");
module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444);
module_param_named(debug_level, rtl8821ae_mod_params.debug_level, int, 0644);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index 62a0fb76f080..408c4611e5de 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -38,7 +38,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0x824, 0x00030FE0,
0x828, 0x00000000,
0x82C, 0x002083DD,
- 0x830, 0x2AAA6C86,
+ 0x830, 0x2EAAEEB8,
0x834, 0x0037A706,
0x838, 0x06C89B44,
0x83C, 0x0000095B,
@@ -68,7 +68,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0x8BC, 0x4CA520A3,
0x8C0, 0x27F00020,
0x8C4, 0x00000000,
- 0x8C8, 0x00013169,
+ 0x8C8, 0x00012D69,
0x8CC, 0x08248492,
0x8D0, 0x0000B800,
0x8DC, 0x00000000,
@@ -76,13 +76,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0x8D8, 0x290B5612,
0x8F8, 0x400002C0,
0x8FC, 0x00000000,
- 0xFF0F07D8, 0xABCD,
0x900, 0x00000701,
- 0xFF0F07D0, 0xCDEF,
- 0x900, 0x00000701,
- 0xCDCDCDCD, 0xCDCD,
- 0x900, 0x00000700,
- 0xFF0F07D8, 0xDEAD,
0x90C, 0x00000000,
0x910, 0x0000FC00,
0x914, 0x00000404,
@@ -120,7 +114,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0x9D4, 0x00000000,
0x9D8, 0x00000000,
0x9DC, 0x00000000,
- 0x9E4, 0x00000002,
+ 0x9E4, 0x00000003,
0x9E8, 0x000002D5,
0xA00, 0x00D047C8,
0xA04, 0x01FF000C,
@@ -189,7 +183,21 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0xC5C, 0x00000058,
0xC60, 0x34344443,
0xC64, 0x07003333,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0xC68, 0x59791979,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
+ 0xC68, 0x59791979,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0xC68, 0x59791979,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0xC68, 0x59791979,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0xC68, 0x59791979,
+ 0x90000001, 0x00000005, 0x40000000, 0x00000000,
0xC68, 0x59791979,
+ 0xA0000000, 0x00000000,
+ 0xC68, 0x59799979,
+ 0xB0000000, 0x00000000,
0xC6C, 0x59795979,
0xC70, 0x19795979,
0xC74, 0x19795979,
@@ -203,19 +211,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0xCA0, 0x00000029,
0xCA4, 0x08040201,
0xCA8, 0x80402010,
- 0xFF0F0740, 0xABCD,
- 0xCB0, 0x77547717,
- 0xFF0F01C0, 0xCDEF,
- 0xCB0, 0x77547717,
- 0xFF0F02C0, 0xCDEF,
- 0xCB0, 0x77547717,
- 0xFF0F07D8, 0xCDEF,
- 0xCB0, 0x54547710,
- 0xFF0F07D0, 0xCDEF,
- 0xCB0, 0x54547710,
- 0xCDCDCDCD, 0xCDCD,
0xCB0, 0x77547777,
- 0xFF0F0740, 0xDEAD,
0xCB4, 0x00000077,
0xCB8, 0x00508242,
0xE00, 0x00000007,
@@ -257,23 +253,14 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0xEA0, 0x00000029,
0xEA4, 0x08040201,
0xEA8, 0x80402010,
- 0xFF0F0740, 0xABCD,
- 0xEB0, 0x77547717,
- 0xFF0F01C0, 0xCDEF,
- 0xEB0, 0x77547717,
- 0xFF0F02C0, 0xCDEF,
- 0xEB0, 0x77547717,
- 0xFF0F07D8, 0xCDEF,
- 0xEB0, 0x54547710,
- 0xFF0F07D0, 0xCDEF,
- 0xEB0, 0x54547710,
- 0xCDCDCDCD, 0xCDCD,
0xEB0, 0x77547777,
- 0xFF0F0740, 0xDEAD,
0xEB4, 0x00000077,
0xEB8, 0x00508242,
};
+u32 RTL8812AE_PHY_REG_1TARRAYLEN =
+ sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32);
+
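Deriving each length from the array itself (the constants also gain an underscore: RTL8812AEPHY_REG_1TARRAYLEN becomes RTL8812AE_PHY_REG_1TARRAYLEN) means pruning the old conditional rows can never leave a stale hand-counted length behind. Kernel code usually spells this quotient with the ARRAY_SIZE() helper from linux/kernel.h; an equivalent sketch:

```c
#include <linux/kernel.h>
#include <linux/types.h>

/* Same value as the open-coded sizeof quotient above, since the array's
 * element type is u32.
 */
u32 RTL8812AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY);
```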
u32 RTL8821AE_PHY_REG_ARRAY[] = {
0x800, 0x0020D090,
0x804, 0x080112E0,
@@ -449,6 +436,9 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
0xCB8, 0x00508240,
};
+u32 RTL8821AE_PHY_REG_1TARRAYLEN =
+ sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32);
+
u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444,
@@ -498,6 +488,9 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
};
+u32 RTL8812AE_PHY_REG_ARRAY_PGLEN =
+ sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+
u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838,
@@ -516,6 +509,9 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
};
+u32 RTL8821AE_PHY_REG_ARRAY_PGLEN =
+ sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+
u32 RTL8812AE_RADIOA_ARRAY[] = {
0x000, 0x00010000,
0x018, 0x0001712A,
@@ -523,26 +519,25 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x066, 0x00040000,
0x01E, 0x00080000,
0x089, 0x00000080,
- 0xFF0F0740, 0xABCD,
- 0x086, 0x00014B38,
- 0xFF0F02C0, 0xCDEF,
- 0x086, 0x00014B38,
- 0xFF0F01C0, 0xCDEF,
- 0x086, 0x00014B38,
- 0xFF0F07D8, 0xCDEF,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
0x086, 0x00014B3A,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000001, 0x00000005, 0x40000000, 0x00000000,
0x086, 0x00014B3A,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x086, 0x00014B38,
- 0xFF0F0740, 0xDEAD,
+ 0xB0000000, 0x00000000,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08B, 0x00080180,
+ 0xA0000000, 0x00000000,
+ 0x08B, 0x00087180,
+ 0xB0000000, 0x00000000,
0x0B1, 0x0001FC1A,
0x0B3, 0x000F0810,
0x0B4, 0x0001A78D,
0x0BA, 0x00086180,
0x018, 0x00000006,
0x0EF, 0x00002000,
- 0xFF0F07D8, 0xABCD,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
0x03B, 0x0003F218,
0x03B, 0x00030A58,
0x03B, 0x0002FA58,
@@ -550,7 +545,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x03B, 0x0001FA50,
0x03B, 0x00010248,
0x03B, 0x00008240,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000001, 0x00000005, 0x40000000, 0x00000000,
0x03B, 0x0003F218,
0x03B, 0x00030A58,
0x03B, 0x0002FA58,
@@ -558,7 +553,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x03B, 0x0001FA50,
0x03B, 0x00010248,
0x03B, 0x00008240,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x03B, 0x00038A58,
0x03B, 0x00037A58,
0x03B, 0x0002A590,
@@ -566,9 +561,9 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x03B, 0x00018248,
0x03B, 0x00010240,
0x03B, 0x00008240,
- 0xFF0F07D8, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000100,
- 0xFF0F07D8, 0xABCD,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A4EE,
0x034, 0x00009076,
0x034, 0x00008073,
@@ -580,7 +575,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x00002028,
0x034, 0x00001025,
0x034, 0x00000022,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x034, 0x0000ADF4,
0x034, 0x00009DF1,
0x034, 0x00008DEE,
@@ -592,7 +587,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x000024E7,
0x034, 0x0000146B,
0x034, 0x0000006D,
- 0xFF0F07D8, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x000020A2,
0x0DF, 0x00000080,
@@ -646,7 +641,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x03B, 0x0006B064,
0x03C, 0x00004000,
0x03A, 0x000000D8,
- 0x03B, 0x00023070,
+ 0x03B, 0x00063070,
0x03C, 0x00004000,
0x03A, 0x00000468,
0x03B, 0x0005B870,
@@ -685,43 +680,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x03B, 0x00082080,
0x03C, 0x00010000,
0x0EF, 0x00001100,
- 0xFF0F0740, 0xABCD,
- 0x034, 0x0004A0B2,
- 0x034, 0x000490AF,
- 0x034, 0x00048070,
- 0x034, 0x0004706D,
- 0x034, 0x00046050,
- 0x034, 0x0004504D,
- 0x034, 0x0004404A,
- 0x034, 0x00043047,
- 0x034, 0x0004200A,
- 0x034, 0x00041007,
- 0x034, 0x00040004,
- 0xFF0F02C0, 0xCDEF,
- 0x034, 0x0004A0B2,
- 0x034, 0x000490AF,
- 0x034, 0x00048070,
- 0x034, 0x0004706D,
- 0x034, 0x00046050,
- 0x034, 0x0004504D,
- 0x034, 0x0004404A,
- 0x034, 0x00043047,
- 0x034, 0x0004200A,
- 0x034, 0x00041007,
- 0x034, 0x00040004,
- 0xFF0F01C0, 0xCDEF,
- 0x034, 0x0004A0B2,
- 0x034, 0x000490AF,
- 0x034, 0x00048070,
- 0x034, 0x0004706D,
- 0x034, 0x00046050,
- 0x034, 0x0004504D,
- 0x034, 0x0004404A,
- 0x034, 0x00043047,
- 0x034, 0x0004200A,
- 0x034, 0x00041007,
- 0x034, 0x00040004,
- 0xFF0F07D8, 0xCDEF,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004A0B2,
0x034, 0x000490AF,
0x034, 0x00048070,
@@ -733,80 +692,32 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x0004200A,
0x034, 0x00041007,
0x034, 0x00040004,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x034, 0x0004A0B2,
0x034, 0x000490AF,
0x034, 0x00048070,
0x034, 0x0004706D,
- 0x034, 0x00046050,
- 0x034, 0x0004504D,
- 0x034, 0x0004404A,
- 0x034, 0x00043047,
- 0x034, 0x0004200A,
- 0x034, 0x00041007,
- 0x034, 0x00040004,
- 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0004604D,
+ 0x034, 0x0004504A,
+ 0x034, 0x00044047,
+ 0x034, 0x00043044,
+ 0x034, 0x00042007,
+ 0x034, 0x00041004,
+ 0x034, 0x00040001,
+ 0xA0000000, 0x00000000,
0x034, 0x0004ADF5,
0x034, 0x00049DF2,
0x034, 0x00048DEF,
0x034, 0x00047DEC,
0x034, 0x00046DE9,
- 0x034, 0x00045DC9,
- 0x034, 0x00044CE8,
- 0x034, 0x000438CA,
- 0x034, 0x00042889,
- 0x034, 0x0004184A,
- 0x034, 0x0004044A,
- 0xFF0F0740, 0xDEAD,
- 0xFF0F0740, 0xABCD,
- 0x034, 0x0002A0B2,
- 0x034, 0x000290AF,
- 0x034, 0x00028070,
- 0x034, 0x0002706D,
- 0x034, 0x00026050,
- 0x034, 0x0002504D,
- 0x034, 0x0002404A,
- 0x034, 0x00023047,
- 0x034, 0x0002200A,
- 0x034, 0x00021007,
- 0x034, 0x00020004,
- 0xFF0F02C0, 0xCDEF,
- 0x034, 0x0002A0B2,
- 0x034, 0x000290AF,
- 0x034, 0x00028070,
- 0x034, 0x0002706D,
- 0x034, 0x00026050,
- 0x034, 0x0002504D,
- 0x034, 0x0002404A,
- 0x034, 0x00023047,
- 0x034, 0x0002200A,
- 0x034, 0x00021007,
- 0x034, 0x00020004,
- 0xFF0F01C0, 0xCDEF,
- 0x034, 0x0002A0B2,
- 0x034, 0x000290AF,
- 0x034, 0x00028070,
- 0x034, 0x0002706D,
- 0x034, 0x00026050,
- 0x034, 0x0002504D,
- 0x034, 0x0002404A,
- 0x034, 0x00023047,
- 0x034, 0x0002200A,
- 0x034, 0x00021007,
- 0x034, 0x00020004,
- 0xFF0F07D8, 0xCDEF,
- 0x034, 0x0002A0B2,
- 0x034, 0x000290AF,
- 0x034, 0x00028070,
- 0x034, 0x0002706D,
- 0x034, 0x00026050,
- 0x034, 0x0002504D,
- 0x034, 0x0002404A,
- 0x034, 0x00023047,
- 0x034, 0x0002200A,
- 0x034, 0x00021007,
- 0x034, 0x00020004,
- 0xFF0F07D0, 0xCDEF,
+ 0x034, 0x00045DE6,
+ 0x034, 0x00044DE3,
+ 0x034, 0x000438C8,
+ 0x034, 0x000428C5,
+ 0x034, 0x000418C2,
+ 0x034, 0x000408C0,
+ 0xB0000000, 0x00000000,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0002A0B2,
0x034, 0x000290AF,
0x034, 0x00028070,
@@ -818,32 +729,32 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x0002200A,
0x034, 0x00021007,
0x034, 0x00020004,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A0B4,
+ 0x034, 0x000290B1,
+ 0x034, 0x00028072,
+ 0x034, 0x0002706F,
+ 0x034, 0x0002604F,
+ 0x034, 0x0002504C,
+ 0x034, 0x00024049,
+ 0x034, 0x00023046,
+ 0x034, 0x00022009,
+ 0x034, 0x00021006,
+ 0x034, 0x00020003,
+ 0xA0000000, 0x00000000,
0x034, 0x0002ADF5,
0x034, 0x00029DF2,
0x034, 0x00028DEF,
0x034, 0x00027DEC,
0x034, 0x00026DE9,
- 0x034, 0x00025DC9,
- 0x034, 0x00024CE8,
- 0x034, 0x000238CA,
- 0x034, 0x00022889,
- 0x034, 0x0002184A,
- 0x034, 0x0002044A,
- 0xFF0F0740, 0xDEAD,
- 0xFF0F0740, 0xABCD,
- 0x034, 0x0000A0B2,
- 0x034, 0x000090AF,
- 0x034, 0x00008070,
- 0x034, 0x0000706D,
- 0x034, 0x00006050,
- 0x034, 0x0000504D,
- 0x034, 0x0000404A,
- 0x034, 0x00003047,
- 0x034, 0x0000200A,
- 0x034, 0x00001007,
- 0x034, 0x00000004,
- 0xFF0F02C0, 0xCDEF,
+ 0x034, 0x00025DE6,
+ 0x034, 0x00024DE3,
+ 0x034, 0x000238C8,
+ 0x034, 0x000228C5,
+ 0x034, 0x000218C2,
+ 0x034, 0x000208C0,
+ 0xB0000000, 0x00000000,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A0B2,
0x034, 0x000090AF,
0x034, 0x00008070,
@@ -855,93 +766,33 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x034, 0x0000200A,
0x034, 0x00001007,
0x034, 0x00000004,
- 0xFF0F01C0, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x034, 0x0000A0B2,
0x034, 0x000090AF,
0x034, 0x00008070,
0x034, 0x0000706D,
- 0x034, 0x00006050,
- 0x034, 0x0000504D,
- 0x034, 0x0000404A,
- 0x034, 0x00003047,
- 0x034, 0x0000200A,
- 0x034, 0x00001007,
- 0x034, 0x00000004,
- 0xFF0F07D8, 0xCDEF,
- 0x034, 0x0000A0B2,
- 0x034, 0x000090AF,
- 0x034, 0x00008070,
- 0x034, 0x0000706D,
- 0x034, 0x00006050,
- 0x034, 0x0000504D,
- 0x034, 0x0000404A,
- 0x034, 0x00003047,
- 0x034, 0x0000200A,
- 0x034, 0x00001007,
- 0x034, 0x00000004,
- 0xFF0F07D0, 0xCDEF,
- 0x034, 0x0000A0B2,
- 0x034, 0x000090AF,
- 0x034, 0x00008070,
- 0x034, 0x0000706D,
- 0x034, 0x00006050,
- 0x034, 0x0000504D,
- 0x034, 0x0000404A,
- 0x034, 0x00003047,
- 0x034, 0x0000200A,
- 0x034, 0x00001007,
- 0x034, 0x00000004,
- 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000604D,
+ 0x034, 0x0000504A,
+ 0x034, 0x00004047,
+ 0x034, 0x00003044,
+ 0x034, 0x00002007,
+ 0x034, 0x00001004,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
0x034, 0x0000AFF7,
0x034, 0x00009DF7,
0x034, 0x00008DF4,
0x034, 0x00007DF1,
0x034, 0x00006DEE,
- 0x034, 0x00005DCD,
- 0x034, 0x00004CEB,
+ 0x034, 0x00005DEB,
+ 0x034, 0x00004DE8,
0x034, 0x000038CC,
- 0x034, 0x0000288B,
- 0x034, 0x0000184C,
- 0x034, 0x0000044C,
- 0xFF0F0740, 0xDEAD,
+ 0x034, 0x000028C9,
+ 0x034, 0x000018C6,
+ 0x034, 0x000008C3,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
- 0xFF0F0740, 0xABCD,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000040,
- 0x035, 0x000001D4,
- 0x035, 0x000081D4,
- 0x035, 0x000101D4,
- 0x035, 0x000201B4,
- 0x035, 0x000281B4,
- 0x035, 0x000301B4,
- 0x035, 0x000401B4,
- 0x035, 0x000481B4,
- 0x035, 0x000501B4,
- 0xFF0F02C0, 0xCDEF,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000040,
- 0x035, 0x000001D4,
- 0x035, 0x000081D4,
- 0x035, 0x000101D4,
- 0x035, 0x000201B4,
- 0x035, 0x000281B4,
- 0x035, 0x000301B4,
- 0x035, 0x000401B4,
- 0x035, 0x000481B4,
- 0x035, 0x000501B4,
- 0xFF0F01C0, 0xCDEF,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000040,
- 0x035, 0x000001D4,
- 0x035, 0x000081D4,
- 0x035, 0x000101D4,
- 0x035, 0x000201B4,
- 0x035, 0x000281B4,
- 0x035, 0x000301B4,
- 0x035, 0x000401B4,
- 0x035, 0x000481B4,
- 0x035, 0x000501B4,
- 0xFF0F07D8, 0xCDEF,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000040,
0x035, 0x000001D4,
@@ -953,7 +804,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x035, 0x000401B4,
0x035, 0x000481B4,
0x035, 0x000501B4,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000040,
0x035, 0x000001D4,
@@ -965,7 +816,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x035, 0x000401B4,
0x035, 0x000481B4,
0x035, 0x000501B4,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000040,
0x035, 0x00000188,
@@ -977,54 +828,9 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x035, 0x000401D8,
0x035, 0x000481D8,
0x035, 0x000501D8,
- 0xFF0F0740, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
- 0xFF0F0740, 0xABCD,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000010,
- 0x036, 0x00004BFB,
- 0x036, 0x0000CBFB,
- 0x036, 0x00014BFB,
- 0x036, 0x0001CBFB,
- 0x036, 0x00024F4B,
- 0x036, 0x0002CF4B,
- 0x036, 0x00034F4B,
- 0x036, 0x0003CF4B,
- 0x036, 0x00044F4B,
- 0x036, 0x0004CF4B,
- 0x036, 0x00054F4B,
- 0x036, 0x0005CF4B,
- 0xFF0F02C0, 0xCDEF,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000010,
- 0x036, 0x00004BFB,
- 0x036, 0x0000CBFB,
- 0x036, 0x00014BFB,
- 0x036, 0x0001CBFB,
- 0x036, 0x00024F4B,
- 0x036, 0x0002CF4B,
- 0x036, 0x00034F4B,
- 0x036, 0x0003CF4B,
- 0x036, 0x00044F4B,
- 0x036, 0x0004CF4B,
- 0x036, 0x00054F4B,
- 0x036, 0x0005CF4B,
- 0xFF0F01C0, 0xCDEF,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000010,
- 0x036, 0x00004BFB,
- 0x036, 0x0000CBFB,
- 0x036, 0x00014BFB,
- 0x036, 0x0001CBFB,
- 0x036, 0x00024F4B,
- 0x036, 0x0002CF4B,
- 0x036, 0x00034F4B,
- 0x036, 0x0003CF4B,
- 0x036, 0x00044F4B,
- 0x036, 0x0004CF4B,
- 0x036, 0x00054F4B,
- 0x036, 0x0005CF4B,
- 0xFF0F07D8, 0xCDEF,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
0x036, 0x00004BFB,
@@ -1039,7 +845,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x036, 0x0004CF4B,
0x036, 0x00054F4B,
0x036, 0x0005CF4B,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
0x036, 0x00004BFB,
@@ -1054,91 +860,61 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x036, 0x0004CF4B,
0x036, 0x00054F4B,
0x036, 0x0005CF4B,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
0x036, 0x00084EB4,
0x036, 0x0008CC35,
0x036, 0x00094C35,
0x036, 0x0009CC35,
- 0x036, 0x000A4935,
+ 0x036, 0x000A4C35,
0x036, 0x000ACC35,
0x036, 0x000B4C35,
0x036, 0x000BCC35,
- 0x036, 0x000C4EB4,
- 0x036, 0x000CCEB5,
- 0x036, 0x000D4EB5,
- 0x036, 0x000DCEB5,
- 0xFF0F0740, 0xDEAD,
+ 0x036, 0x000C4C34,
+ 0x036, 0x000CCC35,
+ 0x036, 0x000D4C35,
+ 0x036, 0x000DCC35,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x00000008,
- 0xFF0F0740, 0xABCD,
- 0x03C, 0x000002CC,
- 0x03C, 0x00000522,
- 0x03C, 0x00000902,
- 0xFF0F02C0, 0xCDEF,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x000002CC,
0x03C, 0x00000522,
0x03C, 0x00000902,
- 0xFF0F01C0, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x03C, 0x000002CC,
0x03C, 0x00000522,
0x03C, 0x00000902,
- 0xFF0F07D8, 0xCDEF,
- 0x03C, 0x000002CC,
- 0x03C, 0x00000522,
- 0x03C, 0x00000902,
- 0xFF0F07D0, 0xCDEF,
- 0x03C, 0x000002CC,
- 0x03C, 0x00000522,
- 0x03C, 0x00000902,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x03C, 0x000002A8,
0x03C, 0x000005A2,
0x03C, 0x00000880,
- 0xFF0F0740, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000002,
0x0DF, 0x00000080,
- 0x01F, 0x00040064,
- 0xFF0F0740, 0xABCD,
- 0x061, 0x000FDD43,
- 0x062, 0x00038F4B,
- 0x063, 0x00032117,
- 0x064, 0x000194AC,
- 0x065, 0x000931D1,
- 0xFF0F02C0, 0xCDEF,
+ 0x01F, 0x00000064,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000FDD43,
0x062, 0x00038F4B,
0x063, 0x00032117,
0x064, 0x000194AC,
0x065, 0x000931D1,
- 0xFF0F01C0, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x061, 0x000FDD43,
0x062, 0x00038F4B,
0x063, 0x00032117,
0x064, 0x000194AC,
- 0x065, 0x000931D1,
- 0xFF0F07D8, 0xCDEF,
- 0x061, 0x000FDD43,
- 0x062, 0x00038F4B,
- 0x063, 0x00032117,
- 0x064, 0x000194AC,
- 0x065, 0x000931D1,
- 0xFF0F07D0, 0xCDEF,
- 0x061, 0x000FDD43,
- 0x062, 0x00038F4B,
- 0x063, 0x00032117,
- 0x064, 0x000194AC,
- 0x065, 0x000931D1,
- 0xCDCDCDCD, 0xCDCD,
+ 0x065, 0x000931D2,
+ 0xA0000000, 0x00000000,
0x061, 0x000E5D53,
0x062, 0x00038FCD,
- 0x063, 0x000314EB,
+ 0x063, 0x000114EB,
0x064, 0x000196AC,
0x065, 0x000911D7,
- 0xFF0F0740, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x008, 0x00008400,
0x01C, 0x000739D2,
0x0B4, 0x0001E78D,
@@ -1149,29 +925,29 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x0FE, 0x00000000,
0x0B4, 0x0001A78D,
0x018, 0x0001712A,
-
};
+u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32);
+
u32 RTL8812AE_RADIOB_ARRAY[] = {
0x056, 0x00051CF2,
0x066, 0x00040000,
0x089, 0x00000080,
- 0xFF0F0740, 0xABCD,
- 0x086, 0x00014B38,
- 0xFF0F01C0, 0xCDEF,
- 0x086, 0x00014B38,
- 0xFF0F02C0, 0xCDEF,
- 0x086, 0x00014B38,
- 0xFF0F07D8, 0xCDEF,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
0x086, 0x00014B3A,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000001, 0x00000005, 0x40000000, 0x00000000,
0x086, 0x00014B3A,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x086, 0x00014B38,
- 0xFF0F0740, 0xDEAD,
+ 0xB0000000, 0x00000000,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08B, 0x00080180,
+ 0xA0000000, 0x00000000,
+ 0x08B, 0x00087180,
+ 0xB0000000, 0x00000000,
0x018, 0x00000006,
0x0EF, 0x00002000,
- 0xFF0F07D8, 0xABCD,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
0x03B, 0x0003F218,
0x03B, 0x00030A58,
0x03B, 0x0002FA58,
@@ -1179,7 +955,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x03B, 0x0001FA50,
0x03B, 0x00010248,
0x03B, 0x00008240,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000001, 0x00000005, 0x40000000, 0x00000000,
0x03B, 0x0003F218,
0x03B, 0x00030A58,
0x03B, 0x0002FA58,
@@ -1187,7 +963,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x03B, 0x0001FA50,
0x03B, 0x00010248,
0x03B, 0x00008240,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x03B, 0x00038A58,
0x03B, 0x00037A58,
0x03B, 0x0002A590,
@@ -1195,9 +971,9 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x03B, 0x00018248,
0x03B, 0x00010240,
0x03B, 0x00008240,
- 0xFF0F07D8, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000100,
- 0xFF0F07D8, 0xABCD,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A4EE,
0x034, 0x00009076,
0x034, 0x00008073,
@@ -1209,7 +985,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x00002028,
0x034, 0x00001025,
0x034, 0x00000022,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x034, 0x0000ADF4,
0x034, 0x00009DF1,
0x034, 0x00008DEE,
@@ -1221,7 +997,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x000024E7,
0x034, 0x0000146B,
0x034, 0x0000006D,
- 0xFF0F07D8, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x000020A2,
0x0DF, 0x00000080,
@@ -1314,31 +1090,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x03B, 0x00082080,
0x03C, 0x00010000,
0x0EF, 0x00001100,
- 0xFF0F0740, 0xABCD,
- 0x034, 0x0004A0B2,
- 0x034, 0x000490AF,
- 0x034, 0x00048070,
- 0x034, 0x0004706D,
- 0x034, 0x00046050,
- 0x034, 0x0004504D,
- 0x034, 0x0004404A,
- 0x034, 0x00043047,
- 0x034, 0x0004200A,
- 0x034, 0x00041007,
- 0x034, 0x00040004,
- 0xFF0F01C0, 0xCDEF,
- 0x034, 0x0004A0B2,
- 0x034, 0x000490AF,
- 0x034, 0x00048070,
- 0x034, 0x0004706D,
- 0x034, 0x00046050,
- 0x034, 0x0004504D,
- 0x034, 0x0004404A,
- 0x034, 0x00043047,
- 0x034, 0x0004200A,
- 0x034, 0x00041007,
- 0x034, 0x00040004,
- 0xFF0F02C0, 0xCDEF,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0004A0B2,
0x034, 0x000490AF,
0x034, 0x00048070,
@@ -1350,80 +1102,32 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x0004200A,
0x034, 0x00041007,
0x034, 0x00040004,
- 0xFF0F07D8, 0xCDEF,
- 0x034, 0x0004A0B2,
- 0x034, 0x000490AF,
- 0x034, 0x00048070,
- 0x034, 0x0004706D,
- 0x034, 0x00046050,
- 0x034, 0x0004504D,
- 0x034, 0x0004404A,
- 0x034, 0x00043047,
- 0x034, 0x0004200A,
- 0x034, 0x00041007,
- 0x034, 0x00040004,
- 0xFF0F07D0, 0xCDEF,
- 0x034, 0x0004A0B2,
- 0x034, 0x000490AF,
- 0x034, 0x00048070,
- 0x034, 0x0004706D,
- 0x034, 0x00046050,
- 0x034, 0x0004504D,
- 0x034, 0x0004404A,
- 0x034, 0x00043047,
- 0x034, 0x0004200A,
- 0x034, 0x00041007,
- 0x034, 0x00040004,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A0B1,
+ 0x034, 0x000490AE,
+ 0x034, 0x0004806F,
+ 0x034, 0x0004706C,
+ 0x034, 0x0004604C,
+ 0x034, 0x00045049,
+ 0x034, 0x00044046,
+ 0x034, 0x00043043,
+ 0x034, 0x00042006,
+ 0x034, 0x00041003,
+ 0x034, 0x00040000,
+ 0xA0000000, 0x00000000,
0x034, 0x0004ADF5,
0x034, 0x00049DF2,
0x034, 0x00048DEF,
0x034, 0x00047DEC,
0x034, 0x00046DE9,
- 0x034, 0x00045DC9,
- 0x034, 0x00044CE8,
- 0x034, 0x000438CA,
- 0x034, 0x00042889,
- 0x034, 0x0004184A,
- 0x034, 0x0004044A,
- 0xFF0F0740, 0xDEAD,
- 0xFF0F0740, 0xABCD,
- 0x034, 0x0002A0B2,
- 0x034, 0x000290AF,
- 0x034, 0x00028070,
- 0x034, 0x0002706D,
- 0x034, 0x00026050,
- 0x034, 0x0002504D,
- 0x034, 0x0002404A,
- 0x034, 0x00023047,
- 0x034, 0x0002200A,
- 0x034, 0x00021007,
- 0x034, 0x00020004,
- 0xFF0F01C0, 0xCDEF,
- 0x034, 0x0002A0B2,
- 0x034, 0x000290AF,
- 0x034, 0x00028070,
- 0x034, 0x0002706D,
- 0x034, 0x00026050,
- 0x034, 0x0002504D,
- 0x034, 0x0002404A,
- 0x034, 0x00023047,
- 0x034, 0x0002200A,
- 0x034, 0x00021007,
- 0x034, 0x00020004,
- 0xFF0F02C0, 0xCDEF,
- 0x034, 0x0002A0B2,
- 0x034, 0x000290AF,
- 0x034, 0x00028070,
- 0x034, 0x0002706D,
- 0x034, 0x00026050,
- 0x034, 0x0002504D,
- 0x034, 0x0002404A,
- 0x034, 0x00023047,
- 0x034, 0x0002200A,
- 0x034, 0x00021007,
- 0x034, 0x00020004,
- 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x00045DE6,
+ 0x034, 0x00044DE3,
+ 0x034, 0x000438C8,
+ 0x034, 0x000428C5,
+ 0x034, 0x000418C2,
+ 0x034, 0x000408C0,
+ 0xB0000000, 0x00000000,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0002A0B2,
0x034, 0x000290AF,
0x034, 0x00028070,
@@ -1435,68 +1139,32 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x0002200A,
0x034, 0x00021007,
0x034, 0x00020004,
- 0xFF0F07D0, 0xCDEF,
- 0x034, 0x0002A0B2,
- 0x034, 0x000290AF,
- 0x034, 0x00028070,
- 0x034, 0x0002706D,
- 0x034, 0x00026050,
- 0x034, 0x0002504D,
- 0x034, 0x0002404A,
- 0x034, 0x00023047,
- 0x034, 0x0002200A,
- 0x034, 0x00021007,
- 0x034, 0x00020004,
- 0xCDCDCDCD, 0xCDCD,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A0B3,
+ 0x034, 0x000290B0,
+ 0x034, 0x00028071,
+ 0x034, 0x0002706E,
+ 0x034, 0x0002604E,
+ 0x034, 0x0002504B,
+ 0x034, 0x00024048,
+ 0x034, 0x00023045,
+ 0x034, 0x00022008,
+ 0x034, 0x00021005,
+ 0x034, 0x00020002,
+ 0xA0000000, 0x00000000,
0x034, 0x0002ADF5,
0x034, 0x00029DF2,
0x034, 0x00028DEF,
0x034, 0x00027DEC,
0x034, 0x00026DE9,
- 0x034, 0x00025DC9,
- 0x034, 0x00024CE8,
- 0x034, 0x000238CA,
- 0x034, 0x00022889,
- 0x034, 0x0002184A,
- 0x034, 0x0002044A,
- 0xFF0F0740, 0xDEAD,
- 0xFF0F0740, 0xABCD,
- 0x034, 0x0000A0B2,
- 0x034, 0x000090AF,
- 0x034, 0x00008070,
- 0x034, 0x0000706D,
- 0x034, 0x00006050,
- 0x034, 0x0000504D,
- 0x034, 0x0000404A,
- 0x034, 0x00003047,
- 0x034, 0x0000200A,
- 0x034, 0x00001007,
- 0x034, 0x00000004,
- 0xFF0F01C0, 0xCDEF,
- 0x034, 0x0000A0B2,
- 0x034, 0x000090AF,
- 0x034, 0x00008070,
- 0x034, 0x0000706D,
- 0x034, 0x00006050,
- 0x034, 0x0000504D,
- 0x034, 0x0000404A,
- 0x034, 0x00003047,
- 0x034, 0x0000200A,
- 0x034, 0x00001007,
- 0x034, 0x00000004,
- 0xFF0F02C0, 0xCDEF,
- 0x034, 0x0000A0B2,
- 0x034, 0x000090AF,
- 0x034, 0x00008070,
- 0x034, 0x0000706D,
- 0x034, 0x00006050,
- 0x034, 0x0000504D,
- 0x034, 0x0000404A,
- 0x034, 0x00003047,
- 0x034, 0x0000200A,
- 0x034, 0x00001007,
- 0x034, 0x00000004,
- 0xFF0F07D8, 0xCDEF,
+ 0x034, 0x00025DE6,
+ 0x034, 0x00024DE3,
+ 0x034, 0x000238C8,
+ 0x034, 0x000228C5,
+ 0x034, 0x000218C2,
+ 0x034, 0x000208C0,
+ 0xB0000000, 0x00000000,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x034, 0x0000A0B2,
0x034, 0x000090AF,
0x034, 0x00008070,
@@ -1508,72 +1176,33 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x034, 0x0000200A,
0x034, 0x00001007,
0x034, 0x00000004,
- 0xFF0F07D0, 0xCDEF,
- 0x034, 0x0000A0B2,
- 0x034, 0x000090AF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0B3,
+ 0x034, 0x000090B0,
0x034, 0x00008070,
0x034, 0x0000706D,
- 0x034, 0x00006050,
- 0x034, 0x0000504D,
- 0x034, 0x0000404A,
- 0x034, 0x00003047,
- 0x034, 0x0000200A,
- 0x034, 0x00001007,
- 0x034, 0x00000004,
- 0xCDCDCDCD, 0xCDCD,
+ 0x034, 0x0000604D,
+ 0x034, 0x0000504A,
+ 0x034, 0x00004047,
+ 0x034, 0x00003044,
+ 0x034, 0x00002007,
+ 0x034, 0x00001004,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
0x034, 0x0000AFF7,
0x034, 0x00009DF7,
0x034, 0x00008DF4,
0x034, 0x00007DF1,
0x034, 0x00006DEE,
- 0x034, 0x00005DCD,
- 0x034, 0x00004CEB,
+ 0x034, 0x00005DEB,
+ 0x034, 0x00004DE8,
0x034, 0x000038CC,
- 0x034, 0x0000288B,
- 0x034, 0x0000184C,
- 0x034, 0x0000044C,
- 0xFF0F0740, 0xDEAD,
- 0x0EF, 0x00000000,
- 0xFF0F0740, 0xABCD,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000040,
- 0x035, 0x000001C5,
- 0x035, 0x000081C5,
- 0x035, 0x000101C5,
- 0x035, 0x00020174,
- 0x035, 0x00028174,
- 0x035, 0x00030174,
- 0x035, 0x00040185,
- 0x035, 0x00048185,
- 0x035, 0x00050185,
- 0x0EF, 0x00000000,
- 0xFF0F01C0, 0xCDEF,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000040,
- 0x035, 0x000001C5,
- 0x035, 0x000081C5,
- 0x035, 0x000101C5,
- 0x035, 0x00020174,
- 0x035, 0x00028174,
- 0x035, 0x00030174,
- 0x035, 0x00040185,
- 0x035, 0x00048185,
- 0x035, 0x00050185,
- 0x0EF, 0x00000000,
- 0xFF0F02C0, 0xCDEF,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000040,
- 0x035, 0x000001C5,
- 0x035, 0x000081C5,
- 0x035, 0x000101C5,
- 0x035, 0x00020174,
- 0x035, 0x00028174,
- 0x035, 0x00030174,
- 0x035, 0x00040185,
- 0x035, 0x00048185,
- 0x035, 0x00050185,
+ 0x034, 0x000028C9,
+ 0x034, 0x000018C6,
+ 0x034, 0x000008C3,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
- 0xFF0F07D8, 0xCDEF,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000040,
0x035, 0x000001C5,
@@ -1586,7 +1215,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x035, 0x00048185,
0x035, 0x00050185,
0x0EF, 0x00000000,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000040,
0x035, 0x000001C5,
@@ -1599,51 +1228,21 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x035, 0x00048185,
0x035, 0x00050185,
0x0EF, 0x00000000,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000040,
- 0x035, 0x00000186,
- 0x035, 0x00008186,
- 0x035, 0x00010185,
- 0x035, 0x000201D5,
- 0x035, 0x000281D5,
- 0x035, 0x000301D5,
- 0x035, 0x000401D5,
- 0x035, 0x000481D5,
- 0x035, 0x000501D5,
+ 0x035, 0x00000188,
+ 0x035, 0x00008147,
+ 0x035, 0x00010147,
+ 0x035, 0x000201D7,
+ 0x035, 0x000281D7,
+ 0x035, 0x000301D7,
+ 0x035, 0x000401D8,
+ 0x035, 0x000481D8,
+ 0x035, 0x000501D8,
0x0EF, 0x00000000,
- 0xFF0F0740, 0xDEAD,
- 0xFF0F0740, 0xABCD,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000010,
- 0x036, 0x00005B8B,
- 0x036, 0x0000DB8B,
- 0x036, 0x00015B8B,
- 0x036, 0x0001DB8B,
- 0x036, 0x000262DB,
- 0x036, 0x0002E2DB,
- 0x036, 0x000362DB,
- 0x036, 0x0003E2DB,
- 0x036, 0x0004553B,
- 0x036, 0x0004D53B,
- 0x036, 0x0005553B,
- 0x036, 0x0005D53B,
- 0xFF0F01C0, 0xCDEF,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000010,
- 0x036, 0x00005B8B,
- 0x036, 0x0000DB8B,
- 0x036, 0x00015B8B,
- 0x036, 0x0001DB8B,
- 0x036, 0x000262DB,
- 0x036, 0x0002E2DB,
- 0x036, 0x000362DB,
- 0x036, 0x0003E2DB,
- 0x036, 0x0004553B,
- 0x036, 0x0004D53B,
- 0x036, 0x0005553B,
- 0x036, 0x0005D53B,
- 0xFF0F02C0, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
0x036, 0x00005B8B,
@@ -1658,7 +1257,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x036, 0x0004D53B,
0x036, 0x0005553B,
0x036, 0x0005D53B,
- 0xFF0F07D8, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
0x036, 0x00005B8B,
@@ -1673,109 +1272,71 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x036, 0x0004D53B,
0x036, 0x0005553B,
0x036, 0x0005D53B,
- 0xFF0F07D0, 0xCDEF,
- 0x018, 0x0001712A,
- 0x0EF, 0x00000010,
- 0x036, 0x00005B8B,
- 0x036, 0x0000DB8B,
- 0x036, 0x00015B8B,
- 0x036, 0x0001DB8B,
- 0x036, 0x000262DB,
- 0x036, 0x0002E2DB,
- 0x036, 0x000362DB,
- 0x036, 0x0003E2DB,
- 0x036, 0x0004553B,
- 0x036, 0x0004D53B,
- 0x036, 0x0005553B,
- 0x036, 0x0005D53B,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000010,
0x036, 0x00084EB4,
- 0x036, 0x0008C9B4,
- 0x036, 0x000949B4,
- 0x036, 0x0009C9B4,
- 0x036, 0x000A4935,
- 0x036, 0x000AC935,
- 0x036, 0x000B4935,
- 0x036, 0x000BC935,
- 0x036, 0x000C4EB4,
- 0x036, 0x000CCEB4,
- 0x036, 0x000D4EB4,
- 0x036, 0x000DCEB4,
- 0xFF0F0740, 0xDEAD,
+ 0x036, 0x0008CC35,
+ 0x036, 0x00094C35,
+ 0x036, 0x0009CC35,
+ 0x036, 0x000A4C35,
+ 0x036, 0x000ACC35,
+ 0x036, 0x000B4C35,
+ 0x036, 0x000BCC35,
+ 0x036, 0x000C4C34,
+ 0x036, 0x000CCC35,
+ 0x036, 0x000D4C35,
+ 0x036, 0x000DCC35,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x0EF, 0x00000008,
- 0xFF0F0740, 0xABCD,
- 0x03C, 0x000002DC,
- 0x03C, 0x00000524,
- 0x03C, 0x00000902,
- 0xFF0F01C0, 0xCDEF,
- 0x03C, 0x000002DC,
- 0x03C, 0x00000524,
- 0x03C, 0x00000902,
- 0xFF0F02C0, 0xCDEF,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x03C, 0x000002DC,
0x03C, 0x00000524,
0x03C, 0x00000902,
- 0xFF0F07D8, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x03C, 0x000002DC,
0x03C, 0x00000524,
0x03C, 0x00000902,
- 0xFF0F07D0, 0xCDEF,
- 0x03C, 0x000002DC,
- 0x03C, 0x00000524,
- 0x03C, 0x00000902,
- 0xCDCDCDCD, 0xCDCD,
- 0x03C, 0x000002AA,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x000002A8,
0x03C, 0x000005A2,
0x03C, 0x00000880,
- 0xFF0F0740, 0xDEAD,
+ 0xB0000000, 0x00000000,
0x0EF, 0x00000000,
0x018, 0x0001712A,
0x0EF, 0x00000002,
0x0DF, 0x00000080,
- 0xFF0F0740, 0xABCD,
- 0x061, 0x000EAC43,
- 0x062, 0x00038F47,
- 0x063, 0x00031157,
- 0x064, 0x0001C4AC,
- 0x065, 0x000931D1,
- 0xFF0F01C0, 0xCDEF,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000EAC43,
0x062, 0x00038F47,
0x063, 0x00031157,
0x064, 0x0001C4AC,
0x065, 0x000931D1,
- 0xFF0F02C0, 0xCDEF,
+ 0x90000008, 0x05000000, 0x40000000, 0x00000000,
0x061, 0x000EAC43,
0x062, 0x00038F47,
0x063, 0x00031157,
0x064, 0x0001C4AC,
- 0x065, 0x000931D1,
- 0xFF0F07D8, 0xCDEF,
+ 0x065, 0x000931D2,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
0x061, 0x000EAC43,
0x062, 0x00038F47,
0x063, 0x00031157,
0x064, 0x0001C4AC,
0x065, 0x000931D1,
- 0xFF0F07D0, 0xCDEF,
- 0x061, 0x000EAC43,
- 0x062, 0x00038F47,
- 0x063, 0x00031157,
- 0x064, 0x0001C4AC,
- 0x065, 0x000931D1,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x061, 0x000E5D53,
0x062, 0x00038FCD,
- 0x063, 0x000314EB,
+ 0x063, 0x000114EB,
0x064, 0x000196AC,
- 0x065, 0x000931D7,
- 0xFF0F0740, 0xDEAD,
+ 0x065, 0x000911D7,
+ 0xB0000000, 0x00000000,
0x008, 0x00008400,
-
};
+u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32);
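/*
 * Editor's note (not part of the patch): the tables above drop the old
 * 0xFF0Fxxxx/0xCDCDCDCD/0xDEAD condition markers in favour of a header
 * word whose top nibble encodes structure: 0x8... opens an "if" branch,
 * 0x9... an "else if", 0xA0000000 the trailing "else", and 0xB0000000
 * closes the block. A minimal walker sketch under that assumption; the
 * four-word branch headers and the match() callback are simplified
 * stand-ins for the driver's real condition check against chip/RFE type,
 * and walk_cond_table() itself is a hypothetical name, not driver API.
 */
#include <linux/types.h>

static void walk_cond_table(const u32 *tbl, u32 len,
			    bool (*match)(u32 v1, u32 v2),
			    void (*wr)(u32 addr, u32 data))
{
	bool in_blk = false;	/* inside an if/else-if/else chain */
	bool hit = false;	/* an earlier branch already matched */
	bool exec = true;	/* current branch is the taken one */
	u32 i = 0;

	while (i + 1 < len) {
		u32 v1 = tbl[i], v2 = tbl[i + 1];

		switch (v1 & 0xF0000000) {
		case 0x80000000:		/* if (cond) */
			in_blk = true;
			exec = hit = match(v1, v2);
			i += 4;			/* 4-word header */
			break;
		case 0x90000000:		/* else if (cond) */
			exec = !hit && match(v1, v2);
			hit |= exec;
			i += 4;
			break;
		case 0xA0000000:		/* else */
			exec = !hit;
			i += 2;			/* 2-word header */
			break;
		case 0xB0000000:		/* endif */
			in_blk = false;
			exec = true;
			i += 2;
			break;
		default:			/* plain addr/data pair */
			if (!in_blk || exec)
				wr(v1, v2);
			i += 2;
			break;
		}
	}
}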
+
u32 RTL8821AE_RADIOA_ARRAY[] = {
0x018, 0x0001712A,
0x056, 0x00051CF2,
@@ -2285,16 +1846,16 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x0EF, 0x00000000,
0x0EF, 0x00000100,
0x034, 0x0000ADF3,
- 0x034, 0x00009DEF,
- 0x034, 0x00008DEC,
- 0x034, 0x00007DE9,
- 0x034, 0x00006CED,
- 0x034, 0x00005CE9,
- 0x034, 0x000044E9,
- 0x034, 0x000034E6,
- 0x034, 0x0000246A,
- 0x034, 0x00001467,
- 0x034, 0x00000068,
+ 0x034, 0x00009DF0,
+ 0x034, 0x00008D70,
+ 0x034, 0x00007D6D,
+ 0x034, 0x00006CEE,
+ 0x034, 0x00005CCC,
+ 0x034, 0x000044EC,
+ 0x034, 0x000034AC,
+ 0x034, 0x0000246D,
+ 0x034, 0x0000106F,
+ 0x034, 0x0000006C,
0x0EF, 0x00000000,
0x0ED, 0x00000010,
0x044, 0x0000ADF2,
@@ -2365,18 +1926,21 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
0x0FE, 0x00000000,
0x0FE, 0x00000000,
0x018, 0x0001712A,
+
};
+u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32);
+
u32 RTL8812AE_MAC_REG_ARRAY[] = {
0x010, 0x0000000C,
- 0xFF0F0180, 0xABCD,
- 0x025, 0x0000000F,
- 0xFF0F01C0, 0xCDEF,
+ 0x80000200, 0x00000000, 0x40000000, 0x00000000,
+ 0x011, 0x00000066,
+ 0xA0000000, 0x00000000,
+ 0x011, 0x0000005A,
+ 0xB0000000, 0x00000000,
0x025, 0x0000000F,
- 0xCDCDCDCD, 0xCDCD,
- 0x025, 0x0000006F,
- 0xFF0F0180, 0xDEAD,
0x072, 0x00000000,
+ 0x420, 0x00000080,
0x428, 0x0000000A,
0x429, 0x00000010,
0x430, 0x00000000,
@@ -2443,7 +2007,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
0x559, 0x00000002,
0x55C, 0x00000050,
0x55D, 0x000000FF,
- 0x604, 0x00000001,
+ 0x604, 0x00000009,
0x605, 0x00000030,
0x607, 0x00000003,
0x608, 0x0000000E,
@@ -2475,9 +2039,10 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
0x70A, 0x00000065,
0x70B, 0x00000087,
0x718, 0x00000040,
-
};
+u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32);
+
u32 RTL8821AE_MAC_REG_ARRAY[] = {
0x428, 0x0000000A,
0x429, 0x00000010,
@@ -2578,8 +2143,10 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = {
0x718, 0x00000040,
};
+u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32);
+
u32 RTL8812AE_AGC_TAB_ARRAY[] = {
- 0xFF0F07D8, 0xABCD,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
0x81C, 0xFC000001,
0x81C, 0xFB020001,
0x81C, 0xFA040001,
@@ -2644,7 +2211,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x81C, 0x217A0001,
0x81C, 0x217C0001,
0x81C, 0x217E0001,
- 0xFF0F07D0, 0xCDEF,
+ 0x90000001, 0x00000005, 0x40000000, 0x00000000,
0x81C, 0xF9000001,
0x81C, 0xF8020001,
0x81C, 0xF7040001,
@@ -2709,7 +2276,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x81C, 0x217A0001,
0x81C, 0x217C0001,
0x81C, 0x217E0001,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x81C, 0xFF000001,
0x81C, 0xFF020001,
0x81C, 0xFF040001,
@@ -2774,333 +2341,8 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x81C, 0x417A0001,
0x81C, 0x417C0001,
0x81C, 0x417E0001,
- 0xFF0F07D8, 0xDEAD,
- 0xFF0F0180, 0xABCD,
- 0x81C, 0xFC800001,
- 0x81C, 0xFB820001,
- 0x81C, 0xFA840001,
- 0x81C, 0xF9860001,
- 0x81C, 0xF8880001,
- 0x81C, 0xF78A0001,
- 0x81C, 0xF68C0001,
- 0x81C, 0xF58E0001,
- 0x81C, 0xF4900001,
- 0x81C, 0xF3920001,
- 0x81C, 0xF2940001,
- 0x81C, 0xF1960001,
- 0x81C, 0xF0980001,
- 0x81C, 0xEF9A0001,
- 0x81C, 0xEE9C0001,
- 0x81C, 0xED9E0001,
- 0x81C, 0xECA00001,
- 0x81C, 0xEBA20001,
- 0x81C, 0xEAA40001,
- 0x81C, 0xE9A60001,
- 0x81C, 0xE8A80001,
- 0x81C, 0xE7AA0001,
- 0x81C, 0xE6AC0001,
- 0x81C, 0xE5AE0001,
- 0x81C, 0xE4B00001,
- 0x81C, 0xE3B20001,
- 0x81C, 0xA8B40001,
- 0x81C, 0xA7B60001,
- 0x81C, 0xA6B80001,
- 0x81C, 0xA5BA0001,
- 0x81C, 0xA4BC0001,
- 0x81C, 0xA3BE0001,
- 0x81C, 0xA2C00001,
- 0x81C, 0xA1C20001,
- 0x81C, 0x68C40001,
- 0x81C, 0x67C60001,
- 0x81C, 0x66C80001,
- 0x81C, 0x65CA0001,
- 0x81C, 0x64CC0001,
- 0x81C, 0x47CE0001,
- 0x81C, 0x46D00001,
- 0x81C, 0x45D20001,
- 0x81C, 0x44D40001,
- 0x81C, 0x43D60001,
- 0x81C, 0x42D80001,
- 0x81C, 0x08DA0001,
- 0x81C, 0x07DC0001,
- 0x81C, 0x06DE0001,
- 0x81C, 0x05E00001,
- 0x81C, 0x04E20001,
- 0x81C, 0x03E40001,
- 0x81C, 0x02E60001,
- 0x81C, 0x01E80001,
- 0x81C, 0x01EA0001,
- 0x81C, 0x01EC0001,
- 0x81C, 0x01EE0001,
- 0x81C, 0x01F00001,
- 0x81C, 0x01F20001,
- 0x81C, 0x01F40001,
- 0x81C, 0x01F60001,
- 0x81C, 0x01F80001,
- 0x81C, 0x01FA0001,
- 0x81C, 0x01FC0001,
- 0x81C, 0x01FE0001,
- 0xFF0F0280, 0xCDEF,
- 0x81C, 0xFC800001,
- 0x81C, 0xFB820001,
- 0x81C, 0xFA840001,
- 0x81C, 0xF9860001,
- 0x81C, 0xF8880001,
- 0x81C, 0xF78A0001,
- 0x81C, 0xF68C0001,
- 0x81C, 0xF58E0001,
- 0x81C, 0xF4900001,
- 0x81C, 0xF3920001,
- 0x81C, 0xF2940001,
- 0x81C, 0xF1960001,
- 0x81C, 0xF0980001,
- 0x81C, 0xEF9A0001,
- 0x81C, 0xEE9C0001,
- 0x81C, 0xED9E0001,
- 0x81C, 0xECA00001,
- 0x81C, 0xEBA20001,
- 0x81C, 0xEAA40001,
- 0x81C, 0xE9A60001,
- 0x81C, 0xE8A80001,
- 0x81C, 0xE7AA0001,
- 0x81C, 0xE6AC0001,
- 0x81C, 0xE5AE0001,
- 0x81C, 0xE4B00001,
- 0x81C, 0xE3B20001,
- 0x81C, 0xA8B40001,
- 0x81C, 0xA7B60001,
- 0x81C, 0xA6B80001,
- 0x81C, 0xA5BA0001,
- 0x81C, 0xA4BC0001,
- 0x81C, 0xA3BE0001,
- 0x81C, 0xA2C00001,
- 0x81C, 0xA1C20001,
- 0x81C, 0x68C40001,
- 0x81C, 0x67C60001,
- 0x81C, 0x66C80001,
- 0x81C, 0x65CA0001,
- 0x81C, 0x64CC0001,
- 0x81C, 0x47CE0001,
- 0x81C, 0x46D00001,
- 0x81C, 0x45D20001,
- 0x81C, 0x44D40001,
- 0x81C, 0x43D60001,
- 0x81C, 0x42D80001,
- 0x81C, 0x08DA0001,
- 0x81C, 0x07DC0001,
- 0x81C, 0x06DE0001,
- 0x81C, 0x05E00001,
- 0x81C, 0x04E20001,
- 0x81C, 0x03E40001,
- 0x81C, 0x02E60001,
- 0x81C, 0x01E80001,
- 0x81C, 0x01EA0001,
- 0x81C, 0x01EC0001,
- 0x81C, 0x01EE0001,
- 0x81C, 0x01F00001,
- 0x81C, 0x01F20001,
- 0x81C, 0x01F40001,
- 0x81C, 0x01F60001,
- 0x81C, 0x01F80001,
- 0x81C, 0x01FA0001,
- 0x81C, 0x01FC0001,
- 0x81C, 0x01FE0001,
- 0xFF0F01C0, 0xCDEF,
- 0x81C, 0xFC800001,
- 0x81C, 0xFB820001,
- 0x81C, 0xFA840001,
- 0x81C, 0xF9860001,
- 0x81C, 0xF8880001,
- 0x81C, 0xF78A0001,
- 0x81C, 0xF68C0001,
- 0x81C, 0xF58E0001,
- 0x81C, 0xF4900001,
- 0x81C, 0xF3920001,
- 0x81C, 0xF2940001,
- 0x81C, 0xF1960001,
- 0x81C, 0xF0980001,
- 0x81C, 0xEF9A0001,
- 0x81C, 0xEE9C0001,
- 0x81C, 0xED9E0001,
- 0x81C, 0xECA00001,
- 0x81C, 0xEBA20001,
- 0x81C, 0xEAA40001,
- 0x81C, 0xE9A60001,
- 0x81C, 0xE8A80001,
- 0x81C, 0xE7AA0001,
- 0x81C, 0xE6AC0001,
- 0x81C, 0xE5AE0001,
- 0x81C, 0xE4B00001,
- 0x81C, 0xE3B20001,
- 0x81C, 0xA8B40001,
- 0x81C, 0xA7B60001,
- 0x81C, 0xA6B80001,
- 0x81C, 0xA5BA0001,
- 0x81C, 0xA4BC0001,
- 0x81C, 0xA3BE0001,
- 0x81C, 0xA2C00001,
- 0x81C, 0xA1C20001,
- 0x81C, 0x68C40001,
- 0x81C, 0x67C60001,
- 0x81C, 0x66C80001,
- 0x81C, 0x65CA0001,
- 0x81C, 0x64CC0001,
- 0x81C, 0x47CE0001,
- 0x81C, 0x46D00001,
- 0x81C, 0x45D20001,
- 0x81C, 0x44D40001,
- 0x81C, 0x43D60001,
- 0x81C, 0x42D80001,
- 0x81C, 0x08DA0001,
- 0x81C, 0x07DC0001,
- 0x81C, 0x06DE0001,
- 0x81C, 0x05E00001,
- 0x81C, 0x04E20001,
- 0x81C, 0x03E40001,
- 0x81C, 0x02E60001,
- 0x81C, 0x01E80001,
- 0x81C, 0x01EA0001,
- 0x81C, 0x01EC0001,
- 0x81C, 0x01EE0001,
- 0x81C, 0x01F00001,
- 0x81C, 0x01F20001,
- 0x81C, 0x01F40001,
- 0x81C, 0x01F60001,
- 0x81C, 0x01F80001,
- 0x81C, 0x01FA0001,
- 0x81C, 0x01FC0001,
- 0x81C, 0x01FE0001,
- 0xFF0F02C0, 0xCDEF,
- 0x81C, 0xFC800001,
- 0x81C, 0xFB820001,
- 0x81C, 0xFA840001,
- 0x81C, 0xF9860001,
- 0x81C, 0xF8880001,
- 0x81C, 0xF78A0001,
- 0x81C, 0xF68C0001,
- 0x81C, 0xF58E0001,
- 0x81C, 0xF4900001,
- 0x81C, 0xF3920001,
- 0x81C, 0xF2940001,
- 0x81C, 0xF1960001,
- 0x81C, 0xF0980001,
- 0x81C, 0xEF9A0001,
- 0x81C, 0xEE9C0001,
- 0x81C, 0xED9E0001,
- 0x81C, 0xECA00001,
- 0x81C, 0xEBA20001,
- 0x81C, 0xEAA40001,
- 0x81C, 0xE9A60001,
- 0x81C, 0xE8A80001,
- 0x81C, 0xE7AA0001,
- 0x81C, 0xE6AC0001,
- 0x81C, 0xE5AE0001,
- 0x81C, 0xE4B00001,
- 0x81C, 0xE3B20001,
- 0x81C, 0xA8B40001,
- 0x81C, 0xA7B60001,
- 0x81C, 0xA6B80001,
- 0x81C, 0xA5BA0001,
- 0x81C, 0xA4BC0001,
- 0x81C, 0xA3BE0001,
- 0x81C, 0xA2C00001,
- 0x81C, 0xA1C20001,
- 0x81C, 0x68C40001,
- 0x81C, 0x67C60001,
- 0x81C, 0x66C80001,
- 0x81C, 0x65CA0001,
- 0x81C, 0x64CC0001,
- 0x81C, 0x47CE0001,
- 0x81C, 0x46D00001,
- 0x81C, 0x45D20001,
- 0x81C, 0x44D40001,
- 0x81C, 0x43D60001,
- 0x81C, 0x42D80001,
- 0x81C, 0x08DA0001,
- 0x81C, 0x07DC0001,
- 0x81C, 0x06DE0001,
- 0x81C, 0x05E00001,
- 0x81C, 0x04E20001,
- 0x81C, 0x03E40001,
- 0x81C, 0x02E60001,
- 0x81C, 0x01E80001,
- 0x81C, 0x01EA0001,
- 0x81C, 0x01EC0001,
- 0x81C, 0x01EE0001,
- 0x81C, 0x01F00001,
- 0x81C, 0x01F20001,
- 0x81C, 0x01F40001,
- 0x81C, 0x01F60001,
- 0x81C, 0x01F80001,
- 0x81C, 0x01FA0001,
- 0x81C, 0x01FC0001,
- 0x81C, 0x01FE0001,
- 0xFF0F07D8, 0xCDEF,
- 0x81C, 0xFC800001,
- 0x81C, 0xFB820001,
- 0x81C, 0xFA840001,
- 0x81C, 0xF9860001,
- 0x81C, 0xF8880001,
- 0x81C, 0xF78A0001,
- 0x81C, 0xF68C0001,
- 0x81C, 0xF58E0001,
- 0x81C, 0xF4900001,
- 0x81C, 0xF3920001,
- 0x81C, 0xF2940001,
- 0x81C, 0xF1960001,
- 0x81C, 0xF0980001,
- 0x81C, 0xEF9A0001,
- 0x81C, 0xEE9C0001,
- 0x81C, 0xED9E0001,
- 0x81C, 0xECA00001,
- 0x81C, 0xEBA20001,
- 0x81C, 0xEAA40001,
- 0x81C, 0xE9A60001,
- 0x81C, 0xE8A80001,
- 0x81C, 0xE7AA0001,
- 0x81C, 0xE6AC0001,
- 0x81C, 0xE5AE0001,
- 0x81C, 0xE4B00001,
- 0x81C, 0xE3B20001,
- 0x81C, 0xA8B40001,
- 0x81C, 0xA7B60001,
- 0x81C, 0xA6B80001,
- 0x81C, 0xA5BA0001,
- 0x81C, 0xA4BC0001,
- 0x81C, 0xA3BE0001,
- 0x81C, 0xA2C00001,
- 0x81C, 0xA1C20001,
- 0x81C, 0x68C40001,
- 0x81C, 0x67C60001,
- 0x81C, 0x66C80001,
- 0x81C, 0x65CA0001,
- 0x81C, 0x64CC0001,
- 0x81C, 0x47CE0001,
- 0x81C, 0x46D00001,
- 0x81C, 0x45D20001,
- 0x81C, 0x44D40001,
- 0x81C, 0x43D60001,
- 0x81C, 0x42D80001,
- 0x81C, 0x08DA0001,
- 0x81C, 0x07DC0001,
- 0x81C, 0x06DE0001,
- 0x81C, 0x05E00001,
- 0x81C, 0x04E20001,
- 0x81C, 0x03E40001,
- 0x81C, 0x02E60001,
- 0x81C, 0x01E80001,
- 0x81C, 0x01EA0001,
- 0x81C, 0x01EC0001,
- 0x81C, 0x01EE0001,
- 0x81C, 0x01F00001,
- 0x81C, 0x01F20001,
- 0x81C, 0x01F40001,
- 0x81C, 0x01F60001,
- 0x81C, 0x01F80001,
- 0x81C, 0x01FA0001,
- 0x81C, 0x01FC0001,
- 0x81C, 0x01FE0001,
- 0xFF0F07D0, 0xCDEF,
+ 0xB0000000, 0x00000000,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
0x81C, 0xFC800001,
0x81C, 0xFB820001,
0x81C, 0xFA840001,
@@ -3165,7 +2407,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x81C, 0x01FA0001,
0x81C, 0x01FC0001,
0x81C, 0x01FE0001,
- 0xCDCDCDCD, 0xCDCD,
+ 0xA0000000, 0x00000000,
0x81C, 0xFF800001,
0x81C, 0xFF820001,
0x81C, 0xFF840001,
@@ -3230,14 +2472,16 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x81C, 0x01FA0001,
0x81C, 0x01FC0001,
0x81C, 0x01FE0001,
- 0xFF0F0180, 0xDEAD,
+ 0xB0000000, 0x00000000,
0xC50, 0x00000022,
0xC50, 0x00000020,
0xE50, 0x00000022,
0xE50, 0x00000020,
-
};
+u32 RTL8812AE_AGC_TAB_1TARRAYLEN =
+ sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32);
+
u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0xBF000001,
0x81C, 0xBF020001,
@@ -3430,9 +2674,11 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0x017E0101,
0xC50, 0x00000022,
0xC50, 0x00000020,
-
};
+u32 RTL8821AE_AGC_TAB_1TARRAYLEN =
+ sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32);
+
/******************************************************************************
* TXPWR_LMT.TXT
******************************************************************************/
@@ -3717,9 +2963,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
"FCC", "5G", "20M", "OFDM", "1T", "100", "30",
"ETSI", "5G", "20M", "OFDM", "1T", "100", "32",
"MKK", "5G", "20M", "OFDM", "1T", "100", "32",
- "FCC", "5G", "20M", "OFDM", "1T", "114", "30",
- "ETSI", "5G", "20M", "OFDM", "1T", "114", "32",
- "MKK", "5G", "20M", "OFDM", "1T", "114", "32",
+ "FCC", "5G", "20M", "OFDM", "1T", "104", "30",
+ "ETSI", "5G", "20M", "OFDM", "1T", "104", "32",
+ "MKK", "5G", "20M", "OFDM", "1T", "104", "32",
"FCC", "5G", "20M", "OFDM", "1T", "108", "32",
"ETSI", "5G", "20M", "OFDM", "1T", "108", "32",
"MKK", "5G", "20M", "OFDM", "1T", "108", "32",
@@ -3789,9 +3035,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
"FCC", "5G", "20M", "HT", "1T", "100", "30",
"ETSI", "5G", "20M", "HT", "1T", "100", "32",
"MKK", "5G", "20M", "HT", "1T", "100", "32",
- "FCC", "5G", "20M", "HT", "1T", "114", "30",
- "ETSI", "5G", "20M", "HT", "1T", "114", "32",
- "MKK", "5G", "20M", "HT", "1T", "114", "32",
+ "FCC", "5G", "20M", "HT", "1T", "104", "30",
+ "ETSI", "5G", "20M", "HT", "1T", "104", "32",
+ "MKK", "5G", "20M", "HT", "1T", "104", "32",
"FCC", "5G", "20M", "HT", "1T", "108", "32",
"ETSI", "5G", "20M", "HT", "1T", "108", "32",
"MKK", "5G", "20M", "HT", "1T", "108", "32",
@@ -3861,9 +3107,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
"FCC", "5G", "20M", "HT", "2T", "100", "28",
"ETSI", "5G", "20M", "HT", "2T", "100", "30",
"MKK", "5G", "20M", "HT", "2T", "100", "30",
- "FCC", "5G", "20M", "HT", "2T", "114", "28",
- "ETSI", "5G", "20M", "HT", "2T", "114", "30",
- "MKK", "5G", "20M", "HT", "2T", "114", "30",
+ "FCC", "5G", "20M", "HT", "2T", "104", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "104", "30",
+ "MKK", "5G", "20M", "HT", "2T", "104", "30",
"FCC", "5G", "20M", "HT", "2T", "108", "30",
"ETSI", "5G", "20M", "HT", "2T", "108", "30",
"MKK", "5G", "20M", "HT", "2T", "108", "30",
@@ -4004,6 +3250,8 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
"MKK", "5G", "80M", "VHT", "2T", "155", "63"
};
+u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *);
+
u8 *RTL8821AE_TXPWR_LMT[] = {
"FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
"ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
@@ -4284,9 +3532,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
"FCC", "5G", "20M", "OFDM", "1T", "100", "32",
"ETSI", "5G", "20M", "OFDM", "1T", "100", "30",
"MKK", "5G", "20M", "OFDM", "1T", "100", "30",
- "FCC", "5G", "20M", "OFDM", "1T", "114", "32",
- "ETSI", "5G", "20M", "OFDM", "1T", "114", "30",
- "MKK", "5G", "20M", "OFDM", "1T", "114", "30",
+ "FCC", "5G", "20M", "OFDM", "1T", "104", "32",
+ "ETSI", "5G", "20M", "OFDM", "1T", "104", "30",
+ "MKK", "5G", "20M", "OFDM", "1T", "104", "30",
"FCC", "5G", "20M", "OFDM", "1T", "108", "32",
"ETSI", "5G", "20M", "OFDM", "1T", "108", "30",
"MKK", "5G", "20M", "OFDM", "1T", "108", "30",
@@ -4356,9 +3604,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
"FCC", "5G", "20M", "HT", "1T", "100", "32",
"ETSI", "5G", "20M", "HT", "1T", "100", "30",
"MKK", "5G", "20M", "HT", "1T", "100", "30",
- "FCC", "5G", "20M", "HT", "1T", "114", "32",
- "ETSI", "5G", "20M", "HT", "1T", "114", "30",
- "MKK", "5G", "20M", "HT", "1T", "114", "30",
+ "FCC", "5G", "20M", "HT", "1T", "104", "32",
+ "ETSI", "5G", "20M", "HT", "1T", "104", "30",
+ "MKK", "5G", "20M", "HT", "1T", "104", "30",
"FCC", "5G", "20M", "HT", "1T", "108", "32",
"ETSI", "5G", "20M", "HT", "1T", "108", "30",
"MKK", "5G", "20M", "HT", "1T", "108", "30",
@@ -4428,9 +3676,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
"FCC", "5G", "20M", "HT", "2T", "100", "28",
"ETSI", "5G", "20M", "HT", "2T", "100", "30",
"MKK", "5G", "20M", "HT", "2T", "100", "30",
- "FCC", "5G", "20M", "HT", "2T", "114", "28",
- "ETSI", "5G", "20M", "HT", "2T", "114", "30",
- "MKK", "5G", "20M", "HT", "2T", "114", "30",
+ "FCC", "5G", "20M", "HT", "2T", "104", "28",
+ "ETSI", "5G", "20M", "HT", "2T", "104", "30",
+ "MKK", "5G", "20M", "HT", "2T", "104", "30",
"FCC", "5G", "20M", "HT", "2T", "108", "30",
"ETSI", "5G", "20M", "HT", "2T", "108", "30",
"MKK", "5G", "20M", "HT", "2T", "108", "30",
@@ -4570,3 +3818,5 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
"ETSI", "5G", "80M", "VHT", "2T", "155", "30",
"MKK", "5G", "80M", "VHT", "2T", "155", "63"
};
+
+u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
index 24bcff6bc507..36c2388b60bc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
@@ -29,32 +29,30 @@
#define __RTL8821AE_TABLE__H_
#include <linux/types.h>
-#define RTL8821AEPHY_REG_1TARRAYLEN 344
+extern u32 RTL8821AE_PHY_REG_1TARRAYLEN;
extern u32 RTL8821AE_PHY_REG_ARRAY[];
-#define RTL8812AEPHY_REG_1TARRAYLEN 490
+extern u32 RTL8812AE_PHY_REG_1TARRAYLEN;
extern u32 RTL8812AE_PHY_REG_ARRAY[];
-#define RTL8821AEPHY_REG_ARRAY_PGLEN 90
+extern u32 RTL8821AE_PHY_REG_ARRAY_PGLEN;
extern u32 RTL8821AE_PHY_REG_ARRAY_PG[];
-#define RTL8812AEPHY_REG_ARRAY_PGLEN 276
+extern u32 RTL8812AE_PHY_REG_ARRAY_PGLEN;
extern u32 RTL8812AE_PHY_REG_ARRAY_PG[];
-/* #define RTL8723BE_RADIOA_1TARRAYLEN 206 */
-/* extern u8 *RTL8821AE_TXPWR_LMT_ARRAY[]; */
-#define RTL8812AE_RADIOA_1TARRAYLEN 1264
+extern u32 RTL8812AE_RADIOA_1TARRAYLEN;
extern u32 RTL8812AE_RADIOA_ARRAY[];
-#define RTL8812AE_RADIOB_1TARRAYLEN 1240
+extern u32 RTL8812AE_RADIOB_1TARRAYLEN;
extern u32 RTL8812AE_RADIOB_ARRAY[];
-#define RTL8821AE_RADIOA_1TARRAYLEN 1176
+extern u32 RTL8821AE_RADIOA_1TARRAYLEN;
extern u32 RTL8821AE_RADIOA_ARRAY[];
-#define RTL8821AEMAC_1T_ARRAYLEN 194
+extern u32 RTL8821AE_MAC_1T_ARRAYLEN;
extern u32 RTL8821AE_MAC_REG_ARRAY[];
-#define RTL8812AEMAC_1T_ARRAYLEN 214
+extern u32 RTL8812AE_MAC_1T_ARRAYLEN;
extern u32 RTL8812AE_MAC_REG_ARRAY[];
-#define RTL8821AEAGCTAB_1TARRAYLEN 382
+extern u32 RTL8821AE_AGC_TAB_1TARRAYLEN;
extern u32 RTL8821AE_AGC_TAB_ARRAY[];
-#define RTL8812AEAGCTAB_1TARRAYLEN 1312
+extern u32 RTL8812AE_AGC_TAB_1TARRAYLEN;
extern u32 RTL8812AE_AGC_TAB_ARRAY[];
-#define RTL8812AE_TXPWR_LMT_ARRAY_LEN 3948
+extern u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN;
extern u8 *RTL8812AE_TXPWR_LMT[];
-#define RTL8821AE_TXPWR_LMT_ARRAY_LEN 3948
+extern u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN;
extern u8 *RTL8821AE_TXPWR_LMT[];
#endif
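/*
 * Editor's note: the header now declares each length as a u32 variable
 * exported from table.c, where it is computed from the array itself via
 * sizeof(array)/sizeof(element), so the counts can no longer drift from
 * the tables when entries are added or removed. Idiomatic kernel style
 * for the same computation would use ARRAY_SIZE() from <linux/kernel.h>:
 */
u32 RTL8812AE_RADIOB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOB_ARRAY);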
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 108098152cf3..03665e82065f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -520,18 +520,18 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (status->rx_packet_bw == HT_CHANNEL_WIDTH_20_40)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status->bw = RATE_INFO_BW_40;
else if (status->rx_packet_bw == HT_CHANNEL_WIDTH_80)
- rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ rx_status->bw = RATE_INFO_BW_80;
if (status->is_ht)
- rx_status->flag |= RX_FLAG_HT;
+ rx_status->encoding = RX_ENC_HT;
if (status->is_vht)
- rx_status->flag |= RX_FLAG_VHT;
+ rx_status->encoding = RX_ENC_VHT;
if (status->is_short_gi)
- rx_status->flag |= RX_FLAG_SHORT_GI;
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- rx_status->vht_nss = status->vht_nss;
+ rx_status->nss = status->vht_nss;
rx_status->flag |= RX_FLAG_MACTIME_START;
/* hw will set status->decrypted true, if it finds the
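/*
 * Editor's note: this hunk tracks the mac80211 rework that moved RX rate
 * metadata out of rx_status->flag into dedicated fields. The mapping the
 * hunk applies, collected in one place:
 *
 *	RX_FLAG_40MHZ       ->  rx_status->bw = RATE_INFO_BW_40
 *	RX_VHT_FLAG_80MHZ   ->  rx_status->bw = RATE_INFO_BW_80
 *	RX_FLAG_HT / _VHT   ->  rx_status->encoding = RX_ENC_HT / RX_ENC_VHT
 *	RX_FLAG_SHORT_GI    ->  rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI
 *	rx_status->vht_nss  ->  rx_status->nss
 */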
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 65ef42b37651..c0d2601bc55f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1529,6 +1529,10 @@ struct rtl_hal {
u8 external_lna_2g;
u8 external_pa_5g;
u8 external_lna_5g;
+ u8 type_glna;
+ u8 type_gpa;
+ u8 type_alna;
+ u8 type_apa;
u8 rfe_type;
/*firmware */
@@ -2933,6 +2937,14 @@ static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
rtlpriv->io.read8_sync(rtlpriv, addr);
}
+static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
+ u32 addr, u32 val8)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, addr, (u8)val8);
+}
+
static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
{
rtlpriv->io.write16_async(rtlpriv, addr, val16);
@@ -2966,6 +2978,12 @@ static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
rtlpriv->cfg->ops->set_bbreg(hw, regaddr, bitmask, data);
}
+static inline void rtl_set_bbreg_with_dwmask(struct ieee80211_hw *hw,
+ u32 regaddr, u32 data)
+{
+ rtl_set_bbreg(hw, regaddr, 0xffffffff, data);
+}
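/*
 * Editor's note: hypothetical usage of the two new wrappers; the register
 * addresses below are placeholders, not taken from this patch. Both exist
 * so callers holding a struct ieee80211_hw * and 32-bit table values can
 * avoid open-coding the u8 cast or the all-ones bitmask at each call site:
 */
rtl_write_byte_with_val32(hw, 0x011, 0x0000005A);	/* low byte of val32 */
rtl_set_bbreg_with_dwmask(hw, 0xC50, 0x00000022);	/* full 32-bit mask */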
+
static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr,
u32 bitmask)
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 785334f7a538..9935bd09db1f 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -479,7 +479,7 @@ struct rndis_wlan_private {
*/
static int rndis_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params);
static int rndis_scan(struct wiphy *wiphy,
@@ -1857,7 +1857,7 @@ error:
*/
static int rndis_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
@@ -2830,15 +2830,22 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
}
if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
- if (!roamed)
+ if (!roamed) {
cfg80211_connect_result(usbdev->net, bssid, req_ie,
req_ie_len, resp_ie,
resp_ie_len, 0, GFP_KERNEL);
- else
- cfg80211_roamed(usbdev->net,
- get_current_channel(usbdev, NULL),
- bssid, req_ie, req_ie_len,
- resp_ie, resp_ie_len, GFP_KERNEL);
+ } else {
+ struct cfg80211_roam_info roam_info = {
+ .channel = get_current_channel(usbdev, NULL),
+ .bssid = bssid,
+ .req_ie = req_ie,
+ .req_ie_len = req_ie_len,
+ .resp_ie = resp_ie,
+ .resp_ie_len = resp_ie_len,
+ };
+
+ cfg80211_roamed(usbdev->net, &roam_info, GFP_KERNEL);
+ }
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
cfg80211_ibss_joined(usbdev->net, bssid,
get_current_channel(usbdev, NULL),
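/*
 * Editor's note: cfg80211_roamed() now takes a single struct
 * cfg80211_roam_info rather than the old long parameter list; designated
 * initializers leave any unset members zeroed. Minimal call pattern,
 * assuming bssid and the ie buffers are in scope:
 */
struct cfg80211_roam_info info = {
	.channel = get_current_channel(usbdev, NULL),
	.bssid = bssid,
};
cfg80211_roamed(usbdev->net, &info, GFP_KERNEL);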
@@ -3392,6 +3399,7 @@ static const struct net_device_ops rndis_wlan_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = rndis_wlan_set_multicast_list,
@@ -3427,6 +3435,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
/* because rndis_command() sleeps we need to use workqueue */
priv->workqueue = create_singlethread_workqueue("rndis_wlan");
+ if (!priv->workqueue) {
+ wiphy_free(wiphy);
+ return -ENOMEM;
+ }
INIT_WORK(&priv->work, rndis_wlan_worker);
INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller);
INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index e3216473aecb..021e5ac5f107 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1261,6 +1261,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->reg_notifier = rsi_reg_notify;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
status = ieee80211_register_hw(hw);
if (status)
return status;
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index d3acc85932a5..709f56e5ad87 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mmc/host.h>
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index 3d170287cd0b..cd63ffef025a 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -1085,7 +1085,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
hdr->band);
if (arg->rx_rate >= 14) {
- hdr->flag |= RX_FLAG_HT;
+ hdr->encoding = RX_ENC_HT;
hdr->rate_idx = arg->rx_rate - 14;
} else if (arg->rx_rate >= 4) {
hdr->rate_idx = arg->rx_rate - 2;
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index a27d4c22b6e8..50fb2a4a5259 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -141,7 +141,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
}
if (desc->mod_pre & SHORT_PREAMBLE_BIT)
- status->flag |= RX_FLAG_SHORTPRE;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
}
static void wl1251_rx_body(struct wl1251 *wl,
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 58e148d7bc7b..de7e2a5fdffa 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -1249,7 +1249,7 @@ static ssize_t fw_logger_write(struct file *file,
}
if (wl->conf.fwlog.output == 0) {
- wl1271_warning("iligal opperation - fw logger disabled by default, please change mode via wlconf");
+ wl1271_warning("invalid operation - fw logger disabled by default, please change mode via wlconf");
return -EINVAL;
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index a21fda910529..382ec15ec1af 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6128,6 +6128,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
+ wl->hw->wiphy->max_sched_scan_reqs = 1;
wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
@@ -6135,7 +6136,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
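/*
 * Editor's note: WIPHY_FLAG_SUPPORTS_SCHED_SCAN was retired in favour of
 * wiphy->max_sched_scan_reqs; a non-zero value both advertises scheduled
 * scan support and tells cfg80211 how many concurrent requests the driver
 * accepts, hence the one-line replacement in the hunk above:
 */
wl->hw->wiphy->max_sched_scan_reqs = 1;	/* one scheduled scan at a time */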
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index b9e14045195f..52a55f9acd80 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -72,7 +72,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
/* 11n support */
if (desc->rate <= wl->hw_min_ht_rate)
- status->flag |= RX_FLAG_HT;
+ status->encoding = RX_ENC_HT;
/*
* Read the signal level and antenna diversity indication.
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index ddad58f614da..009ec07c4cec 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -366,7 +366,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 nla_cmd;
int err;
- err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
+ err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy,
+ NULL);
if (err)
return err;
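/*
 * Editor's note: nla_parse() grew a final struct netlink_ext_ack *
 * parameter for extended error reporting; passing NULL, as these wlcore
 * hunks do, keeps the old behaviour with no per-attribute error messages:
 */
err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy,
		NULL /* no extack */);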
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index fd4e9ba176c9..5c0bcb1fe1a1 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -41,7 +41,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
return -EINVAL;
ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
- wlcore_vendor_attr_policy);
+ wlcore_vendor_attr_policy, NULL);
if (ret)
return ret;
@@ -116,7 +116,7 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
return -EINVAL;
ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
- wlcore_vendor_attr_policy);
+ wlcore_vendor_attr_policy, NULL);
if (ret)
return ret;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 3e37a045f702..fe6517a621b0 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1408,6 +1408,8 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
hw->max_signal = 100;
hw->queues = 1;
hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index c5effd6c6be9..01ca1d57b3d9 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1278,6 +1278,9 @@ static int eject_installer(struct usb_interface *intf)
u8 bulk_out_ep;
int r;
+ if (iface_desc->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
/* Find bulk out endpoint */
for (r = 1; r >= 0; r--) {
endpoint = &iface_desc->endpoint[r].desc;